From d2dcd9083f101584e029cbd4f0e1a4e573170d43 Mon Sep 17 00:00:00 2001 From: Prasad Joshi Date: Fri, 9 Mar 2012 06:27:12 +0530 Subject: [PATCH 0001/2891] logfs: destroy the reserved inodes while unmounting We were assuming that the evict_inode() would never be called on reserved inodes. However, (after the commit 8e22c1a4e logfs: get rid of magical inodes) while unmounting the file system, in put_super, we call iput() on all of the reserved inodes. The following simple test used to cause a kernel panic on LogFS: 1. Mount a LogFS file system on /mnt 2. Create a file $ touch /mnt/a 3. Try to unmount the FS $ umount /mnt The simple fix would be to drop the assumption and properly destroy the reserved inodes. Signed-off-by: Prasad Joshi --- fs/logfs/inode.c | 16 ++++++++++++++++ fs/logfs/readwrite.c | 1 - fs/logfs/segment.c | 2 +- 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c index a422f42238b2..df093d9e4da1 100644 --- a/fs/logfs/inode.c +++ b/fs/logfs/inode.c @@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, logfs_i_callback); } +static void __logfs_destroy_meta_inode(struct inode *inode) +{ + struct logfs_inode *li = logfs_inode(inode); + BUG_ON(li->li_block); + call_rcu(&inode->i_rcu, logfs_i_callback); +} + static void logfs_destroy_inode(struct inode *inode) { struct logfs_inode *li = logfs_inode(inode); + if (inode->i_ino < LOGFS_RESERVED_INOS) { + /* + * The reserved inodes are never destroyed unless we are in + * the unmount path. + */ + __logfs_destroy_meta_inode(inode); + return; + } + BUG_ON(list_empty(&li->li_freeing_list)); spin_lock(&logfs_inode_lock); li->li_refcount--; diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index e3ab5e5a904c..c8ea8664699c 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c @@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode) return; } - BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS); page = inode_to_page(inode); BUG_ON(!page); /* FIXME: Use emergency page */ logfs_put_write_page(page); diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index e28d090c98d6..038da0991794 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c @@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb) static void map_invalidatepage(struct page *page, unsigned long l) { - BUG(); + return; } static int map_releasepage(struct page *page, gfp_t g) -- GitLab From cd8bfa9c8a13cf3facc5731da17e10188b3795d1 Mon Sep 17 00:00:00 2001 From: Prasad Joshi Date: Mon, 2 Apr 2012 09:23:04 +0530 Subject: [PATCH 0002/2891] logfs: initialize the number of iovecs in bio This fixes the following crash when a LogFS file system, created on an encrypted LVM volume, was mounted [ 526.548034] BUG: unable to handle kernel NULL pointer dereference at [ 526.550106] IP: [] memcpy+0xb/0x120 [ 526.551008] PGD bd60067 PUD 1778d067 PMD 0 [ 526.551783] Oops: 0000 [#1] SMP Pid: 2043, comm: mount RIP: 0010:[] [] memcpy+0xb/0x120 Call Trace: kcryptd_io_read+0xdb/0x100 crypt_map+0xfd/0x190 __map_bio+0x48/0x150 __split_and_process_bio+0x51b/0x630 dm_request+0x138/0x230 generic_make_request+0xca/0x100 submit_bio+0x87/0x110 sync_request+0xdd/0x120 [logfs] bdev_readpage+0x2e/0x70 [logfs] do_read_cache_page+0x82/0x180 logfs_mount+0x2ad/0x770 [logfs] mount_fs+0x47/0x1c0 vfs_kern_mount+0x72/0x110 do_kern_mount+0x54/0x110 do_mount+0x520/0x7f0 sys_mount+0x90/0xe0 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=42292 Reported-by: Witold Baryluk Signed-off-by: Prasad
Joshi --- fs/logfs/dev_bdev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index df0de27c2733..ea29df36893d 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) struct completion complete; bio_init(&bio); + bio.bi_max_vecs = 1; bio.bi_io_vec = &bio_vec; bio_vec.bv_page = page; bio_vec.bv_len = PAGE_SIZE; -- GitLab From 2bbc89a8e9c652ee71c6c3b2e0679b7ecedb1a09 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 30 May 2012 15:49:59 +0200 Subject: [PATCH 0003/2891] m68k: Use Kbuild logic to import asm-generic headers Replace all header files that just include their asm-generic version with Kbuild logic Signed-off-by: Geert Uytterhoeven --- arch/m68k/include/asm/Kbuild | 23 +++++++++++++++++++++++ arch/m68k/include/asm/bitsperlong.h | 1 - arch/m68k/include/asm/cputime.h | 6 ------ arch/m68k/include/asm/device.h | 7 ------- arch/m68k/include/asm/emergency-restart.h | 6 ------ arch/m68k/include/asm/errno.h | 6 ------ arch/m68k/include/asm/futex.h | 6 ------ arch/m68k/include/asm/ioctl.h | 1 - arch/m68k/include/asm/ipcbuf.h | 1 - arch/m68k/include/asm/irq_regs.h | 1 - arch/m68k/include/asm/kdebug.h | 1 - arch/m68k/include/asm/kmap_types.h | 6 ------ arch/m68k/include/asm/kvm_para.h | 1 - arch/m68k/include/asm/local.h | 6 ------ arch/m68k/include/asm/local64.h | 1 - arch/m68k/include/asm/mman.h | 1 - arch/m68k/include/asm/mutex.h | 9 --------- arch/m68k/include/asm/percpu.h | 6 ------ arch/m68k/include/asm/resource.h | 6 ------ arch/m68k/include/asm/scatterlist.h | 6 ------ arch/m68k/include/asm/siginfo.h | 6 ------ arch/m68k/include/asm/statfs.h | 6 ------ arch/m68k/include/asm/topology.h | 6 ------ arch/m68k/include/asm/xor.h | 1 - 24 files changed, 23 insertions(+), 97 deletions(-) delete mode 100644 arch/m68k/include/asm/bitsperlong.h delete mode 100644 arch/m68k/include/asm/cputime.h delete mode 100644 arch/m68k/include/asm/device.h delete mode 100644 arch/m68k/include/asm/emergency-restart.h delete mode 100644 arch/m68k/include/asm/errno.h delete mode 100644 arch/m68k/include/asm/futex.h delete mode 100644 arch/m68k/include/asm/ioctl.h delete mode 100644 arch/m68k/include/asm/ipcbuf.h delete mode 100644 arch/m68k/include/asm/irq_regs.h delete mode 100644 arch/m68k/include/asm/kdebug.h delete mode 100644 arch/m68k/include/asm/kmap_types.h delete mode 100644 arch/m68k/include/asm/kvm_para.h delete mode 100644 arch/m68k/include/asm/local.h delete mode 100644 arch/m68k/include/asm/local64.h delete mode 100644 arch/m68k/include/asm/mman.h delete mode 100644 arch/m68k/include/asm/mutex.h delete mode 100644 arch/m68k/include/asm/percpu.h delete mode 100644 arch/m68k/include/asm/resource.h delete mode 100644 arch/m68k/include/asm/scatterlist.h delete mode 100644 arch/m68k/include/asm/siginfo.h delete mode 100644 arch/m68k/include/asm/statfs.h delete mode 100644 arch/m68k/include/asm/topology.h delete mode 100644 arch/m68k/include/asm/xor.h diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index eafa2539a8ee..4d1f8fa33091 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -1,4 +1,27 @@ include include/asm-generic/Kbuild.asm header-y += cachectl.h +generic-y += bitsperlong.h +generic-y += cputime.h +generic-y += device.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += futex.h +generic-y += ioctl.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kdebug.h
+generic-y += kmap_types.h +generic-y += kvm_para.h +generic-y += local64.h +generic-y += local.h +generic-y += mman.h +generic-y += mutex.h +generic-y += percpu.h +generic-y += resource.h +generic-y += scatterlist.h +generic-y += siginfo.h +generic-y += statfs.h +generic-y += topology.h generic-y += word-at-a-time.h +generic-y += xor.h diff --git a/arch/m68k/include/asm/bitsperlong.h b/arch/m68k/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b2..000000000000 --- a/arch/m68k/include/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/bitsperlong.h> diff --git a/arch/m68k/include/asm/cputime.h b/arch/m68k/include/asm/cputime.h deleted file mode 100644 index c79c5e892305..000000000000 --- a/arch/m68k/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __M68K_CPUTIME_H -#define __M68K_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __M68K_CPUTIME_H */ diff --git a/arch/m68k/include/asm/device.h b/arch/m68k/include/asm/device.h deleted file mode 100644 index d8f9872b0e2d..000000000000 --- a/arch/m68k/include/asm/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include <asm-generic/device.h> - diff --git a/arch/m68k/include/asm/emergency-restart.h b/arch/m68k/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/m68k/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/m68k/include/asm/errno.h b/arch/m68k/include/asm/errno.h deleted file mode 100644 index 0d4e188d6ef6..000000000000 --- a/arch/m68k/include/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_ERRNO_H -#define _M68K_ERRNO_H - -#include <asm-generic/errno.h> - -#endif /* _M68K_ERRNO_H */ diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h deleted file mode 100644 index 6a332a9f099c..000000000000 --- a/arch/m68k/include/asm/futex.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#include <asm-generic/futex.h> - -#endif diff --git a/arch/m68k/include/asm/ioctl.h b/arch/m68k/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/arch/m68k/include/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ioctl.h> diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d0..000000000000 --- a/arch/m68k/include/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ipcbuf.h> diff --git a/arch/m68k/include/asm/irq_regs.h b/arch/m68k/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/arch/m68k/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/irq_regs.h> diff --git a/arch/m68k/include/asm/kdebug.h b/arch/m68k/include/asm/kdebug.h deleted file mode 100644 index 6ece1b037665..000000000000 --- a/arch/m68k/include/asm/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kdebug.h> diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h deleted file mode 100644 index 3413cc1390ec..000000000000 --- a/arch/m68k/include/asm/kmap_types.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_M68K_KMAP_TYPES_H -#define __ASM_M68K_KMAP_TYPES_H - -#include <asm-generic/kmap_types.h> - -#endif /* __ASM_M68K_KMAP_TYPES_H */ diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/arch/m68k/include/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kvm_para.h> diff --git a/arch/m68k/include/asm/local.h b/arch/m68k/include/asm/local.h
deleted file mode 100644 index 6c259263e1f0..000000000000 --- a/arch/m68k/include/asm/local.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_M68K_LOCAL_H -#define _ASM_M68K_LOCAL_H - -#include <asm-generic/local.h> - -#endif /* _ASM_M68K_LOCAL_H */ diff --git a/arch/m68k/include/asm/local64.h b/arch/m68k/include/asm/local64.h deleted file mode 100644 index 36c93b5cc239..000000000000 --- a/arch/m68k/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local64.h> diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h deleted file mode 100644 index 8eebf89f5ab1..000000000000 --- a/arch/m68k/include/asm/mman.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/mman.h> diff --git a/arch/m68k/include/asm/mutex.h b/arch/m68k/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/arch/m68k/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) - */ - -#include diff --git a/arch/m68k/include/asm/percpu.h b/arch/m68k/include/asm/percpu.h deleted file mode 100644 index 0859d048faf5..000000000000 --- a/arch/m68k/include/asm/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_M68K_PERCPU_H -#define __ASM_M68K_PERCPU_H - -#include <asm-generic/percpu.h> - -#endif /* __ASM_M68K_PERCPU_H */ diff --git a/arch/m68k/include/asm/resource.h b/arch/m68k/include/asm/resource.h deleted file mode 100644 index e7d35019f337..000000000000 --- a/arch/m68k/include/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_RESOURCE_H -#define _M68K_RESOURCE_H - -#include <asm-generic/resource.h> - -#endif /* _M68K_RESOURCE_H */ diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h deleted file mode 100644 index 312505452a1e..000000000000 --- a/arch/m68k/include/asm/scatterlist.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SCATTERLIST_H -#define _M68K_SCATTERLIST_H - -#include <asm-generic/scatterlist.h> - -#endif /* !(_M68K_SCATTERLIST_H) */ diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h deleted file mode 100644 index 851d3d784b53..000000000000 --- a/arch/m68k/include/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SIGINFO_H -#define _M68K_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/m68k/include/asm/statfs.h b/arch/m68k/include/asm/statfs.h deleted file mode 100644 index 08d93f14e061..000000000000 --- a/arch/m68k/include/asm/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_STATFS_H -#define _M68K_STATFS_H - -#include <asm-generic/statfs.h> - -#endif /* _M68K_STATFS_H */ diff --git a/arch/m68k/include/asm/topology.h b/arch/m68k/include/asm/topology.h deleted file mode 100644 index ca173e9f26ff..000000000000 --- a/arch/m68k/include/asm/topology.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_M68K_TOPOLOGY_H -#define _ASM_M68K_TOPOLOGY_H - -#include <asm-generic/topology.h> - -#endif /* _ASM_M68K_TOPOLOGY_H */ diff --git a/arch/m68k/include/asm/xor.h b/arch/m68k/include/asm/xor.h deleted file mode 100644 index c82eb12a5b18..000000000000 --- a/arch/m68k/include/asm/xor.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/xor.h> -- GitLab From 2ef0d3e64f0ba9aed72c2ddd410aea83cd3261ab Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 30 May 2012 19:50:04 +0200 Subject: [PATCH 0004/2891] m68k: Use asm-generic version of <asm/types.h> The extra definition for BITS_PER_LONG we had is also indirectly provided by <asm-generic/types.h>, via <asm-generic/int-ll64.h> and <asm-generic/bitsperlong.h> Signed-off-by: Geert Uytterhoeven --- arch/m68k/include/asm/Kbuild | 1 + arch/m68k/include/asm/types.h |
22 ---------------------- 2 files changed, 1 insertion(+), 22 deletions(-) delete mode 100644 arch/m68k/include/asm/types.h diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 4d1f8fa33091..064f86743b34 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -23,5 +23,6 @@ generic-y += scatterlist.h generic-y += siginfo.h generic-y += statfs.h generic-y += topology.h +generic-y += types.h generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h deleted file mode 100644 index 89705adcbd52..000000000000 --- a/arch/m68k/include/asm/types.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _M68K_TYPES_H -#define _M68K_TYPES_H - -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. - */ -#include <asm-generic/int-ll64.h> - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 32 - -#endif /* __KERNEL__ */ - -#endif /* _M68K_TYPES_H */ -- GitLab From 54503b1da74afdb38c730ad6413dbe54ce119134 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 31 May 2012 21:39:13 +0200 Subject: [PATCH 0005/2891] m68knommu: Clean up printing of sections - Remove casts and unneeded address-of ('&') operators, - Use %p to format pointers, %lx to format unsigned longs. Signed-off-by: Geert Uytterhoeven Acked-by: Greg Ungerer --- arch/m68k/kernel/setup_no.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 7dc186b7a85f..8a624ecf86b8 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -218,13 +218,10 @@ void __init setup_arch(char **cmdline_p) printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc.
(Jate Sujjavanich)\n"); #endif - pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x " - "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, - (int) &_sdata, (int) &_edata, - (int) &_sbss, (int) &_ebss); - pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ", - (int) &_ebss, (int) memory_start, - (int) memory_start, (int) memory_end); + pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n", + _stext, _etext, _sdata, _edata, _sbss, _ebss); + pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", + _ebss, memory_start, memory_start, memory_end); /* Keep a copy of command line */ *cmdline_p = &command_line[0]; -- GitLab From f2378ed90572aeeecded30fd3754575c2ff1e55f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 6 Jun 2012 17:24:47 +0200 Subject: [PATCH 0006/2891] m68k: Remove duplicate FPU config option It's also defined in arch/m68k/Kconfig.cpu Signed-off-by: Geert Uytterhoeven Acked-by: Greg Ungerer --- arch/m68k/Kconfig | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 147120128260..bc8cc504dee1 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -62,9 +62,6 @@ config CPU_HAS_NO_MULDIV64 config CPU_HAS_ADDRESS_SPACES bool -config FPU - bool - config HZ int default 1000 if CLEOPATRA -- GitLab From 022613e0b0753f1261454dea9b07c2e959363af7 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 6 Jun 2012 17:26:35 +0200 Subject: [PATCH 0007/2891] m68k: Move CPU_HAS_* config options They belong together with the CPU selection Signed-off-by: Geert Uytterhoeven Acked-by: Greg Ungerer --- arch/m68k/Kconfig | 9 --------- arch/m68k/Kconfig.cpu | 9 +++++++++ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index bc8cc504dee1..87b504cfe001 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -53,15 +53,6 @@ config ZONE_DMA bool default y -config CPU_HAS_NO_BITFIELDS - bool - -config CPU_HAS_NO_MULDIV64 - bool - -config CPU_HAS_ADDRESS_SPACES - bool - config HZ int default 1000 if CLEOPATRA diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 2b53254ad994..6cd8c1a32893 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -360,6 +360,15 @@ config NODES_SHIFT default "3" depends on !SINGLE_MEMORY_CHUNK +config CPU_HAS_NO_BITFIELDS + bool + +config CPU_HAS_NO_MULDIV64 + bool + +config CPU_HAS_ADDRESS_SPACES + bool + config FPU bool -- GitLab From 5df58f3aac58bb0f08fa205a006c6b7840cd7ac4 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 6 Jun 2012 18:35:13 +0200 Subject: [PATCH 0008/2891] m68k: delay, muldi3 - Use CONFIG_CPU_HAS_NO_MULDIV64 instead of open coding CONFIG_M68000 || CONFIG_COLDFIRE Signed-off-by: Geert Uytterhoeven Acked-by: Greg Ungerer --- arch/m68k/include/asm/delay.h | 2 +- arch/m68k/lib/muldi3.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h index 9c09becfd4c9..12d8fe4f1d30 100644 --- a/arch/m68k/include/asm/delay.h +++ b/arch/m68k/include/asm/delay.h @@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops) extern void __bad_udelay(void); -#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) +#ifdef CONFIG_CPU_HAS_NO_MULDIV64 /* * The simpler m68k and ColdFire processors do not have a 32*32->64 * multiply instruction. So we need to handle them a little differently. 
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c index 79e928a525d0..ee5f0b1b5c5d 100644 --- a/arch/m68k/lib/muldi3.c +++ b/arch/m68k/lib/muldi3.c @@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) +#ifdef CONFIG_CPU_HAS_NO_MULDIV64 #define SI_TYPE_SIZE 32 #define __BITS4 (SI_TYPE_SIZE / 4) -- GitLab From 9f1f118035b2d28bb741844537b9ee87705b4c6e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 6 Jun 2012 19:37:52 +0200 Subject: [PATCH 0009/2891] m68k: Introduce config option CPU_HAS_NO_UNALIGNED Use CONFIG_CPU_HAS_NO_UNALIGNED instead of open coding CONFIG_M68000 || CONFIG_COLDFIRE Signed-off-by: Geert Uytterhoeven Acked-by: Greg Ungerer --- arch/m68k/Kconfig.cpu | 5 +++++ arch/m68k/include/asm/unaligned.h | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 6cd8c1a32893..c4ed65081475 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -27,6 +27,7 @@ config COLDFIRE select ARCH_HAVE_CUSTOM_GPIO_H select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 + select CPU_HAS_NO_UNALIGNED select GENERIC_CSUM endchoice @@ -37,6 +38,7 @@ config M68000 bool select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 + select CPU_HAS_NO_UNALIGNED select GENERIC_CSUM help The Freescale (was Motorola) 68000 CPU is the first generation of @@ -366,6 +368,9 @@ config CPU_HAS_NO_BITFIELDS config CPU_HAS_NO_MULDIV64 bool +config CPU_HAS_NO_UNALIGNED + bool + config CPU_HAS_ADDRESS_SPACES bool diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h index f4043ae63db1..2b3ca0bf7a0d 100644 --- a/arch/m68k/include/asm/unaligned.h +++ b/arch/m68k/include/asm/unaligned.h @@ -2,7 +2,7 @@ #define _ASM_M68K_UNALIGNED_H -#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000) +#ifdef CONFIG_CPU_HAS_NO_UNALIGNED #include #include #include @@ -12,7 +12,7 @@ #else /* - * The m68k can do unaligned accesses itself. + * The m68k can do unaligned accesses itself. */ #include #include -- GitLab From 7df0d27f3d78282e9ab97da060e85f53a5f556b5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 6 Jun 2012 19:39:39 +0200 Subject: [PATCH 0010/2891] m68k: CPU32 does not support unaligned accesses Hence select CPU_HAS_NO_UNALIGNED Reported-by: Philippe De Muyter Signed-off-by: Geert Uytterhoeven Acked-by: Greg Ungerer --- arch/m68k/Kconfig.cpu | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index c4ed65081475..d4f3a9839cff 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -50,6 +50,7 @@ config M68000 config MCPU32 bool select CPU_HAS_NO_BITFIELDS + select CPU_HAS_NO_UNALIGNED help The Freescale (was then Motorola) CPU32 is a CPU core that is based on the 68020 processor. For the most part it is used in -- GitLab From efd68e7254503f3207805f674a1ea1d743f5dfe2 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Sun, 3 Jun 2012 22:04:33 -0700 Subject: [PATCH 0011/2891] devicetree: add helper inline for retrieving a node's full name The pattern (np ? np->full_name : "") is rather common in the kernel, but can also make for quite long lines. This patch adds a new inline function, of_node_full_name() so that the test for a valid node pointer doesn't need to be open coded at all call sites. 
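
As an illustration (this example is not part of the patch; dbg_print_node() is a made-up caller), a typical call-site conversion looks like:

    /* Before: every caller open-codes the NULL test. */
    static void dbg_print_node(struct device_node *np)
    {
            pr_debug("node: %s\n", np ? np->full_name : "<no-node>");
    }

    /* After: the NULL test lives in of_node_full_name(). */
    static void dbg_print_node(struct device_node *np)
    {
            pr_debug("node: %s\n", of_node_full_name(np));
    }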
Signed-off-by: Grant Likely Cc: Paul Mundt Cc: Benjamin Herrenschmidt Cc: Thomas Gleixner --- arch/microblaze/pci/pci-common.c | 6 ++---- arch/powerpc/kernel/pci-common.c | 6 ++---- arch/powerpc/kernel/vio.c | 5 ++--- arch/powerpc/platforms/cell/iommu.c | 3 +-- arch/powerpc/platforms/pseries/iommu.c | 2 +- arch/sparc/kernel/of_device_64.c | 2 +- drivers/of/base.c | 2 +- drivers/of/irq.c | 2 +- include/linux/of.h | 10 ++++++++++ kernel/irq/irqdomain.c | 8 ++++---- 10 files changed, 25 insertions(+), 21 deletions(-) diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index ed22bfc5db14..ca8f6e769960 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -249,8 +249,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev) } else { pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", oirq.size, oirq.specifier[0], oirq.specifier[1], - oirq.controller ? oirq.controller->full_name : - ""); + of_node_full_name(oirq.controller)); virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); @@ -1493,8 +1492,7 @@ static void __devinit pcibios_scan_phb(struct pci_controller *hose) struct pci_bus *bus; struct device_node *node = hose->dn; - pr_debug("PCI: Scanning PHB %s\n", - node ? node->full_name : ""); + pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node)); pcibios_setup_phb_resources(hose, &resources); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 8e78e93c8185..886c254fd565 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -248,8 +248,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev) } else { pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", oirq.size, oirq.specifier[0], oirq.specifier[1], - oirq.controller ? oirq.controller->full_name : - ""); + of_node_full_name(oirq.controller)); virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); @@ -1628,8 +1627,7 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose) struct device_node *node = hose->dn; int mode; - pr_debug("PCI: Scanning PHB %s\n", - node ? node->full_name : ""); + pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node)); /* Get some IO space for the new PHB */ pcibios_setup_phb_io_space(hose); diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index cb87301ccd55..63f72ede4341 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -1296,8 +1296,7 @@ static void __devinit vio_dev_release(struct device *dev) struct iommu_table *tbl = get_iommu_table_base(dev); if (tbl) - iommu_free_table(tbl, dev->of_node ? - dev->of_node->full_name : dev_name(dev)); + iommu_free_table(tbl, of_node_full_name(dev->of_node)); of_node_put(dev->of_node); kfree(to_vio_dev(dev)); } @@ -1509,7 +1508,7 @@ static ssize_t devspec_show(struct device *dev, { struct device_node *of_node = dev->of_node; - return sprintf(buf, "%s\n", of_node ? 
of_node->full_name : "none"); + return sprintf(buf, "%s\n", of_node_full_name(of_node)); } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index b9f509a34c01..b6732004c882 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -552,8 +552,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) iommu = cell_iommu_for_node(dev_to_node(dev)); if (iommu == NULL || list_empty(&iommu->windows)) { printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n", - dev->of_node ? dev->of_node->full_name : "?", - dev_to_node(dev)); + of_node_full_name(dev->of_node), dev_to_node(dev)); return NULL; } window = list_entry(iommu->windows.next, struct iommu_window, list); diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 0915b1ad66ce..aab5fbc924e6 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -1051,7 +1051,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) if (!pdn || !PCI_DN(pdn)) { printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: " "no DMA window found for pci dev=%s dn=%s\n", - pci_name(dev), dn? dn->full_name : ""); + pci_name(dev), of_node_full_name(dn)); return; } pr_debug(" parent is %s\n", pdn->full_name); diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 7a3be6f6737a..7bbdc26d9512 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -580,7 +580,7 @@ static unsigned int __init build_one_device_irq(struct platform_device *op, printk("%s: Apply [%s:%x] imap --> [%s:%x]\n", op->dev.of_node->full_name, pp->full_name, this_orig_irq, - (iret ? iret->full_name : "NULL"), irq); + of_node_full_name(iret), irq); if (!iret) break; diff --git a/drivers/of/base.c b/drivers/of/base.c index d9bfd49b1935..9282d4cc1091 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1173,7 +1173,7 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np, ap->stem[stem_len] = 0; list_add_tail(&ap->link, &aliases_lookup); pr_debug("adding DT alias:%s: stem=%s id=%i node=%s\n", - ap->alias, ap->stem, ap->id, np ? np->full_name : NULL); + ap->alias, ap->stem, ap->id, of_node_full_name(np)); } /** diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 9cf00602f566..ff8ab7b27373 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -255,7 +255,7 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec, skiplevel: /* Iterate again with new parent */ - pr_debug(" -> new parent: %s\n", newpar ? newpar->full_name : "<>"); + pr_debug(" -> new parent: %s\n", of_node_full_name(newpar)); of_node_put(ipar); ipar = newpar; newpar = NULL; diff --git a/include/linux/of.h b/include/linux/of.h index 2ec1083af7ff..1012377cae92 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -163,6 +163,11 @@ static inline int of_node_to_nid(struct device_node *np) { return -1; } #define of_node_to_nid of_node_to_nid #endif +static inline const char* of_node_full_name(struct device_node *np) +{ + return np ? 
np->full_name : "<no-node>"; +} + extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); #define for_each_node_by_name(dn, name) \ @@ -303,6 +308,11 @@ const char *of_prop_next_string(struct property *prop, const char *cur); #else /* CONFIG_OF */ +static inline const char* of_node_full_name(struct device_node *np) +{ + return "<no-node>"; +} + static inline bool of_have_populated_dt(void) { return false; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 41c1564103f1..38c5eb839c92 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -448,7 +448,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain, } pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", - hwirq, domain->of_node ? domain->of_node->full_name : "null", virq); + hwirq, of_node_full_name(domain->of_node), virq); return virq; } @@ -477,7 +477,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller, return intspec[0]; #endif pr_warning("no irq domain found for %s !\n", - controller->full_name); + of_node_full_name(controller)); return 0; } @@ -725,8 +725,8 @@ static int virq_debug_show(struct seq_file *m, void *private) data = irq_desc_get_chip_data(desc); seq_printf(m, data ? "0x%p " : " %p ", data); - if (desc->irq_data.domain && desc->irq_data.domain->of_node) - p = desc->irq_data.domain->of_node->full_name; + if (desc->irq_data.domain) + p = of_node_full_name(desc->irq_data.domain->of_node); else p = none; seq_printf(m, "%s\n", p); -- GitLab From 5ca4db61e859526b2dbee3bcea3626d3de49a0b2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 3 Jun 2012 22:04:34 -0700 Subject: [PATCH 0012/2891] irqdomain: Simple NUMA awareness. While common irqdesc allocation is node aware, the irqdomain code is not. Presently we observe a number of regressions/inconsistencies on NUMA-capable platforms: - Platforms using irqdomains with legacy mappings, where the irq_descs are allocated node-local and the irqdomain data structure is not. - Drivers implementing irqdomains will lose node locality regardless of the underlying struct device's node id. This plugs in NUMA node id proliferation across the various allocation callsites by way of of_node_to_nid() node lookup. While of_node_to_nid() does the right thing for OF-capable platforms, it doesn't presently handle the non-DT case. This is trivially dealt with by simply wrapping in to numa_node_id() unconditionally. Signed-off-by: Paul Mundt Cc: Benjamin Herrenschmidt Cc: Thomas Gleixner Cc: Rob Herring Signed-off-by: Grant Likely --- include/linux/of.h | 15 ++++++++++----- kernel/irq/irqdomain.c | 13 ++++++++----- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/include/linux/of.h b/include/linux/of.h index 1012377cae92..76930ee78db5 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -158,11 +159,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) #define OF_BAD_ADDR ((u64)-1) -#ifndef of_node_to_nid -static inline int of_node_to_nid(struct device_node *np) { return -1; } -#define of_node_to_nid of_node_to_nid -#endif - static inline const char* of_node_full_name(struct device_node *np) { return np ?
np->full_name : "<no-node>"; @@ -412,6 +408,15 @@ static inline int of_machine_is_compatible(const char *compat) while (0) #endif /* CONFIG_OF */ +#ifndef of_node_to_nid +static inline int of_node_to_nid(struct device_node *np) +{ + return numa_node_id(); +} + +#define of_node_to_nid of_node_to_nid +#endif + /** * of_property_read_bool - Find a property * @np: device node from which the property value is to be read. diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 38c5eb839c92..79ae0ebb90ba 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -45,7 +46,8 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node, { struct irq_domain *domain; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); + domain = kzalloc_node(sizeof(*domain), GFP_KERNEL, + of_node_to_nid(of_node)); if (WARN_ON(!domain)) return NULL; @@ -229,7 +231,8 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node, struct irq_domain *domain; unsigned int *revmap; - revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL); + revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL, + of_node_to_nid(of_node)); if (WARN_ON(!revmap)) return NULL; @@ -367,7 +370,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain) BUG_ON(domain == NULL); WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP); - virq = irq_alloc_desc_from(1, 0); + virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); if (!virq) { pr_debug("create_direct virq allocation failed\n"); return 0; @@ -433,9 +436,9 @@ unsigned int irq_create_mapping(struct irq_domain *domain, hint = hwirq % nr_irqs; if (hint == 0) hint++; - virq = irq_alloc_desc_from(hint, 0); + virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node)); if (virq <= 0) - virq = irq_alloc_desc_from(1, 0); + virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); if (virq <= 0) { pr_debug("-> virq allocation failed\n"); return 0; -- GitLab From 732557047128e5c200c3590efba39f10ac46bcb2 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Sun, 3 Jun 2012 22:04:35 -0700 Subject: [PATCH 0013/2891] irqdomain: Remove unnecessary test for IRQ_DOMAIN_MAP_LEGACY Where irq_domain_associate() is called in irq_create_mapping, there is no need to test for IRQ_DOMAIN_MAP_LEGACY because it is already tested for earlier in the routine. Signed-off-by: Grant Likely Cc: Paul Mundt Cc: Benjamin Herrenschmidt Cc: Thomas Gleixner Cc: Rob Herring --- kernel/irq/irqdomain.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 79ae0ebb90ba..b1f774cfd089 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -445,8 +445,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain, } if (irq_setup_virq(domain, virq, hwirq)) { - if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY) - irq_free_desc(virq); + irq_free_desc(virq); return 0; } -- GitLab From aed98048bd1c83469d96932c1901e867d9ba519a Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Sun, 3 Jun 2012 22:04:39 -0700 Subject: [PATCH 0014/2891] irqdomain: Make ops->map hook optional There isn't a really compelling reason to force ->map to be populated, so allow it to be left unset.
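
For example (an illustrative sketch, not code from this patch), a driver with no per-interrupt initialization can now register a domain whose ops provide only an .xlate callback:

    /* Hypothetical driver code: .map may be omitted when there is
     * nothing to set up per interrupt. */
    static const struct irq_domain_ops example_irq_ops = {
            .xlate = irq_domain_xlate_onecell,
    };

    domain = irq_domain_add_linear(np, 32, &example_irq_ops, NULL);

Alternatively, irq_domain_simple_ops can be used directly, since this patch also drops its dummy .map implementation.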
Signed-off-by: Grant Likely Acked-by: Benjamin Herrenschmidt Cc: Paul Mundt Cc: Thomas Gleixner Cc: Rob Herring --- kernel/irq/irqdomain.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index b1f774cfd089..d3968e91bfd2 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -205,7 +205,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, * one can then use irq_create_mapping() to * explicitly change them */ - ops->map(domain, irq, hwirq); + if (ops->map) + ops->map(domain, irq, hwirq); /* Clear norequest flags */ irq_clear_status_flags(irq, IRQ_NOREQUEST); @@ -340,8 +341,8 @@ static int irq_setup_virq(struct irq_domain *domain, unsigned int virq, irq_data->hwirq = hwirq; irq_data->domain = domain; - if (domain->ops->map(domain, virq, hwirq)) { - pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq); + if (domain->ops->map && domain->ops->map(domain, virq, hwirq)) { + pr_err("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq); irq_data->domain = NULL; irq_data->hwirq = 0; return -1; @@ -763,12 +764,6 @@ static int __init irq_debugfs_init(void) __initcall(irq_debugfs_init); #endif /* CONFIG_IRQ_DOMAIN_DEBUG */ -static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - return 0; -} - /** * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings * @@ -831,7 +826,6 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell); const struct irq_domain_ops irq_domain_simple_ops = { - .map = irq_domain_simple_map, .xlate = irq_domain_xlate_onetwocell, }; EXPORT_SYMBOL_GPL(irq_domain_simple_ops); -- GitLab From e6765ffa6897f7fb84469bab2e1be885c57ea519 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 14 Jun 2012 09:53:33 +0200 Subject: [PATCH 0015/2891] fbdev: sh_mipi_dsi: fix a section mismatch sh_mipi_setup() is called from a .text function, therefore it cannot be __init. Additionally, sh_mipi_remove() can be moved to the .devexit.text section. 
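
The general rule, as a sketch (the example_* names are hypothetical, not from this driver): a function may only live in .init.text if every caller runs at init time, while hotplug-removal paths may use __devexit together with __devexit_p() so the reference compiles away when hotplug is disabled:

    static int example_setup(struct example *priv)  /* called from .text, so no __init */
    {
            return 0;
    }

    static int __devexit example_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver example_driver = {
            .remove = __devexit_p(example_remove), /* NULL when hotplug is off */
    };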
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Laurent Pinchart --- drivers/video/sh_mipi_dsi.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c index 4c6b84488561..3951fdae5f68 100644 --- a/drivers/video/sh_mipi_dsi.c +++ b/drivers/video/sh_mipi_dsi.c @@ -127,8 +127,7 @@ static void sh_mipi_shutdown(struct platform_device *pdev) sh_mipi_dsi_enable(mipi, false); } -static int __init sh_mipi_setup(struct sh_mipi *mipi, - struct sh_mipi_dsi_info *pdata) +static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata) { void __iomem *base = mipi->base; struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan; @@ -551,7 +550,7 @@ efindslot: return ret; } -static int __exit sh_mipi_remove(struct platform_device *pdev) +static int __devexit sh_mipi_remove(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -592,7 +591,7 @@ static int __exit sh_mipi_remove(struct platform_device *pdev) } static struct platform_driver sh_mipi_driver = { - .remove = __exit_p(sh_mipi_remove), + .remove = __devexit_p(sh_mipi_remove), .shutdown = sh_mipi_shutdown, .driver = { .name = "sh-mipi-dsi", -- GitLab From 3281e54c80195b90ed12a5b6cddef4ab42e656e1 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 22 Nov 2011 00:56:58 +0100 Subject: [PATCH 0016/2891] fbdev: sh_mobile_lcdc: Constify sh_mobile_lcdc_fix structure The structure is only read, make it const. Signed-off-by: Laurent Pinchart --- drivers/video/sh_mobile_lcdcfb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index e672698bd820..5461abeb5c5a 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -1003,7 +1003,7 @@ static int sh_mobile_lcdc_setcolreg(u_int regno, return 0; } -static struct fb_fix_screeninfo sh_mobile_lcdc_fix = { +static const struct fb_fix_screeninfo sh_mobile_lcdc_fix = { .id = "SH Mobile LCDC", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, -- GitLab From d7ad33421863308fe7ddf865737d4d83b64e5b81 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 22 Nov 2011 00:56:58 +0100 Subject: [PATCH 0017/2891] fbdev: sh_mobile_lcdc: Rename fb operation handlers with a common prefix Make all fb operation handlers start with sh_mobile_lcdc_ in preparation for the multi-plane support. 
Signed-off-by: Laurent Pinchart --- drivers/video/sh_mobile_lcdcfb.c | 48 +++++++++++++++++--------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 5461abeb5c5a..799f87171e04 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -384,8 +384,8 @@ sh_mobile_lcdc_must_reconfigure(struct sh_mobile_lcdc_chan *ch, return true; } -static int sh_mobile_check_var(struct fb_var_screeninfo *var, - struct fb_info *info); +static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, enum sh_mobile_lcdc_entity_event event, @@ -439,7 +439,7 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, fb_videomode_to_var(&var, mode); var.bits_per_pixel = info->var.bits_per_pixel; var.grayscale = info->var.grayscale; - ret = sh_mobile_check_var(&var, info); + ret = sh_mobile_lcdc_check_var(&var, info); break; } @@ -585,7 +585,7 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) return IRQ_HANDLED; } -static int sh_mobile_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) +static int sh_mobile_lcdc_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) { unsigned long ldintr; int ret; @@ -686,7 +686,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) } /* - * __sh_mobile_lcdc_start - Configure and tart the LCDC + * __sh_mobile_lcdc_start - Configure and start the LCDC * @priv: LCDC device * * Configure all enabled channels and start the LCDC device. All external @@ -1035,8 +1035,8 @@ static void sh_mobile_lcdc_imageblit(struct fb_info *info, sh_mobile_lcdc_deferred_io_touch(info); } -static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) +static int sh_mobile_lcdc_pan(struct fb_var_screeninfo *var, + struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_priv *priv = ch->lcdc; @@ -1099,14 +1099,15 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var, return 0; } -static int sh_mobile_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) +static int sh_mobile_lcdc_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) { + struct sh_mobile_lcdc_chan *ch = info->par; int retval; switch (cmd) { case FBIO_WAITFORVSYNC: - retval = sh_mobile_wait_for_vsync(info->par); + retval = sh_mobile_lcdc_wait_for_vsync(ch); break; default: @@ -1158,7 +1159,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) * Locking: both .fb_release() and .fb_open() are called with info->lock held if * user == 1, or with console sem held, if user == 0. 
*/ -static int sh_mobile_release(struct fb_info *info, int user) +static int sh_mobile_lcdc_release(struct fb_info *info, int user) { struct sh_mobile_lcdc_chan *ch = info->par; @@ -1179,7 +1180,7 @@ static int sh_mobile_release(struct fb_info *info, int user) return 0; } -static int sh_mobile_open(struct fb_info *info, int user) +static int sh_mobile_lcdc_open(struct fb_info *info, int user) { struct sh_mobile_lcdc_chan *ch = info->par; @@ -1192,7 +1193,8 @@ static int sh_mobile_open(struct fb_info *info, int user) return 0; } -static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_priv *p = ch->lcdc; @@ -1313,7 +1315,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in return 0; } -static int sh_mobile_set_par(struct fb_info *info) +static int sh_mobile_lcdc_set_par(struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; int ret; @@ -1383,8 +1385,8 @@ static int sh_mobile_lcdc_blank(int blank, struct fb_info *info) * mode will reenable the clocks and update the screen in time, * so it does not need this. */ if (!info->fbdefio) { - sh_mobile_wait_for_vsync(ch); - sh_mobile_wait_for_vsync(ch); + sh_mobile_lcdc_wait_for_vsync(ch); + sh_mobile_lcdc_wait_for_vsync(ch); } sh_mobile_lcdc_clk_off(p); } @@ -1402,12 +1404,12 @@ static struct fb_ops sh_mobile_lcdc_ops = { .fb_copyarea = sh_mobile_lcdc_copyarea, .fb_imageblit = sh_mobile_lcdc_imageblit, .fb_blank = sh_mobile_lcdc_blank, - .fb_pan_display = sh_mobile_fb_pan_display, - .fb_ioctl = sh_mobile_ioctl, - .fb_open = sh_mobile_open, - .fb_release = sh_mobile_release, - .fb_check_var = sh_mobile_check_var, - .fb_set_par = sh_mobile_set_par, + .fb_pan_display = sh_mobile_lcdc_pan, + .fb_ioctl = sh_mobile_lcdc_ioctl, + .fb_open = sh_mobile_lcdc_open, + .fb_release = sh_mobile_lcdc_release, + .fb_check_var = sh_mobile_lcdc_check_var, + .fb_set_par = sh_mobile_lcdc_set_par, }; static void @@ -1537,7 +1539,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, else var->grayscale = ch->format->fourcc; - ret = sh_mobile_check_var(var, info); + ret = sh_mobile_lcdc_check_var(var, info); if (ret) return ret; -- GitLab From c5deac3c9b2284a64326e8799dfe7416bc619c02 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 12 Dec 2011 18:43:16 +0100 Subject: [PATCH 0018/2891] fbdev: sh_mobile_lcdc: Implement overlays support Signed-off-by: Laurent Pinchart --- .../sysfs-devices-platform-sh_mobile_lcdc_fb | 44 + drivers/video/sh_mobile_lcdcfb.c | 932 ++++++++++++++++-- include/video/sh_mobile_lcdc.h | 7 + 3 files changed, 911 insertions(+), 72 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb diff --git a/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb b/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb new file mode 100644 index 000000000000..2107082426da --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb @@ -0,0 +1,44 @@ +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_alpha +Date: May 2012 +Contact: Laurent Pinchart +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the alpha blending value for the overlay. Values range + from 0 (transparent) to 255 (opaque). 
The value is ignored if + the mode is not set to Alpha Blending. + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_mode +Date: May 2012 +Contact: Laurent Pinchart +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Selects the composition mode for the overlay. Possible values + are + + 0 - Alpha Blending + 1 - ROP3 + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_position +Date: May 2012 +Contact: Laurent Pinchart +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the x,y overlay position on the display in pixels. The + position format is `[0-9]+,[0-9]+'. + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_rop3 +Date: May 2012 +Contact: Laurent Pinchart +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the raster operation (ROP3) for the overlay. Values + range from 0 to 255. The value is ignored if the mode is not + set to ROP3. diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 799f87171e04..98e81b31fbc4 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -32,12 +33,176 @@ #include "sh_mobile_lcdcfb.h" +/* ---------------------------------------------------------------------------- + * Overlay register definitions + */ + +#define LDBCR 0xb00 +#define LDBCR_UPC(n) (1 << ((n) + 16)) +#define LDBCR_UPF(n) (1 << ((n) + 8)) +#define LDBCR_UPD(n) (1 << ((n) + 0)) +#define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00) +#define LDBBSIFR_EN (1 << 31) +#define LDBBSIFR_VS (1 << 29) +#define LDBBSIFR_BRSEL (1 << 28) +#define LDBBSIFR_MX (1 << 27) +#define LDBBSIFR_MY (1 << 26) +#define LDBBSIFR_CV3 (3 << 24) +#define LDBBSIFR_CV2 (2 << 24) +#define LDBBSIFR_CV1 (1 << 24) +#define LDBBSIFR_CV0 (0 << 24) +#define LDBBSIFR_CV_MASK (3 << 24) +#define LDBBSIFR_LAY_MASK (0xff << 16) +#define LDBBSIFR_LAY_SHIFT 16 +#define LDBBSIFR_ROP3_MASK (0xff << 16) +#define LDBBSIFR_ROP3_SHIFT 16 +#define LDBBSIFR_AL_PL8 (3 << 14) +#define LDBBSIFR_AL_PL1 (2 << 14) +#define LDBBSIFR_AL_PK (1 << 14) +#define LDBBSIFR_AL_1 (0 << 14) +#define LDBBSIFR_AL_MASK (3 << 14) +#define LDBBSIFR_SWPL (1 << 10) +#define LDBBSIFR_SWPW (1 << 9) +#define LDBBSIFR_SWPB (1 << 8) +#define LDBBSIFR_RY (1 << 7) +#define LDBBSIFR_CHRR_420 (2 << 0) +#define LDBBSIFR_CHRR_422 (1 << 0) +#define LDBBSIFR_CHRR_444 (0 << 0) +#define LDBBSIFR_RPKF_ARGB32 (0x00 << 0) +#define LDBBSIFR_RPKF_RGB16 (0x03 << 0) +#define LDBBSIFR_RPKF_RGB24 (0x0b << 0) +#define LDBBSIFR_RPKF_MASK (0x1f << 0) +#define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04) +#define LDBBSSZR_BVSS_MASK (0xfff << 16) +#define LDBBSSZR_BVSS_SHIFT 16 +#define LDBBSSZR_BHSS_MASK (0xfff << 0) +#define LDBBSSZR_BHSS_SHIFT 0 +#define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08) +#define LDBBLOCR_CVLC_MASK (0xfff << 16) +#define LDBBLOCR_CVLC_SHIFT 16 +#define LDBBLOCR_CHLC_MASK (0xfff << 0) +#define LDBBLOCR_CHLC_SHIFT 0 +#define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c) +#define LDBBSMWR_BSMWA_MASK (0xffff << 16) +#define LDBBSMWR_BSMWA_SHIFT 16 +#define LDBBSMWR_BSMW_MASK (0xffff << 0) +#define LDBBSMWR_BSMW_SHIFT 0 +#define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10) +#define LDBBSAYR_FG1A_MASK (0xff << 24) +#define LDBBSAYR_FG1A_SHIFT 24 +#define LDBBSAYR_FG1R_MASK (0xff << 16) +#define LDBBSAYR_FG1R_SHIFT 16 
+#define LDBBSAYR_FG1G_MASK (0xff << 8) +#define LDBBSAYR_FG1G_SHIFT 8 +#define LDBBSAYR_FG1B_MASK (0xff << 0) +#define LDBBSAYR_FG1B_SHIFT 0 +#define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14) +#define LDBBSACR_FG2A_MASK (0xff << 24) +#define LDBBSACR_FG2A_SHIFT 24 +#define LDBBSACR_FG2R_MASK (0xff << 16) +#define LDBBSACR_FG2R_SHIFT 16 +#define LDBBSACR_FG2G_MASK (0xff << 8) +#define LDBBSACR_FG2G_SHIFT 8 +#define LDBBSACR_FG2B_MASK (0xff << 0) +#define LDBBSACR_FG2B_SHIFT 0 +#define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18) +#define LDBBSAAR_AP_MASK (0xff << 24) +#define LDBBSAAR_AP_SHIFT 24 +#define LDBBSAAR_R_MASK (0xff << 16) +#define LDBBSAAR_R_SHIFT 16 +#define LDBBSAAR_GY_MASK (0xff << 8) +#define LDBBSAAR_GY_SHIFT 8 +#define LDBBSAAR_B_MASK (0xff << 0) +#define LDBBSAAR_B_SHIFT 0 +#define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c) +#define LDBBPPCR_AP_MASK (0xff << 24) +#define LDBBPPCR_AP_SHIFT 24 +#define LDBBPPCR_R_MASK (0xff << 16) +#define LDBBPPCR_R_SHIFT 16 +#define LDBBPPCR_GY_MASK (0xff << 8) +#define LDBBPPCR_GY_SHIFT 8 +#define LDBBPPCR_B_MASK (0xff << 0) +#define LDBBPPCR_B_SHIFT 0 +#define LDBnBBGCL(n) (0xb10 + (n) * 0x04) +#define LDBBBGCL_BGA_MASK (0xff << 24) +#define LDBBBGCL_BGA_SHIFT 24 +#define LDBBBGCL_BGR_MASK (0xff << 16) +#define LDBBBGCL_BGR_SHIFT 16 +#define LDBBBGCL_BGG_MASK (0xff << 8) +#define LDBBBGCL_BGG_SHIFT 8 +#define LDBBBGCL_BGB_MASK (0xff << 0) +#define LDBBBGCL_BGB_SHIFT 0 + #define SIDE_B_OFFSET 0x1000 #define MIRROR_OFFSET 0x2000 #define MAX_XRES 1920 #define MAX_YRES 1080 +enum sh_mobile_lcdc_overlay_mode { + LCDC_OVERLAY_BLEND, + LCDC_OVERLAY_ROP3, +}; + +/* + * struct sh_mobile_lcdc_overlay - LCDC display overlay + * + * @channel: LCDC channel this overlay belongs to + * @cfg: Overlay configuration + * @info: Frame buffer device + * @index: Overlay index (0-3) + * @base: Overlay registers base address + * @enabled: True if the overlay is enabled + * @mode: Overlay blending mode (alpha blend or ROP3) + * @alpha: Global alpha blending value (0-255, for alpha blending mode) + * @rop3: Raster operation (for ROP3 mode) + * @fb_mem: Frame buffer virtual memory address + * @fb_size: Frame buffer size in bytes + * @dma_handle: Frame buffer DMA address + * @base_addr_y: Overlay base address (RGB or luma component) + * @base_addr_c: Overlay base address (chroma component) + * @pan_offset: Current pan offset in bytes + * @format: Current pixel format + * @xres: Horizontal visible resolution + * @xres_virtual: Horizontal total resolution + * @yres: Vertical visible resolution + * @yres_virtual: Vertical total resolution + * @pitch: Overlay line pitch + * @pos_x: Horizontal overlay position + * @pos_y: Vertical overlay position + */ +struct sh_mobile_lcdc_overlay { + struct sh_mobile_lcdc_chan *channel; + + const struct sh_mobile_lcdc_overlay_cfg *cfg; + struct fb_info *info; + + unsigned int index; + unsigned long base; + + bool enabled; + enum sh_mobile_lcdc_overlay_mode mode; + unsigned int alpha; + unsigned int rop3; + + void *fb_mem; + unsigned long fb_size; + + dma_addr_t dma_handle; + unsigned long base_addr_y; + unsigned long base_addr_c; + unsigned long pan_offset; + + const struct sh_mobile_lcdc_format_info *format; + unsigned int xres; + unsigned int xres_virtual; + unsigned int yres; + unsigned int yres_virtual; + unsigned int pitch; + int pos_x; + int pos_y; +}; + struct sh_mobile_lcdc_priv { void __iomem *base; int irq; @@ -45,7 +210,10 @@ struct sh_mobile_lcdc_priv { struct device *dev; struct clk *dot_clk; unsigned long lddckr; + struct
sh_mobile_lcdc_chan ch[2]; + struct sh_mobile_lcdc_overlay overlays[4]; + struct notifier_block notifier; int started; int forced_fourcc; /* 2 channel LCDC must share fourcc setting */ @@ -141,6 +309,13 @@ static unsigned long lcdc_read_chan(struct sh_mobile_lcdc_chan *chan, return ioread32(chan->lcdc->base + chan->reg_offs[reg_nr]); } +static void lcdc_write_overlay(struct sh_mobile_lcdc_overlay *ovl, + int reg, unsigned long data) +{ + iowrite32(data, ovl->channel->lcdc->base + reg); + iowrite32(data, ovl->channel->lcdc->base + reg + SIDE_B_OFFSET); +} + static void lcdc_write(struct sh_mobile_lcdc_priv *priv, unsigned long reg_offs, unsigned long data) { @@ -685,6 +860,96 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) lcdc_write_chan(ch, LDHAJR, tmp); } +static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) +{ + u32 format = 0; + + if (!ovl->enabled) { + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), 0); + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); + return; + } + + ovl->base_addr_y = ovl->dma_handle; + ovl->base_addr_c = ovl->base_addr_y + ovl->xres + * ovl->yres_virtual; + + switch (ovl->mode) { + case LCDC_OVERLAY_BLEND: + format = LDBBSIFR_EN | (ovl->alpha << LDBBSIFR_LAY_SHIFT); + break; + + case LCDC_OVERLAY_ROP3: + format = LDBBSIFR_EN | LDBBSIFR_BRSEL + | (ovl->rop3 << LDBBSIFR_ROP3_SHIFT); + break; + } + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV61: + case V4L2_PIX_FMT_NV42: + format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW; + break; + case V4L2_PIX_FMT_BGR24: + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV24: + format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB; + break; + case V4L2_PIX_FMT_BGR32: + default: + format |= LDBBSIFR_SWPL; + break; + } + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_RGB565: + format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16; + break; + case V4L2_PIX_FMT_BGR24: + format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24; + break; + case V4L2_PIX_FMT_BGR32: + format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32; + break; + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420; + break; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422; + break; + case V4L2_PIX_FMT_NV24: + case V4L2_PIX_FMT_NV42: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444; + break; + } + + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + + lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), format); + + lcdc_write_overlay(ovl, LDBnBSSZR(ovl->index), + (ovl->yres << LDBBSSZR_BVSS_SHIFT) | + (ovl->xres << LDBBSSZR_BHSS_SHIFT)); + lcdc_write_overlay(ovl, LDBnBLOCR(ovl->index), + (ovl->pos_y << LDBBLOCR_CVLC_SHIFT) | + (ovl->pos_x << LDBBLOCR_CHLC_SHIFT)); + lcdc_write_overlay(ovl, LDBnBSMWR(ovl->index), + ovl->pitch << LDBBSMWR_BSMW_SHIFT); + + lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y); + lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c); + + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); +} + /* * __sh_mobile_lcdc_start - Configure and start the LCDC * @priv: LCDC device @@ -892,6 +1157,11 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) } } + for (k = 0; k < ARRAY_SIZE(priv->overlays); ++k) { + struct 
sh_mobile_lcdc_overlay *ovl = &priv->overlays[k]; + sh_mobile_lcdc_overlay_setup(ovl); + } + /* Start the LCDC. */ __sh_mobile_lcdc_start(priv); @@ -975,8 +1245,506 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) sh_mobile_lcdc_clk_off(priv); } +static int __sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + if (var->xres > MAX_XRES || var->yres > MAX_YRES) + return -EINVAL; + + /* Make sure the virtual resolution is at least as big as the visible + * resolution. + */ + if (var->xres_virtual < var->xres) + var->xres_virtual = var->xres; + if (var->yres_virtual < var->yres) + var->yres_virtual = var->yres; + + if (sh_mobile_format_is_fourcc(var)) { + const struct sh_mobile_lcdc_format_info *format; + + format = sh_mobile_format_info(var->grayscale); + if (format == NULL) + return -EINVAL; + var->bits_per_pixel = format->bpp; + + /* Default to RGB and JPEG color-spaces for RGB and YUV formats + * respectively. + */ + if (!format->yuv) + var->colorspace = V4L2_COLORSPACE_SRGB; + else if (var->colorspace != V4L2_COLORSPACE_REC709) + var->colorspace = V4L2_COLORSPACE_JPEG; + } else { + if (var->bits_per_pixel <= 16) { /* RGB 565 */ + var->bits_per_pixel = 16; + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + } else if (var->bits_per_pixel <= 24) { /* RGB 888 */ + var->bits_per_pixel = 24; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */ + var->bits_per_pixel = 32; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + } else + return -EINVAL; + + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + } + + /* Make sure we don't exceed our allocated memory. 
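+	 * (xres_virtual * yres_virtual * bits_per_pixel / 8 must not exceed
+	 * the frame buffer's smem_len.)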
+	 */
+	if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 >
+	    info->fix.smem_len)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer operations - Overlays
+ */
+
+static ssize_t
+overlay_alpha_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->alpha);
+}
+
+static ssize_t
+overlay_alpha_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct sh_mobile_lcdc_overlay *ovl = info->par;
+	unsigned int alpha;
+	char *endp;
+
+	alpha = simple_strtoul(buf, &endp, 10);
+	if (isspace(*endp))
+		endp++;
+
+	if (endp - buf != count)
+		return -EINVAL;
+
+	if (alpha > 255)
+		return -EINVAL;
+
+	if (ovl->alpha != alpha) {
+		ovl->alpha = alpha;
+
+		if (ovl->mode == LCDC_OVERLAY_BLEND && ovl->enabled)
+			sh_mobile_lcdc_overlay_setup(ovl);
+	}
+
+	return count;
+}
+
+static ssize_t
+overlay_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->mode);
+}
+
+static ssize_t
+overlay_mode_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct sh_mobile_lcdc_overlay *ovl = info->par;
+	unsigned int mode;
+	char *endp;
+
+	mode = simple_strtoul(buf, &endp, 10);
+	if (isspace(*endp))
+		endp++;
+
+	if (endp - buf != count)
+		return -EINVAL;
+
+	if (mode != LCDC_OVERLAY_BLEND && mode != LCDC_OVERLAY_ROP3)
+		return -EINVAL;
+
+	if (ovl->mode != mode) {
+		ovl->mode = mode;
+
+		if (ovl->enabled)
+			sh_mobile_lcdc_overlay_setup(ovl);
+	}
+
+	return count;
+}
+
+static ssize_t
+overlay_position_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+	return scnprintf(buf, PAGE_SIZE, "%d,%d\n", ovl->pos_x, ovl->pos_y);
+}
+
+static ssize_t
+overlay_position_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct sh_mobile_lcdc_overlay *ovl = info->par;
+	char *endp;
+	int pos_x;
+	int pos_y;
+
+	pos_x = simple_strtol(buf, &endp, 10);
+	if (*endp != ',')
+		return -EINVAL;
+
+	pos_y = simple_strtol(endp + 1, &endp, 10);
+	if (isspace(*endp))
+		endp++;
+
+	if (endp - buf != count)
+		return -EINVAL;
+
+	if (ovl->pos_x != pos_x || ovl->pos_y != pos_y) {
+		ovl->pos_x = pos_x;
+		ovl->pos_y = pos_y;
+
+		if (ovl->enabled)
+			sh_mobile_lcdc_overlay_setup(ovl);
+	}
+
+	return count;
+}
+
+static ssize_t
+overlay_rop3_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->rop3);
+}
+
+static ssize_t
+overlay_rop3_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct sh_mobile_lcdc_overlay *ovl = info->par;
+	unsigned int rop3;
+	char *endp;
+
+	rop3 = simple_strtoul(buf, &endp, 10);
+	if (isspace(*endp))
+		endp++;
+
+	if (endp - buf != count)
+		return -EINVAL;
+
+	if
(rop3 > 255) + return -EINVAL; + + if (ovl->rop3 != rop3) { + ovl->rop3 = rop3; + + if (ovl->mode == LCDC_OVERLAY_ROP3 && ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static const struct device_attribute overlay_sysfs_attrs[] = { + __ATTR(ovl_alpha, S_IRUGO|S_IWUSR, + overlay_alpha_show, overlay_alpha_store), + __ATTR(ovl_mode, S_IRUGO|S_IWUSR, + overlay_mode_show, overlay_mode_store), + __ATTR(ovl_position, S_IRUGO|S_IWUSR, + overlay_position_show, overlay_position_store), + __ATTR(ovl_rop3, S_IRUGO|S_IWUSR, + overlay_rop3_show, overlay_rop3_store), +}; + +static const struct fb_fix_screeninfo sh_mobile_lcdc_overlay_fix = { + .id = "SH Mobile LCDC", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, + .xpanstep = 0, + .ypanstep = 1, + .ywrapstep = 0, + .capabilities = FB_CAP_FOURCC, +}; + +static int sh_mobile_lcdc_overlay_pan(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned long base_addr_y; + unsigned long base_addr_c; + unsigned long pan_offset; + unsigned long c_offset; + + if (!ovl->format->yuv) + pan_offset = var->yoffset * ovl->pitch + + var->xoffset * (ovl->format->bpp / 8); + else + pan_offset = var->yoffset * ovl->pitch + var->xoffset; + + if (pan_offset == ovl->pan_offset) + return 0; /* No change, do nothing */ + + /* Set the source address for the next refresh */ + base_addr_y = ovl->dma_handle + pan_offset; + + ovl->base_addr_y = base_addr_y; + ovl->base_addr_c = base_addr_y; + + if (ovl->format->yuv) { + /* Set Y offset */ + c_offset = var->yoffset * ovl->pitch + * (ovl->format->bpp - 8) / 8; + base_addr_c = ovl->dma_handle + + ovl->xres * ovl->yres_virtual + + c_offset; + /* Set X offset */ + if (ovl->format->fourcc == V4L2_PIX_FMT_NV24) + base_addr_c += 2 * var->xoffset; + else + base_addr_c += var->xoffset; + + ovl->base_addr_c = base_addr_c; + } + + lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y); + lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c); + + ovl->pan_offset = pan_offset; + + return 0; +} + +static int sh_mobile_lcdc_overlay_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + switch (cmd) { + case FBIO_WAITFORVSYNC: + return sh_mobile_lcdc_wait_for_vsync(ovl->channel); + + default: + return -ENOIOCTLCMD; + } +} + +static int sh_mobile_lcdc_overlay_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return __sh_mobile_lcdc_check_var(var, info); +} + +static int sh_mobile_lcdc_overlay_set_par(struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + ovl->format = + sh_mobile_format_info(sh_mobile_format_fourcc(&info->var)); + + ovl->xres = info->var.xres; + ovl->xres_virtual = info->var.xres_virtual; + ovl->yres = info->var.yres; + ovl->yres_virtual = info->var.yres_virtual; + + if (ovl->format->yuv) + ovl->pitch = info->var.xres; + else + ovl->pitch = info->var.xres * ovl->format->bpp / 8; + + sh_mobile_lcdc_overlay_setup(ovl); + + info->fix.line_length = ovl->pitch; + + if (sh_mobile_format_is_fourcc(&info->var)) { + info->fix.type = FB_TYPE_FOURCC; + info->fix.visual = FB_VISUAL_FOURCC; + } else { + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + } + + return 0; +} + +/* Overlay blanking. Disable the overlay when blanked. 
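+ * Any non-zero blank level disables the overlay outright; overlays have
+ * no intermediate power states.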
*/ +static int sh_mobile_lcdc_overlay_blank(int blank, struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + ovl->enabled = !blank; + sh_mobile_lcdc_overlay_setup(ovl); + + /* Prevent the backlight from receiving a blanking event by returning + * a non-zero value. + */ + return 1; +} + +static struct fb_ops sh_mobile_lcdc_overlay_ops = { + .owner = THIS_MODULE, + .fb_read = fb_sys_read, + .fb_write = fb_sys_write, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + .fb_blank = sh_mobile_lcdc_overlay_blank, + .fb_pan_display = sh_mobile_lcdc_overlay_pan, + .fb_ioctl = sh_mobile_lcdc_overlay_ioctl, + .fb_check_var = sh_mobile_lcdc_overlay_check_var, + .fb_set_par = sh_mobile_lcdc_overlay_set_par, +}; + +static void +sh_mobile_lcdc_overlay_fb_unregister(struct sh_mobile_lcdc_overlay *ovl) +{ + struct fb_info *info = ovl->info; + + if (info == NULL || info->dev == NULL) + return; + + unregister_framebuffer(ovl->info); +} + +static int __devinit +sh_mobile_lcdc_overlay_fb_register(struct sh_mobile_lcdc_overlay *ovl) +{ + struct sh_mobile_lcdc_priv *lcdc = ovl->channel->lcdc; + struct fb_info *info = ovl->info; + unsigned int i; + int ret; + + if (info == NULL) + return 0; + + ret = register_framebuffer(info); + if (ret < 0) + return ret; + + dev_info(lcdc->dev, "registered %s/overlay %u as %dx%d %dbpp.\n", + dev_name(lcdc->dev), ovl->index, info->var.xres, + info->var.yres, info->var.bits_per_pixel); + + for (i = 0; i < ARRAY_SIZE(overlay_sysfs_attrs); ++i) { + ret = device_create_file(info->dev, &overlay_sysfs_attrs[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static void +sh_mobile_lcdc_overlay_fb_cleanup(struct sh_mobile_lcdc_overlay *ovl) +{ + struct fb_info *info = ovl->info; + + if (info == NULL || info->device == NULL) + return; + + framebuffer_release(info); +} + +static int __devinit +sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl) +{ + struct sh_mobile_lcdc_priv *priv = ovl->channel->lcdc; + struct fb_var_screeninfo *var; + struct fb_info *info; + + /* Allocate and initialize the frame buffer device. */ + info = framebuffer_alloc(0, priv->dev); + if (info == NULL) { + dev_err(priv->dev, "unable to allocate fb_info\n"); + return -ENOMEM; + } + + ovl->info = info; + + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &sh_mobile_lcdc_overlay_ops; + info->device = priv->dev; + info->screen_base = ovl->fb_mem; + info->par = ovl; + + /* Initialize fixed screen information. Restrict pan to 2 lines steps + * for NV12 and NV21. + */ + info->fix = sh_mobile_lcdc_overlay_fix; + snprintf(info->fix.id, sizeof(info->fix.id), + "SH Mobile LCDC Overlay %u", ovl->index); + info->fix.smem_start = ovl->dma_handle; + info->fix.smem_len = ovl->fb_size; + info->fix.line_length = ovl->pitch; + + if (ovl->format->yuv) + info->fix.visual = FB_VISUAL_FOURCC; + else + info->fix.visual = FB_VISUAL_TRUECOLOR; + + if (ovl->format->fourcc == V4L2_PIX_FMT_NV12 || + ovl->format->fourcc == V4L2_PIX_FMT_NV21) + info->fix.ypanstep = 2; + + /* Initialize variable screen information. */ + var = &info->var; + memset(var, 0, sizeof(*var)); + var->xres = ovl->xres; + var->yres = ovl->yres; + var->xres_virtual = ovl->xres_virtual; + var->yres_virtual = ovl->yres_virtual; + var->activate = FB_ACTIVATE_NOW; + + /* Use the legacy API by default for RGB formats, and the FOURCC API + * for YUV formats. 
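+ * (With the FOURCC API the format is carried in var->grayscale rather
+ * than described through bits_per_pixel and the RGB bitfields.)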
+ */ + if (!ovl->format->yuv) + var->bits_per_pixel = ovl->format->bpp; + else + var->grayscale = ovl->format->fourcc; + + return sh_mobile_lcdc_overlay_check_var(var, info); +} + /* ----------------------------------------------------------------------------- - * Frame buffer operations + * Frame buffer operations - main frame buffer */ static int sh_mobile_lcdc_setcolreg(u_int regno, @@ -1202,9 +1970,7 @@ static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, unsigned int best_xres = 0; unsigned int best_yres = 0; unsigned int i; - - if (var->xres > MAX_XRES || var->yres > MAX_YRES) - return -EINVAL; + int ret; /* If board code provides us with a list of available modes, make sure * we use one of them. Find the mode closest to the requested one. The @@ -1239,73 +2005,9 @@ static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, var->yres = best_yres; } - /* Make sure the virtual resolution is at least as big as the visible - * resolution. - */ - if (var->xres_virtual < var->xres) - var->xres_virtual = var->xres; - if (var->yres_virtual < var->yres) - var->yres_virtual = var->yres; - - if (sh_mobile_format_is_fourcc(var)) { - const struct sh_mobile_lcdc_format_info *format; - - format = sh_mobile_format_info(var->grayscale); - if (format == NULL) - return -EINVAL; - var->bits_per_pixel = format->bpp; - - /* Default to RGB and JPEG color-spaces for RGB and YUV formats - * respectively. - */ - if (!format->yuv) - var->colorspace = V4L2_COLORSPACE_SRGB; - else if (var->colorspace != V4L2_COLORSPACE_REC709) - var->colorspace = V4L2_COLORSPACE_JPEG; - } else { - if (var->bits_per_pixel <= 16) { /* RGB 565 */ - var->bits_per_pixel = 16; - var->red.offset = 11; - var->red.length = 5; - var->green.offset = 5; - var->green.length = 6; - var->blue.offset = 0; - var->blue.length = 5; - var->transp.offset = 0; - var->transp.length = 0; - } else if (var->bits_per_pixel <= 24) { /* RGB 888 */ - var->bits_per_pixel = 24; - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.offset = 0; - var->transp.length = 0; - } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */ - var->bits_per_pixel = 32; - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.offset = 24; - var->transp.length = 8; - } else - return -EINVAL; - - var->red.msb_right = 0; - var->green.msb_right = 0; - var->blue.msb_right = 0; - var->transp.msb_right = 0; - } - - /* Make sure we don't exceed our allocated memory. 
*/ - if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 > - info->fix.smem_len) - return -EINVAL; + ret = __sh_mobile_lcdc_check_var(var, info); + if (ret < 0) + return ret; /* only accept the forced_fourcc for dual channel configurations */ if (p->forced_fourcc && @@ -1714,15 +2416,27 @@ static const struct fb_videomode default_720p __devinitconst = { static int sh_mobile_lcdc_remove(struct platform_device *pdev) { struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev); - int i; + unsigned int i; fb_unregister_client(&priv->notifier); + for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) + sh_mobile_lcdc_overlay_fb_unregister(&priv->overlays[i]); for (i = 0; i < ARRAY_SIZE(priv->ch); i++) sh_mobile_lcdc_channel_fb_unregister(&priv->ch[i]); sh_mobile_lcdc_stop(priv); + for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + sh_mobile_lcdc_overlay_fb_cleanup(ovl); + + if (ovl->fb_mem) + dma_free_coherent(&pdev->dev, ovl->fb_size, + ovl->fb_mem, ovl->dma_handle); + } + for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; @@ -1797,6 +2511,61 @@ static int __devinit sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan * return 0; } +static int __devinit +sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv, + struct sh_mobile_lcdc_overlay *ovl) +{ + const struct sh_mobile_lcdc_format_info *format; + int ret; + + if (ovl->cfg->fourcc == 0) + return 0; + + /* Validate the format. */ + format = sh_mobile_format_info(ovl->cfg->fourcc); + if (format == NULL) { + dev_err(priv->dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc); + return -EINVAL; + } + + ovl->enabled = false; + ovl->mode = LCDC_OVERLAY_BLEND; + ovl->alpha = 255; + ovl->rop3 = 0; + ovl->pos_x = 0; + ovl->pos_y = 0; + + /* The default Y virtual resolution is twice the panel size to allow for + * double-buffering. + */ + ovl->format = format; + ovl->xres = ovl->cfg->max_xres; + ovl->xres_virtual = ovl->xres; + ovl->yres = ovl->cfg->max_yres; + ovl->yres_virtual = ovl->yres * 2; + + if (!format->yuv) + ovl->pitch = ovl->xres * format->bpp / 8; + else + ovl->pitch = ovl->xres; + + /* Allocate frame buffer memory. 
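+	 * Two full screens' worth of memory is allocated, matching the
+	 * doubled Y virtual resolution above, so the overlay can be
+	 * double-buffered by panning.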
*/ + ovl->fb_size = ovl->cfg->max_xres * ovl->cfg->max_yres + * format->bpp / 8 * 2; + ovl->fb_mem = dma_alloc_coherent(priv->dev, ovl->fb_size, + &ovl->dma_handle, GFP_KERNEL); + if (!ovl->fb_mem) { + dev_err(priv->dev, "unable to allocate buffer\n"); + return -ENOMEM; + } + + ret = sh_mobile_lcdc_overlay_fb_init(ovl); + if (ret < 0) + return ret; + + return 0; +} + static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, struct sh_mobile_lcdc_chan *ch) @@ -2005,6 +2774,17 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + ovl->cfg = &pdata->overlays[i]; + ovl->channel = &priv->ch[0]; + + error = sh_mobile_lcdc_overlay_init(priv, ovl); + if (error) + goto err1; + } + error = sh_mobile_lcdc_start(priv); if (error) { dev_err(&pdev->dev, "unable to start hardware\n"); @@ -2019,6 +2799,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + error = sh_mobile_lcdc_overlay_fb_register(ovl); + if (error) + goto err1; + } + /* Failure ignored */ priv->notifier.notifier_call = sh_mobile_lcdc_notify; fb_register_client(&priv->notifier); diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h index 7571b27a0ba1..ff43ffc1aab2 100644 --- a/include/video/sh_mobile_lcdc.h +++ b/include/video/sh_mobile_lcdc.h @@ -166,6 +166,12 @@ struct sh_mobile_lcdc_bl_info { int (*get_brightness)(void); }; +struct sh_mobile_lcdc_overlay_cfg { + int fourcc; + unsigned int max_xres; + unsigned int max_yres; +}; + struct sh_mobile_lcdc_chan_cfg { int chan; int fourcc; @@ -186,6 +192,7 @@ struct sh_mobile_lcdc_chan_cfg { struct sh_mobile_lcdc_info { int clock_source; struct sh_mobile_lcdc_chan_cfg ch[2]; + struct sh_mobile_lcdc_overlay_cfg overlays[4]; struct sh_mobile_meram_info *meram_dev; }; -- GitLab From 64c43dfa8636a8b7c8f40e8f93eb91aca049f8bb Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Wed, 20 Jun 2012 10:25:48 +0900 Subject: [PATCH 0019/2891] video: exynos_dp: fix build warning due to uninitialized value This patch fixes build warning due to uninitialized value dereference. drivers/video/exynos/exynos_dp_core.c: In function 'exynos_dp_set_link_train': drivers/video/exynos/exynos_dp_core.c:529:18: warning: 'reg' may be used uninitialized in this function [-Wuninitialized] drivers/video/exynos/exynos_dp_core.c:395:6: note: 'reg' was declared here Signed-off-by: Olof Johansson Signed-off-by: Jingoo Han Signed-off-by: Florian Tobias Schandinat --- drivers/video/exynos/exynos_dp_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c index a36b2d28280e..9db7b9f581e6 100644 --- a/drivers/video/exynos/exynos_dp_core.c +++ b/drivers/video/exynos/exynos_dp_core.c @@ -407,6 +407,9 @@ static unsigned int exynos_dp_get_lane_link_training( case 3: reg = exynos_dp_get_lane3_link_training(dp); break; + default: + WARN_ON(1); + return 0; } return reg; -- GitLab From db131b1024b1fe913a2c225e3792098cefa825e7 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Thu, 21 Jun 2012 18:57:23 +0900 Subject: [PATCH 0020/2891] video: exynos_dp: remove duplicated declarations from header file Some functions are declared twice in header file; thus, these declarations are unnecessary. 
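C accepts repeated identical prototypes silently, which is how the copies went unnoticed; a reduced illustration (not taken from the tree) of the pattern being removed:

	void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count);
	/* ...later in the same header, an identical re-declaration
	 * compiles without any warning: */
	void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count);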
Signed-off-by: Jingoo Han Signed-off-by: Florian Tobias Schandinat --- drivers/video/exynos/exynos_dp_core.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h index 1e0f998e0c9f..8526e548c385 100644 --- a/drivers/video/exynos/exynos_dp_core.h +++ b/drivers/video/exynos/exynos_dp_core.h @@ -85,10 +85,6 @@ void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); -void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); -void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); -void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); -void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable); void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, enum pattern_set pattern); -- GitLab From 44edebca3964824b9126c49bab1187df7df5bd89 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Wed, 13 Jun 2012 09:47:31 +0200 Subject: [PATCH 0021/2891] video: backlight: remove unused header Commit 9befe40f6e018e508b047eb76d189ede9b4ff03d ("video: backlight: support s6e8ax0 panel driver based on MIPI DSI") added s6e8ax0.h, but no file includes it. That's probably a good thing, because it declares an extern void function that is defined static int in s6e8ax0.c. Besides, that function is also wrapped in the module_init() macro, which should do everything needed to make that function available to the code outside of s6e8ax0.c. This header can safely be removed. Signed-off-by: Paul Bolle Acked-by: Donghwa Lee Signed-off-by: Florian Tobias Schandinat --- drivers/video/exynos/s6e8ax0.h | 21 --------------------- 1 file changed, 21 deletions(-) delete mode 100644 drivers/video/exynos/s6e8ax0.h diff --git a/drivers/video/exynos/s6e8ax0.h b/drivers/video/exynos/s6e8ax0.h deleted file mode 100644 index 1f1b270484b0..000000000000 --- a/drivers/video/exynos/s6e8ax0.h +++ /dev/null @@ -1,21 +0,0 @@ -/* linux/drivers/video/backlight/s6e8ax0.h - * - * MIPI-DSI based s6e8ax0 AMOLED LCD Panel definitions. - * - * Copyright (c) 2011 Samsung Electronics - * - * Inki Dae, - * Donghwa Lee - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#ifndef _S6E8AX0_H -#define _S6E8AX0_H - -extern void s6e8ax0_init(void); - -#endif - -- GitLab From 13589864be74736ca4e6def7376742eb1d2099bf Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 4 Jun 2012 20:40:51 -0700 Subject: [PATCH 0022/2891] blkcg: __blkg_lookup_create() doesn't need radix preload There's no point in calling radix_tree_preload() if preloading doesn't use more permissible GFP mask. Drop preloading from __blkg_lookup_create(). While at it, drop sparse locking annotation which no longer applies. v2: Vivek pointed out the odd preload usage. Instead of updating, just drop it. 
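For reference, preloading only buys anything when it can use a more permissive mask than the insertion itself; a minimal sketch of the useful pattern (tree, lock and id are placeholders):

	/* Sleepable context: stock the per-CPU node pool with GFP_KERNEL. */
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		return ret;

	spin_lock(&lock);
	/* The insertion can now draw on the preloaded nodes even though
	 * it must not sleep here. */
	ret = radix_tree_insert(&tree, id, item);
	spin_unlock(&lock);

	radix_tree_preload_end();	/* re-enables preemption */

A radix_tree_preload(GFP_ATOMIC), as dropped here, can allocate no more than the GFP_ATOMIC insertion itself, so removing it loses nothing.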
Signed-off-by: Tejun Heo Acked-by: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index e7dee617358e..c3882bbbf0fc 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -177,7 +177,6 @@ EXPORT_SYMBOL_GPL(blkg_lookup); static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q) - __releases(q->queue_lock) __acquires(q->queue_lock) { struct blkcg_gq *blkg; int ret; @@ -203,10 +202,6 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, goto err_put; /* insert */ - ret = radix_tree_preload(GFP_ATOMIC); - if (ret) - goto err_free; - spin_lock(&blkcg->lock); ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); if (likely(!ret)) { @@ -215,14 +210,11 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, } spin_unlock(&blkcg->lock); - radix_tree_preload_end(); - if (!ret) return blkg; -err_free: - blkg_free(blkg); err_put: css_put(&blkcg->css); + blkg_free(blkg); return ERR_PTR(ret); } -- GitLab From 159749937a3e1605068a454b1607cdc5714f16e6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 4 Jun 2012 20:40:52 -0700 Subject: [PATCH 0023/2891] blkcg: make root blkcg allocation use %GFP_KERNEL Currently, blkcg_activate_policy() depends on %GFP_ATOMIC allocation from __blkg_lookup_create() for root blkcg creation. This could make policy fail unnecessarily. Make blkg_alloc() take @gfp_mask, __blkg_lookup_create() take an optional @new_blkg for preallocated blkg, and blkcg_activate_policy() preload radix tree and preallocate blkg with %GFP_KERNEL before trying to create the root blkg. v2: __blkg_lookup_create() was returning %NULL on blkg alloc failure instead of ERR_PTR() value. Fixed. Signed-off-by: Tejun Heo Acked-by: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 59 +++++++++++++++++++++++++++++++++------------- 1 file changed, 43 insertions(+), 16 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index c3882bbbf0fc..96248d2578db 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -91,16 +91,18 @@ static void blkg_free(struct blkcg_gq *blkg) * blkg_alloc - allocate a blkg * @blkcg: block cgroup the new blkg is associated with * @q: request_queue the new blkg is associated with + * @gfp_mask: allocation mask to use * * Allocate a new blkg assocating @blkcg and @q. */ -static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) +static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, + gfp_t gfp_mask) { struct blkcg_gq *blkg; int i; /* alloc and init base part */ - blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node); + blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); if (!blkg) return NULL; @@ -117,7 +119,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) continue; /* alloc per-policy data and attach it to blkg */ - pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node); + pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); if (!pd) { blkg_free(blkg); return NULL; @@ -175,8 +177,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) } EXPORT_SYMBOL_GPL(blkg_lookup); +/* + * If @new_blkg is %NULL, this function tries to allocate a new one as + * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return. 
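+ * ("Consumed" means @new_blkg is either linked into the blkg tree or
+ * freed before return; the caller must not touch it afterwards.)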
+ */ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q) + struct request_queue *q, + struct blkcg_gq *new_blkg) { struct blkcg_gq *blkg; int ret; @@ -188,18 +195,24 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, blkg = __blkg_lookup(blkcg, q); if (blkg) { rcu_assign_pointer(blkcg->blkg_hint, blkg); - return blkg; + goto out_free; } /* blkg holds a reference to blkcg */ - if (!css_tryget(&blkcg->css)) - return ERR_PTR(-EINVAL); + if (!css_tryget(&blkcg->css)) { + blkg = ERR_PTR(-EINVAL); + goto out_free; + } /* allocate */ - ret = -ENOMEM; - blkg = blkg_alloc(blkcg, q); - if (unlikely(!blkg)) - goto err_put; + if (!new_blkg) { + new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC); + if (unlikely(!new_blkg)) { + blkg = ERR_PTR(-ENOMEM); + goto out_put; + } + } + blkg = new_blkg; /* insert */ spin_lock(&blkcg->lock); @@ -212,10 +225,13 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, if (!ret) return blkg; -err_put: + + blkg = ERR_PTR(ret); +out_put: css_put(&blkcg->css); - blkg_free(blkg); - return ERR_PTR(ret); +out_free: + blkg_free(new_blkg); + return blkg; } struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, @@ -227,7 +243,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, */ if (unlikely(blk_queue_bypass(q))) return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY); - return __blkg_lookup_create(blkcg, q); + return __blkg_lookup_create(blkcg, q, NULL); } EXPORT_SYMBOL_GPL(blkg_lookup_create); @@ -726,19 +742,30 @@ int blkcg_activate_policy(struct request_queue *q, struct blkcg_gq *blkg; struct blkg_policy_data *pd, *n; int cnt = 0, ret; + bool preloaded; if (blkcg_policy_enabled(q, pol)) return 0; + /* preallocations for root blkg */ + blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); + if (!blkg) + return -ENOMEM; + + preloaded = !radix_tree_preload(GFP_KERNEL); + blk_queue_bypass_start(q); /* make sure the root blkg exists and count the existing blkgs */ spin_lock_irq(q->queue_lock); rcu_read_lock(); - blkg = __blkg_lookup_create(&blkcg_root, q); + blkg = __blkg_lookup_create(&blkcg_root, q, blkg); rcu_read_unlock(); + if (preloaded) + radix_tree_preload_end(); + if (IS_ERR(blkg)) { ret = PTR_ERR(blkg); goto out_unlock; -- GitLab From a91a5ac6858fbf7477131e1210cb3e897b668e6f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 4 Jun 2012 20:40:53 -0700 Subject: [PATCH 0024/2891] mempool: add @gfp_mask to mempool_create_node() mempool_create_node() currently assumes %GFP_KERNEL. Its only user, blk_init_free_list(), is about to be updated to use other allocation flags - add @gfp_mask argument to the function. 
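Reassembled from the blk-core hunk below, the only call site now spells the mask out explicitly:

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					  mempool_free_slab, request_cachep,
					  GFP_KERNEL, q->node);
	if (!rl->rq_pool)
		return -ENOMEM;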
Signed-off-by: Tejun Heo Cc: Andrew Morton Cc: Hugh Dickins Signed-off-by: Jens Axboe --- block/blk-core.c | 4 ++-- include/linux/mempool.h | 3 ++- mm/mempool.c | 12 +++++++----- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 93eb3e4f88ce..64f9a8668253 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -531,8 +531,8 @@ static int blk_init_free_list(struct request_queue *q) init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, - mempool_free_slab, request_cachep, q->node); - + mempool_free_slab, request_cachep, + GFP_KERNEL, q->node); if (!rl->rq_pool) return -ENOMEM; diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 7c08052e3321..39ed62ab5b8a 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -26,7 +26,8 @@ typedef struct mempool_s { extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data); extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, int nid); + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int nid); extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); extern void mempool_destroy(mempool_t *pool); diff --git a/mm/mempool.c b/mm/mempool.c index d9049811f352..54990476c049 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -63,19 +63,21 @@ EXPORT_SYMBOL(mempool_destroy); mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { - return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1); + return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data, + GFP_KERNEL, NUMA_NO_NODE); } EXPORT_SYMBOL(mempool_create); mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, int node_id) + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id) { mempool_t *pool; - pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id); + pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id); if (!pool) return NULL; pool->elements = kmalloc_node(min_nr * sizeof(void *), - GFP_KERNEL, node_id); + gfp_mask, node_id); if (!pool->elements) { kfree(pool); return NULL; @@ -93,7 +95,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, while (pool->curr_nr < pool->min_nr) { void *element; - element = pool->alloc(GFP_KERNEL, pool->pool_data); + element = pool->alloc(gfp_mask, pool->pool_data); if (unlikely(!element)) { mempool_destroy(pool); return NULL; -- GitLab From 86072d8112595ea1b6beeb33f578e7c2839e014e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 4 Jun 2012 20:40:54 -0700 Subject: [PATCH 0025/2891] block: drop custom queue draining used by scsi_transport_{iscsi|fc} iscsi_remove_host() uses bsg_remove_queue() which implements custom queue draining. fc_bsg_remove() open-codes mostly identical logic. The draining logic isn't correct in that blk_stop_queue() doesn't prevent new requests from being queued - it just stops processing, so nothing prevents new requests to be queued after the logic determines that the queue is drained. blk_cleanup_queue() now implements proper queue draining and these custom draining logics aren't necessary. Drop them and use bsg_unregister_queue() + blk_cleanup_queue() instead. 
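With blk_cleanup_queue() doing the draining, the whole teardown collapses to two calls (as the iscsi hunk below shows):

	bsg_unregister_queue(q);
	blk_cleanup_queue(q);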
Signed-off-by: Tejun Heo Reviewed-by: Mike Christie Acked-by: Vivek Goyal Cc: James Bottomley Cc: James Smart Signed-off-by: Jens Axboe --- block/bsg-lib.c | 53 ----------------------------- drivers/scsi/scsi_transport_fc.c | 38 --------------------- drivers/scsi/scsi_transport_iscsi.c | 2 +- include/linux/bsg-lib.h | 1 - 4 files changed, 1 insertion(+), 93 deletions(-) diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 7ad49c88f6b1..deee61fbb741 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -243,56 +243,3 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q, return 0; } EXPORT_SYMBOL_GPL(bsg_setup_queue); - -/** - * bsg_remove_queue - Deletes the bsg dev from the q - * @q: the request_queue that is to be torn down. - * - * Notes: - * Before unregistering the queue empty any requests that are blocked - */ -void bsg_remove_queue(struct request_queue *q) -{ - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - - if (!q) - return; - - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. - */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); -} -EXPORT_SYMBOL_GPL(bsg_remove_queue); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 579760420d53..a9617ad05f33 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -4130,45 +4130,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) static void fc_bsg_remove(struct request_queue *q) { - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - if (q) { - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. 
- */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); blk_cleanup_queue(q); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 1cf640e575da..c737a16b0a1d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -575,7 +575,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct iscsi_cls_host *ihost = shost->shost_data; if (ihost->bsg_q) { - bsg_remove_queue(ihost->bsg_q); + bsg_unregister_queue(ihost->bsg_q); blk_cleanup_queue(ihost->bsg_q); } return 0; diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index f55ab8cdc106..4d0fb3df2f4a 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -67,7 +67,6 @@ void bsg_job_done(struct bsg_job *job, int result, int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, bsg_job_fn *job_fn, int dd_job_size); void bsg_request_fn(struct request_queue *q); -void bsg_remove_queue(struct request_queue *q); void bsg_goose_queue(struct request_queue *q); #endif -- GitLab From a06e05e6afab70b4b23c0a7975aaeae24b195cd6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 4 Jun 2012 20:40:55 -0700 Subject: [PATCH 0026/2891] block: refactor get_request[_wait]() Currently, there are two request allocation functions - get_request() and get_request_wait(). The former tries to allocate a request once and the latter keeps retrying until it succeeds. The latter wraps the former and keeps retrying until allocation succeeds. The combination of two functions deliver fallible non-wait allocation, fallible wait allocation and unfailing wait allocation. However, given that forward progress is guaranteed, fallible wait allocation isn't all that useful and in fact nobody uses it. This patch simplifies the interface as follows. * get_request() is renamed to __get_request() and is only used by the wrapper function. * get_request_wait() is renamed to get_request(). It now takes @gfp_mask and retries iff it contains %__GFP_WAIT. This patch doesn't introduce any functional change and is to prepare for further updates to request allocation path. Signed-off-by: Tejun Heo Acked-by: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-core.c | 74 +++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 39 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 64f9a8668253..080204a10fcf 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -837,7 +837,7 @@ static struct io_context *rq_ioc(struct bio *bio) } /** - * get_request - get a free request + * __get_request - get a free request * @q: request_queue to allocate request from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) @@ -850,8 +850,8 @@ static struct io_context *rq_ioc(struct bio *bio) * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. 
 */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-				   struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_queue *q, int rw_flags,
+				     struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq;
 	struct request_list *rl = &q->rq;
@@ -1029,56 +1029,55 @@ rq_starved:
 }
 
 /**
- * get_request_wait - get a free request with retry
+ * get_request - get a free request
  * @q: request_queue to allocate request from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
  *
- * Get a free request from @q.  This function keeps retrying under memory
- * pressure and fails iff @q is dead.
+ * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
+ * function keeps retrying under memory pressure and fails iff @q is dead.
  *
 * Must be called with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
-					struct bio *bio)
+static struct request *get_request(struct request_queue *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	DEFINE_WAIT(wait);
+	struct request_list *rl = &q->rq;
 	struct request *rq;
+retry:
+	rq = __get_request(q, rw_flags, bio, gfp_mask);
+	if (rq)
+		return rq;
 
-	rq = get_request(q, rw_flags, bio, GFP_NOIO);
-	while (!rq) {
-		DEFINE_WAIT(wait);
-		struct request_list *rl = &q->rq;
-
-		if (unlikely(blk_queue_dead(q)))
-			return NULL;
-
-		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
-					  TASK_UNINTERRUPTIBLE);
+	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q)))
+		return NULL;
 
-		trace_block_sleeprq(q, bio, rw_flags & 1);
+	/* wait on @rl and retry */
+	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+				  TASK_UNINTERRUPTIBLE);
 
-		spin_unlock_irq(q->queue_lock);
-		io_schedule();
+	trace_block_sleeprq(q, bio, rw_flags & 1);
 
-		/*
-		 * After sleeping, we become a "batching" process and
-		 * will be able to allocate at least one request, and
-		 * up to a big batch of them for a small period time.
-		 * See ioc_batching, ioc_set_batching
-		 */
-		create_io_context(GFP_NOIO, q->node);
-		ioc_set_batching(q, current->io_context);
+	spin_unlock_irq(q->queue_lock);
+	io_schedule();
 
-		spin_lock_irq(q->queue_lock);
-		finish_wait(&rl->wait[is_sync], &wait);
+	/*
+	 * After sleeping, we become a "batching" process and will be able
+	 * to allocate at least one request, and up to a big batch of them
+	 * for a small period time.  See ioc_batching, ioc_set_batching
+	 */
+	create_io_context(GFP_NOIO, q->node);
+	ioc_set_batching(q, current->io_context);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-	};
+	spin_lock_irq(q->queue_lock);
+	finish_wait(&rl->wait[is_sync], &wait);
 
-	return rq;
+	goto retry;
 }
 
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
@@ -1088,10 +1087,7 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 	BUG_ON(rw != READ && rw != WRITE);
 
 	spin_lock_irq(q->queue_lock);
-	if (gfp_mask & __GFP_WAIT)
-		rq = get_request_wait(q, rw, NULL);
-	else
-		rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, rw, NULL, gfp_mask);
 	if (!rq)
 		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
@@ -1481,7 +1477,7 @@ get_rq:
 	 * Grab a free request. This might sleep but cannot fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, rw_flags, bio);
+	req = get_request(q, rw_flags, bio, GFP_NOIO);
 	if (unlikely(!req)) {
 		bio_endio(bio, -ENODEV);	/* @q is dead */
 		goto out_unlock;
--
GitLab

From 7f4b35d155a5f9e5748539a79558533aa08d6a81 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Mon, 4 Jun 2012 20:40:56 -0700
Subject: [PATCH 0027/2891] block: allocate io_context upfront

The block layer does very lazy allocation of ioc.  It waits until the
moment ioc is absolutely necessary; unfortunately, that time could be
inside queue lock, and __get_request() performs unlock - try alloc -
retry dancing.

Just allocate it up-front on entry to the block layer.  We're not
saving the rain forest by deferring it to the last possible moment and
complicating things unnecessarily.

This patch is to prepare for further updates to the request allocation
path.

Signed-off-by: Tejun Heo
Acked-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/blk-core.c     | 42 +++++++++++++++---------------------------
 block/blk-throttle.c |  3 ---
 2 files changed, 15 insertions(+), 30 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 080204a10fcf..71894e143b91 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -855,15 +855,11 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 {
 	struct request *rq;
 	struct request_list *rl = &q->rq;
-	struct elevator_type *et;
-	struct io_context *ioc;
+	struct elevator_type *et = q->elevator->type;
+	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
-	bool retried = false;
 	int may_queue;
-retry:
-	et = q->elevator->type;
-	ioc = rq_ioc(bio);
 
 	if (unlikely(blk_queue_dead(q)))
 		return NULL;
@@ -874,20 +870,6 @@ retry:
 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[is_sync]+1 >= q->nr_requests) {
-			/*
-			 * We want ioc to record batching state.  If it's
-			 * not already there, creating a new one requires
-			 * dropping queue_lock, which in turn requires
-			 * retesting conditions to avoid queue hang.
-			 */
-			if (!ioc && !retried) {
-				spin_unlock_irq(q->queue_lock);
-				create_io_context(gfp_mask, q->node);
-				spin_lock_irq(q->queue_lock);
-				retried = true;
-				goto retry;
-			}
-
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -955,12 +937,8 @@ retry:
 	/* init elvpriv */
 	if (rw_flags & REQ_ELVPRIV) {
 		if (unlikely(et->icq_cache && !icq)) {
-			create_io_context(gfp_mask, q->node);
-			ioc = rq_ioc(bio);
-			if (!ioc)
-				goto fail_elvpriv;
-
-			icq = ioc_create_icq(ioc, q, gfp_mask);
+			if (ioc)
+				icq = ioc_create_icq(ioc, q, gfp_mask);
 			if (!icq)
 				goto fail_elvpriv;
 		}
@@ -1071,7 +1049,6 @@ retry:
 	 * to allocate at least one request, and up to a big batch of them
 	 * for a small period time.  See ioc_batching, ioc_set_batching
 	 */
-	create_io_context(GFP_NOIO, q->node);
 	ioc_set_batching(q, current->io_context);
 
 	spin_lock_irq(q->queue_lock);
@@ -1086,6 +1063,9 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 
 	BUG_ON(rw != READ && rw != WRITE);
 
+	/* create ioc upfront */
+	create_io_context(gfp_mask, q->node);
+
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, rw, NULL, gfp_mask);
 	if (!rq)
@@ -1698,6 +1678,14 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	/*
+	 * Various block parts want %current->io_context and lazy ioc
+	 * allocation ends up trading a lot of pain for a small amount of
+	 * memory.  Just allocate it upfront.  This may fail and block
+	 * layer knows how to live with it.
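+	 * (Consumers cope with a missing ioc: __get_request() above, for
+	 * example, now simply skips icq creation when rq_ioc() finds none.)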
+ */ + create_io_context(GFP_ATOMIC, q->node); + if (blk_throtl_bio(q, bio)) return false; /* throttled, will be resubmitted later */ diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 5b0659512047..e287c19908c8 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio) goto out; } - /* bio_associate_current() needs ioc, try creating */ - create_io_context(GFP_ATOMIC, q->node); - /* * A throtl_grp pointer retrieved under rcu can be used to access * basic fields like stats and io rates. If a group has no rules, -- GitLab From b1208b56f31408f7d8381ff5d08e970aa5ee761c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 4 Jun 2012 20:40:57 -0700 Subject: [PATCH 0028/2891] blkcg: inline bio_blkcg() and friends Make bio_blkcg() and friends inline. They all are very simple and used only in few places. This patch is to prepare for further updates to request allocation path. Signed-off-by: Tejun Heo Acked-by: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 21 --------------------- block/blk-cgroup.h | 26 ++++++++++++++++++++++---- 2 files changed, 22 insertions(+), 25 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 96248d2578db..63b31ebae6e2 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -31,27 +31,6 @@ EXPORT_SYMBOL_GPL(blkcg_root); static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; -struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) -{ - return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), - struct blkcg, css); -} -EXPORT_SYMBOL_GPL(cgroup_to_blkcg); - -static struct blkcg *task_blkcg(struct task_struct *tsk) -{ - return container_of(task_subsys_state(tsk, blkio_subsys_id), - struct blkcg, css); -} - -struct blkcg *bio_blkcg(struct bio *bio) -{ - if (bio && bio->bi_css) - return container_of(bio->bi_css, struct blkcg, css); - return task_blkcg(current); -} -EXPORT_SYMBOL_GPL(bio_blkcg); - static bool blkcg_policy_enabled(struct request_queue *q, const struct blkcg_policy *pol) { diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 8ac457ce7783..e74cce1fbac9 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -120,8 +120,6 @@ struct blkcg_policy { extern struct blkcg blkcg_root; -struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup); -struct blkcg *bio_blkcg(struct bio *bio); struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q); @@ -160,6 +158,25 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, void blkg_conf_finish(struct blkg_conf_ctx *ctx); +static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) +{ + return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), + struct blkcg, css); +} + +static inline struct blkcg *task_blkcg(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, blkio_subsys_id), + struct blkcg, css); +} + +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_css) + return container_of(bio->bi_css, struct blkcg, css); + return task_blkcg(current); +} + /** * blkg_to_pdata - get policy private data * @blkg: blkg of interest @@ -351,6 +368,7 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) #else /* CONFIG_BLK_CGROUP */ struct cgroup; +struct blkcg; struct blkg_policy_data { }; @@ -361,8 +379,6 @@ struct blkcg_gq { struct blkcg_policy { }; -static inline struct blkcg *cgroup_to_blkcg(struct 
cgroup *cgroup) { return NULL; } -static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } static inline int blkcg_init_queue(struct request_queue *q) { return 0; } static inline void blkcg_drain_queue(struct request_queue *q) { } @@ -374,6 +390,8 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } +static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; } +static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) { return NULL; } static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } -- GitLab From 8a5ecdd42862bf87ceab00bf2a15d7eabf58c02d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 4 Jun 2012 20:40:58 -0700 Subject: [PATCH 0029/2891] block: add q->nr_rqs[] and move q->rq.elvpriv to q->nr_rqs_elvpriv Add q->nr_rqs[] which currently behaves the same as q->rq.count[] and move q->rq.elvpriv to q->nr_rqs_elvpriv. blk_drain_queue() is updated to use q->nr_rqs[] instead of q->rq.count[]. These counters separates queue-wide request statistics from the request list and allow implementation of per-queue request allocation. While at it, properly indent fields of struct request_list. Signed-off-by: Tejun Heo Acked-by: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-core.c | 13 +++++++------ include/linux/blkdev.h | 11 ++++++----- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 71894e143b91..a2648153691f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (!list_empty(&q->queue_head) && q->request_fn) __blk_run_queue(q); - drain |= q->rq.elvpriv; + drain |= q->nr_rqs_elvpriv; /* * Unfortunately, requests are queued at and tracked from @@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (drain_all) { drain |= !list_empty(&q->queue_head); for (i = 0; i < 2; i++) { - drain |= q->rq.count[i]; + drain |= q->nr_rqs[i]; drain |= q->in_flight[i]; drain |= !list_empty(&q->flush_queue[i]); } @@ -526,7 +526,6 @@ static int blk_init_free_list(struct request_queue *q) rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; - rl->elvpriv = 0; init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); @@ -791,9 +790,10 @@ static void freed_request(struct request_queue *q, unsigned int flags) struct request_list *rl = &q->rq; int sync = rw_is_sync(flags); + q->nr_rqs[sync]--; rl->count[sync]--; if (flags & REQ_ELVPRIV) - rl->elvpriv--; + q->nr_rqs_elvpriv--; __freed_request(q, sync); @@ -902,6 +902,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags, if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) return NULL; + q->nr_rqs[is_sync]++; rl->count[is_sync]++; rl->starved[is_sync] = 0; @@ -917,7 +918,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags, */ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { rw_flags |= REQ_ELVPRIV; - rl->elvpriv++; + q->nr_rqs_elvpriv++; if (et->icq_cache && ioc) icq = ioc_lookup_icq(ioc, q); } @@ -978,7 +979,7 @@ fail_elvpriv: rq->elv.icq = NULL; 
spin_lock_irq(q->queue_lock); - rl->elvpriv--; + q->nr_rqs_elvpriv--; spin_unlock_irq(q->queue_lock); goto out; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 07954b05b86c..7e44ed93f84b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -51,11 +51,10 @@ struct request_list { * count[], starved[], and wait[] are indexed by * BLK_RW_SYNC/BLK_RW_ASYNC */ - int count[2]; - int starved[2]; - int elvpriv; - mempool_t *rq_pool; - wait_queue_head_t wait[2]; + int count[2]; + int starved[2]; + mempool_t *rq_pool; + wait_queue_head_t wait[2]; }; /* @@ -282,6 +281,8 @@ struct request_queue { struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; + int nr_rqs[2]; /* # allocated [a]sync rqs */ + int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ /* * the queue request freelist, one for reads and one for writes -- GitLab From 5b788ce3e2acac9bf109743b1281d77347cf2101 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 4 Jun 2012 20:40:59 -0700 Subject: [PATCH 0030/2891] block: prepare for multiple request_lists Request allocation is about to be made per-blkg meaning that there'll be multiple request lists. * Make queue full state per request_list. blk_*queue_full() functions are renamed to blk_*rl_full() and takes @rl instead of @q. * Rename blk_init_free_list() to blk_init_rl() and make it take @rl instead of @q. Also add @gfp_mask parameter. * Add blk_exit_rl() instead of destroying rl directly from blk_release_queue(). * Add request_list->q and make request alloc/free functions - blk_free_request(), [__]freed_request(), __get_request() - take @rl instead of @q. This patch doesn't introduce any functional difference. Signed-off-by: Tejun Heo Acked-by: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-core.c | 56 +++++++++++++++++++++++------------------- block/blk-sysfs.c | 12 ++++----- block/blk.h | 3 +++ include/linux/blkdev.h | 32 +++++++++++++----------- 4 files changed, 57 insertions(+), 46 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index a2648153691f..f392a2edf462 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -517,13 +517,13 @@ void blk_cleanup_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_cleanup_queue); -static int blk_init_free_list(struct request_queue *q) +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask) { - struct request_list *rl = &q->rq; - if (unlikely(rl->rq_pool)) return 0; + rl->q = q; rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); @@ -531,13 +531,19 @@ static int blk_init_free_list(struct request_queue *q) rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep, - GFP_KERNEL, q->node); + gfp_mask, q->node); if (!rl->rq_pool) return -ENOMEM; return 0; } +void blk_exit_rl(struct request_list *rl) +{ + if (rl->rq_pool) + mempool_destroy(rl->rq_pool); +} + struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { return blk_alloc_queue_node(gfp_mask, -1); @@ -679,7 +685,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, if (!q) return NULL; - if (blk_init_free_list(q)) + if (blk_init_rl(&q->rq, q, GFP_KERNEL)) return NULL; q->request_fn = rfn; @@ -721,15 +727,15 @@ bool blk_get_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_get_queue); -static inline void blk_free_request(struct request_queue *q, struct request *rq) +static inline void blk_free_request(struct 
request_list *rl, struct request *rq) { if (rq->cmd_flags & REQ_ELVPRIV) { - elv_put_request(q, rq); + elv_put_request(rl->q, rq); if (rq->elv.icq) put_io_context(rq->elv.icq->ioc); } - mempool_free(rq, q->rq.rq_pool); + mempool_free(rq, rl->rq_pool); } /* @@ -766,9 +772,9 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) ioc->last_waited = jiffies; } -static void __freed_request(struct request_queue *q, int sync) +static void __freed_request(struct request_list *rl, int sync) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; if (rl->count[sync] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, sync); @@ -777,7 +783,7 @@ static void __freed_request(struct request_queue *q, int sync) if (waitqueue_active(&rl->wait[sync])) wake_up(&rl->wait[sync]); - blk_clear_queue_full(q, sync); + blk_clear_rl_full(rl, sync); } } @@ -785,9 +791,9 @@ static void __freed_request(struct request_queue *q, int sync) * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. */ -static void freed_request(struct request_queue *q, unsigned int flags) +static void freed_request(struct request_list *rl, unsigned int flags) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; int sync = rw_is_sync(flags); q->nr_rqs[sync]--; @@ -795,10 +801,10 @@ static void freed_request(struct request_queue *q, unsigned int flags) if (flags & REQ_ELVPRIV) q->nr_rqs_elvpriv--; - __freed_request(q, sync); + __freed_request(rl, sync); if (unlikely(rl->starved[sync ^ 1])) - __freed_request(q, sync ^ 1); + __freed_request(rl, sync ^ 1); } /* @@ -838,7 +844,7 @@ static struct io_context *rq_ioc(struct bio *bio) /** * __get_request - get a free request - * @q: request_queue to allocate request from + * @rl: request list to allocate from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) * @gfp_mask: allocation mask @@ -850,11 +856,11 @@ static struct io_context *rq_ioc(struct bio *bio) * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. */ -static struct request *__get_request(struct request_queue *q, int rw_flags, +static struct request *__get_request(struct request_list *rl, int rw_flags, struct bio *bio, gfp_t gfp_mask) { + struct request_queue *q = rl->q; struct request *rq; - struct request_list *rl = &q->rq; struct elevator_type *et = q->elevator->type; struct io_context *ioc = rq_ioc(bio); struct io_cq *icq = NULL; @@ -876,9 +882,9 @@ static struct request *__get_request(struct request_queue *q, int rw_flags, * This process will be allowed to complete a batch of * requests, others will be blocked. */ - if (!blk_queue_full(q, is_sync)) { + if (!blk_rl_full(rl, is_sync)) { ioc_set_batching(q, ioc); - blk_set_queue_full(q, is_sync); + blk_set_rl_full(rl, is_sync); } else { if (may_queue != ELV_MQUEUE_MUST && !ioc_batching(q, ioc)) { @@ -928,7 +934,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags, spin_unlock_irq(q->queue_lock); /* allocate and init request */ - rq = mempool_alloc(q->rq.rq_pool, gfp_mask); + rq = mempool_alloc(rl->rq_pool, gfp_mask); if (!rq) goto fail_alloc; @@ -992,7 +998,7 @@ fail_alloc: * queue, but this is pretty rare. 
*/ spin_lock_irq(q->queue_lock); - freed_request(q, rw_flags); + freed_request(rl, rw_flags); /* * in the very unlikely event that allocation failed and no @@ -1029,7 +1035,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, struct request_list *rl = &q->rq; struct request *rq; retry: - rq = __get_request(q, rw_flags, bio, gfp_mask); + rq = __get_request(&q->rq, rw_flags, bio, gfp_mask); if (rq) return rq; @@ -1229,8 +1235,8 @@ void __blk_put_request(struct request_queue *q, struct request *req) BUG_ON(!list_empty(&req->queuelist)); BUG_ON(!hlist_unhashed(&req->hash)); - blk_free_request(q, req); - freed_request(q, flags); + blk_free_request(&q->rq, req); + freed_request(&q->rq, flags); } } EXPORT_SYMBOL_GPL(__blk_put_request); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index aa41b47c22d2..234ce7c082fa 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -66,16 +66,16 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) blk_clear_queue_congested(q, BLK_RW_ASYNC); if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { - blk_set_queue_full(q, BLK_RW_SYNC); + blk_set_rl_full(rl, BLK_RW_SYNC); } else { - blk_clear_queue_full(q, BLK_RW_SYNC); + blk_clear_rl_full(rl, BLK_RW_SYNC); wake_up(&rl->wait[BLK_RW_SYNC]); } if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { - blk_set_queue_full(q, BLK_RW_ASYNC); + blk_set_rl_full(rl, BLK_RW_ASYNC); } else { - blk_clear_queue_full(q, BLK_RW_ASYNC); + blk_clear_rl_full(rl, BLK_RW_ASYNC); wake_up(&rl->wait[BLK_RW_ASYNC]); } spin_unlock_irq(q->queue_lock); @@ -476,7 +476,6 @@ static void blk_release_queue(struct kobject *kobj) { struct request_queue *q = container_of(kobj, struct request_queue, kobj); - struct request_list *rl = &q->rq; blk_sync_queue(q); @@ -489,8 +488,7 @@ static void blk_release_queue(struct kobject *kobj) elevator_exit(q->elevator); } - if (rl->rq_pool) - mempool_destroy(rl->rq_pool); + blk_exit_rl(&q->rq); if (q->queue_tags) __blk_queue_free_tags(q); diff --git a/block/blk.h b/block/blk.h index 85f6ae42f7d3..a134231fd22a 100644 --- a/block/blk.h +++ b/block/blk.h @@ -18,6 +18,9 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask); +void blk_exit_rl(struct request_list *rl); void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7e44ed93f84b..f2385ee7c7b2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -46,7 +46,12 @@ struct blkcg_gq; struct request; typedef void (rq_end_io_fn)(struct request *, int); +#define BLK_RL_SYNCFULL (1U << 0) +#define BLK_RL_ASYNCFULL (1U << 1) + struct request_list { + struct request_queue *q; /* the queue this rl belongs to */ + /* * count[], starved[], and wait[] are indexed by * BLK_RW_SYNC/BLK_RW_ASYNC @@ -55,6 +60,7 @@ struct request_list { int starved[2]; mempool_t *rq_pool; wait_queue_head_t wait[2]; + unsigned int flags; }; /* @@ -562,27 +568,25 @@ static inline bool rq_is_sync(struct request *rq) return rw_is_sync(rq->cmd_flags); } -static inline int blk_queue_full(struct request_queue *q, int sync) +static inline bool blk_rl_full(struct request_list *rl, bool sync) { - if (sync) - return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); - return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); + unsigned int flag = sync ? 
BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + return rl->flags & flag; } -static inline void blk_set_queue_full(struct request_queue *q, int sync) +static inline void blk_set_rl_full(struct request_list *rl, bool sync) { - if (sync) - queue_flag_set(QUEUE_FLAG_SYNCFULL, q); - else - queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags |= flag; } -static inline void blk_clear_queue_full(struct request_queue *q, int sync) +static inline void blk_clear_rl_full(struct request_list *rl, bool sync) { - if (sync) - queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); - else - queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags &= ~flag; } -- GitLab From a051661ca6d134c18599498b185b667859d4339b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 26 Jun 2012 15:05:44 -0700 Subject: [PATCH 0031/2891] blkcg: implement per-blkg request allocation Currently, request_queue has one request_list to allocate requests from regardless of blkcg of the IO being issued. When the unified request pool is used up, cfq proportional IO limits become meaningless - whoever grabs the next request being freed wins the race regardless of the configured weights. This can be easily demonstrated by creating a blkio cgroup w/ very low weight, putting a program which can issue a lot of random direct IOs there, and running a sequential IO from a different cgroup. As soon as the request pool is used up, the sequential IO bandwidth crashes. This patch implements per-blkg request_list. Each blkg has its own request_list and any IO allocates its request from the matching blkg, making blkcgs completely isolated in terms of request allocation. * Root blkcg uses the request_list embedded in each request_queue, which was renamed to @q->root_rl from @q->rq. While making blkcg rl handling a bit hairier, this enables avoiding most overhead for root blkcg. * Queue fullness is properly per request_list but bdi isn't blkcg aware yet, so congestion state currently just follows the root blkcg. As writeback isn't aware of blkcg yet, this works okay for async congestion but readahead may get the wrong signals. It's better than blkcg completely collapsing with shared request_list but needs to be improved with future changes. * After this change, each block cgroup gets a full request pool making resource consumption of each cgroup higher. This makes allowing non-root users to create cgroups less desirable; however, note that allowing non-root users to directly manage cgroups is already severely broken regardless of this patch - each block cgroup consumes kernel memory and skews IO weight (IO weights are not hierarchical). v2: queue-sysfs.txt updated and patch description updated as suggested by Vivek. v3: blk_get_rl() wasn't checking error return from blkg_lookup_create() and may cause oops on lookup failure. Fix it by falling back to root_rl on blkg lookup failures. This problem was spotted by Rakesh Iyer. v4: Updated to accommodate 458f27a982 "block: Avoid missed wakeup in request waitqueue". blk_drain_queue() now wakes up waiters on all blkg->rl on the target queue.
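To make the isolation described above concrete, the allocation scheme can be modeled in a few lines of plain C. The sketch below is a simplified userspace illustration, not the kernel code: request_list here is a toy stand-in, get_rl() mimics only blk_get_rl()'s fall-back-to-root behavior, and all names and limits are assumptions made for the example.

	/*
	 * Minimal userspace sketch of per-cgroup request pools; not the
	 * kernel implementation.  All names and limits are hypothetical.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct request_list {
		const char *name;
		int count;		/* requests currently allocated */
		int nr_requests;	/* pool limit */
	};

	struct request {
		struct request_list *rl;	/* pool this request came from */
	};

	static struct request_list root_rl = { "root", 0, 128 };

	/* Mirrors the blk_get_rl() fallback: use root_rl when lookup fails. */
	static struct request_list *get_rl(struct request_list *blkg_rl)
	{
		return blkg_rl ? blkg_rl : &root_rl;
	}

	static struct request *get_request(struct request_list *blkg_rl)
	{
		struct request_list *rl = get_rl(blkg_rl);
		struct request *rq;

		if (rl->count >= rl->nr_requests)
			return NULL;	/* only this pool is full; others are unaffected */

		rq = malloc(sizeof(*rq));
		if (!rq)
			return NULL;
		rq->rl = rl;		/* remember the pool, like blk_rq_set_rl() */
		rl->count++;
		return rq;
	}

	static void put_request(struct request *rq)
	{
		rq->rl->count--;	/* accounting follows rq->rl, not the queue */
		free(rq);
	}

	int main(void)
	{
		struct request_list slow = { "slow-cgroup", 0, 2 };
		struct request *a = get_request(&slow);
		struct request *b = get_request(&slow);

		/* The slow cgroup's pool is now exhausted ... */
		printf("third from slow: %p\n", (void *)get_request(&slow));
		/* ... but allocations against the root pool still succeed. */
		printf("from root: %p\n", (void *)get_request(NULL));

		put_request(a);
		put_request(b);
		return 0;
	}

The property the sketch mirrors is that every request records the pool it was allocated from (rq->rl, read back via blk_rq_rl() in the patch), so freeing and accounting always hit the originating pool, and exhausting one cgroup's pool leaves allocations from every other pool untouched.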
Signed-off-by: Tejun Heo Acked-by: Vivek Goyal Cc: Wu Fengguang Signed-off-by: Jens Axboe --- Documentation/block/queue-sysfs.txt | 7 ++ block/blk-cgroup.c | 51 ++++++++++++-- block/blk-cgroup.h | 102 ++++++++++++++++++++++++++++ block/blk-core.c | 42 +++++++++--- block/blk-sysfs.c | 32 +++++---- include/linux/blkdev.h | 12 +++- 6 files changed, 216 insertions(+), 30 deletions(-) diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt index d8147b336c35..6518a55273e7 100644 --- a/Documentation/block/queue-sysfs.txt +++ b/Documentation/block/queue-sysfs.txt @@ -38,6 +38,13 @@ read or write requests. Note that the total allocated number may be twice this amount, since it applies only to reads or writes (not the accumulated sum). +To avoid priority inversion through request starvation, a request +queue maintains a separate request pool per each cgroup when +CONFIG_BLK_CGROUP is enabled, and this parameter applies to each such +per-block-cgroup request pool. IOW, if there are N block cgroups, +each request queue may have upto N request pools, each independently +regulated by nr_requests. + read_ahead_kb (RW) ------------------ Maximum number of kilobytes to read-ahead for filesystems on this block diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 63b31ebae6e2..f3b44a65fc7a 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -63,6 +63,7 @@ static void blkg_free(struct blkcg_gq *blkg) kfree(pd); } + blk_exit_rl(&blkg->rl); kfree(blkg); } @@ -90,6 +91,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, blkg->blkcg = blkcg; blkg->refcnt = 1; + /* root blkg uses @q->root_rl, init rl only for !root blkgs */ + if (blkcg != &blkcg_root) { + if (blk_init_rl(&blkg->rl, q, gfp_mask)) + goto err_free; + blkg->rl.blkg = blkg; + } + for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkg_policy_data *pd; @@ -99,10 +107,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, /* alloc per-policy data and attach it to blkg */ pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); - if (!pd) { - blkg_free(blkg); - return NULL; - } + if (!pd) + goto err_free; blkg->pd[i] = pd; pd->blkg = blkg; @@ -113,6 +119,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, } return blkg; + +err_free: + blkg_free(blkg); + return NULL; } static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, @@ -300,6 +310,38 @@ void __blkg_release(struct blkcg_gq *blkg) } EXPORT_SYMBOL_GPL(__blkg_release); +/* + * The next function used by blk_queue_for_each_rl(). It's a bit tricky + * because the root blkg uses @q->root_rl instead of its own rl. + */ +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q) +{ + struct list_head *ent; + struct blkcg_gq *blkg; + + /* + * Determine the current blkg list_head. The first entry is + * root_rl which is off @q->blkg_list and mapped to the head. 
+ */ + if (rl == &q->root_rl) { + ent = &q->blkg_list; + } else { + blkg = container_of(rl, struct blkcg_gq, rl); + ent = &blkg->q_node; + } + + /* walk to the next list_head, skip root blkcg */ + ent = ent->next; + if (ent == &q->root_blkg->q_node) + ent = ent->next; + if (ent == &q->blkg_list) + return NULL; + + blkg = container_of(ent, struct blkcg_gq, q_node); + return &blkg->rl; +} + static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) { @@ -750,6 +792,7 @@ int blkcg_activate_policy(struct request_queue *q, goto out_unlock; } q->root_blkg = blkg; + q->root_rl.blkg = blkg; list_for_each_entry(blkg, &q->blkg_list, q_node) cnt++; diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index e74cce1fbac9..24597309e23d 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -17,6 +17,7 @@ #include #include #include +#include /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX @@ -93,6 +94,8 @@ struct blkcg_gq { struct list_head q_node; struct hlist_node blkcg_node; struct blkcg *blkcg; + /* request allocation list for this blkcg-q pair */ + struct request_list rl; /* reference count */ int refcnt; @@ -250,6 +253,95 @@ static inline void blkg_put(struct blkcg_gq *blkg) __blkg_release(blkg); } +/** + * blk_get_rl - get request_list to use + * @q: request_queue of interest + * @bio: bio which will be attached to the allocated request (may be %NULL) + * + * The caller wants to allocate a request from @q to use for @bio. Find + * the request_list to use and obtain a reference on it. Should be called + * under queue_lock. This function is guaranteed to return non-%NULL + * request_list. + */ +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) +{ + struct blkcg *blkcg; + struct blkcg_gq *blkg; + + rcu_read_lock(); + + blkcg = bio_blkcg(bio); + + /* bypass blkg lookup and use @q->root_rl directly for root */ + if (blkcg == &blkcg_root) + goto root_rl; + + /* + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. + */ + blkg = blkg_lookup_create(blkcg, q); + if (unlikely(IS_ERR(blkg))) + goto root_rl; + + blkg_get(blkg); + rcu_read_unlock(); + return &blkg->rl; +root_rl: + rcu_read_unlock(); + return &q->root_rl; +} + +/** + * blk_put_rl - put request_list + * @rl: request_list to put + * + * Put the reference acquired by blk_get_rl(). Should be called under + * queue_lock. + */ +static inline void blk_put_rl(struct request_list *rl) +{ + /* root_rl may not have blkg set */ + if (rl->blkg && rl->blkg->blkcg != &blkcg_root) + blkg_put(rl->blkg); +} + +/** + * blk_rq_set_rl - associate a request with a request_list + * @rq: request of interest + * @rl: target request_list + * + * Associate @rq with @rl so that accounting and freeing can know the + * request_list @rq came from. + */ +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) +{ + rq->rl = rl; +} + +/** + * blk_rq_rl - return the request_list a request came from + * @rq: request of interest + * + * Return the request_list @rq is allocated from. + */ +static inline struct request_list *blk_rq_rl(struct request *rq) +{ + return rq->rl; +} + +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q); +/** + * blk_queue_for_each_rl - iterate through all request_lists of a request_queue + * + * Should be used under queue_lock. 
+ */ +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) + /** * blkg_stat_add - add a value to a blkg_stat * @stat: target blkg_stat @@ -392,6 +484,7 @@ static inline void blkcg_deactivate_policy(struct request_queue *q, static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; } static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } + static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) { return NULL; } static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } @@ -399,5 +492,14 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) { return &q->root_rl; } +static inline void blk_put_rl(struct request_list *rl) { } +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } +static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } + +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) + #endif /* CONFIG_BLK_CGROUP */ #endif /* _BLK_CGROUP_H */ diff --git a/block/blk-core.c b/block/blk-core.c index f392a2edf462..dd134d834d58 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -416,9 +416,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) * left with hung waiters. We need to wake up those waiters. */ if (q->request_fn) { + struct request_list *rl; + spin_lock_irq(q->queue_lock); - for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++) - wake_up_all(&q->rq.wait[i]); + + blk_queue_for_each_rl(rl, q) + for (i = 0; i < ARRAY_SIZE(rl->wait); i++) + wake_up_all(&rl->wait[i]); + spin_unlock_irq(q->queue_lock); } } @@ -685,7 +690,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, if (!q) return NULL; - if (blk_init_rl(&q->rq, q, GFP_KERNEL)) + if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) return NULL; q->request_fn = rfn; @@ -776,7 +781,12 @@ static void __freed_request(struct request_list *rl, int sync) { struct request_queue *q = rl->q; - if (rl->count[sync] < queue_congestion_off_threshold(q)) + /* + * bdi isn't aware of blkcg yet. As all async IOs end up root + * blkcg anyway, just use root blkcg state. + */ + if (rl == &q->root_rl && + rl->count[sync] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, sync); if (rl->count[sync] + 1 <= q->nr_requests) { @@ -897,7 +907,12 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, } } } - blk_set_queue_congested(q, is_sync); + /* + * bdi isn't aware of blkcg yet. As all async IOs end up + * root blkcg anyway, just use root blkcg state. 
+ */ + if (rl == &q->root_rl) + blk_set_queue_congested(q, is_sync); } /* @@ -939,6 +954,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, goto fail_alloc; blk_rq_init(q, rq); + blk_rq_set_rl(rq, rl); rq->cmd_flags = rw_flags | REQ_ALLOCED; /* init elvpriv */ @@ -1032,15 +1048,19 @@ static struct request *get_request(struct request_queue *q, int rw_flags, { const bool is_sync = rw_is_sync(rw_flags) != 0; DEFINE_WAIT(wait); - struct request_list *rl = &q->rq; + struct request_list *rl; struct request *rq; + + rl = blk_get_rl(q, bio); /* transferred to @rq on success */ retry: - rq = __get_request(&q->rq, rw_flags, bio, gfp_mask); + rq = __get_request(rl, rw_flags, bio, gfp_mask); if (rq) return rq; - if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) + if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) { + blk_put_rl(rl); return NULL; + } /* wait on @rl and retry */ prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, @@ -1231,12 +1251,14 @@ void __blk_put_request(struct request_queue *q, struct request *req) */ if (req->cmd_flags & REQ_ALLOCED) { unsigned int flags = req->cmd_flags; + struct request_list *rl = blk_rq_rl(req); BUG_ON(!list_empty(&req->queuelist)); BUG_ON(!hlist_unhashed(&req->hash)); - blk_free_request(&q->rq, req); - freed_request(&q->rq, flags); + blk_free_request(rl, req); + freed_request(rl, flags); + blk_put_rl(rl); } } EXPORT_SYMBOL_GPL(__blk_put_request); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 234ce7c082fa..9628b291f960 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -40,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page) static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { - struct request_list *rl = &q->rq; + struct request_list *rl; unsigned long nr; int ret; @@ -55,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) q->nr_requests = nr; blk_queue_congestion_threshold(q); + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) blk_set_queue_congested(q, BLK_RW_SYNC); else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) @@ -65,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, BLK_RW_ASYNC); - if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { - blk_set_rl_full(rl, BLK_RW_SYNC); - } else { - blk_clear_rl_full(rl, BLK_RW_SYNC); - wake_up(&rl->wait[BLK_RW_SYNC]); + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } } - if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { - blk_set_rl_full(rl, BLK_RW_ASYNC); - } else { - blk_clear_rl_full(rl, BLK_RW_ASYNC); - wake_up(&rl->wait[BLK_RW_ASYNC]); - } spin_unlock_irq(q->queue_lock); return ret; } @@ -488,7 +494,7 @@ static void blk_release_queue(struct kobject *kobj) elevator_exit(q->elevator); } - blk_exit_rl(&q->rq); + blk_exit_rl(&q->root_rl); if (q->queue_tags) __blk_queue_free_tags(q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 
f2385ee7c7b2..3816ce8a08fc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -51,7 +51,9 @@ typedef void (rq_end_io_fn)(struct request *, int); struct request_list { struct request_queue *q; /* the queue this rl belongs to */ -+#ifdef CONFIG_BLK_CGROUP + struct blkcg_gq *blkg; /* blkg this request pool belongs to */ +#endif /* * count[], starved[], and wait[] are indexed by * BLK_RW_SYNC/BLK_RW_ASYNC @@ -143,6 +145,7 @@ struct request { struct hd_struct *part; unsigned long start_time; #ifdef CONFIG_BLK_CGROUP + struct request_list *rl; /* rl this rq is alloced from */ unsigned long long start_time_ns; unsigned long long io_start_time_ns; /* when passed to hardware */ #endif @@ -291,9 +294,12 @@ struct request_queue { int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ /* - * the queue request freelist, one for reads and one for writes + * If blkcg is not used, @q->root_rl serves all requests. If blkcg + * is used, root blkg allocates from @q->root_rl and all other + * blkgs from their own blkg->rl. Which one to use should be + * determined using bio_request_list(). */ - struct request_list rq; + struct request_list root_rl; request_fn_proc *request_fn; make_request_fn *make_request_fn; -- GitLab From 6cd77e00c00c5da3b6f8546dcb6dfac9611ca10b Mon Sep 17 00:00:00 2001 From: Liu Ying Date: Mon, 11 Jun 2012 09:06:49 +0800 Subject: [PATCH 0032/2891] mx3fb: support pan display with fb_set_var Users may call the FBIOPUT_VSCREENINFO ioctl to do pan display. This ioctl relies on fb_set_var() to do the job. fb_set_var() calls the custom fb_set_par() method and then calls the custom fb_pan_display() method. Before calling the custom fb_pan_display() method, info->var is already updated from the new *var in fb_set_var(). And, the custom fb_pan_display() method checks if xoffset and yoffset in info->var and the new *var are different before doing actual panning, which prevents the panning from happening within fb_set_var() context. This patch caches the current var info locally in the mx3fb driver so that pan display with fb_set_var() is supported. Signed-off-by: Liu Ying Signed-off-by: Florian Tobias Schandinat --- drivers/video/mx3fb.c | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index eec0d7b748eb..da1d052a18a5 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -269,7 +269,7 @@ struct mx3fb_info { dma_cookie_t cookie; struct scatterlist sg[2]; - u32 sync; /* preserve var->sync flags */ + struct fb_var_screeninfo cur_var; /* current var info */ }; static void mx3fb_dma_done(void *); @@ -700,7 +700,7 @@ static void mx3fb_dma_done(void *arg) static int __set_par(struct fb_info *fbi, bool lock) { - u32 mem_len; + u32 mem_len, cur_xoffset, cur_yoffset; struct ipu_di_signal_cfg sig_cfg; enum ipu_panel mode = IPU_PANEL_TFT; struct mx3fb_info *mx3_fbi = fbi->par; @@ -780,8 +780,25 @@ static int __set_par(struct fb_info *fbi, bool lock) video->out_height = fbi->var.yres; video->out_stride = fbi->var.xres_virtual; - if (mx3_fbi->blank == FB_BLANK_UNBLANK) + if (mx3_fbi->blank == FB_BLANK_UNBLANK) { sdc_enable_channel(mx3_fbi); + /* + * sg[0] points to fb smem_start address + * and is actually active in controller. + */ + mx3_fbi->cur_var.xoffset = 0; + mx3_fbi->cur_var.yoffset = 0; + } + + /* + * Preserve xoffset and yoffset in case they are + * inactive in controller as fb is blanked.
+ */ + cur_xoffset = mx3_fbi->cur_var.xoffset; + cur_yoffset = mx3_fbi->cur_var.yoffset; + mx3_fbi->cur_var = fbi->var; + mx3_fbi->cur_var.xoffset = cur_xoffset; + mx3_fbi->cur_var.yoffset = cur_yoffset; return 0; } @@ -901,8 +918,8 @@ static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) var->grayscale = 0; /* Preserve sync flags */ - var->sync |= mx3_fbi->sync; - mx3_fbi->sync |= var->sync; + var->sync |= mx3_fbi->cur_var.sync; + mx3_fbi->cur_var.sync |= var->sync; return 0; } @@ -1043,8 +1060,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, return -EINVAL; } - if (fbi->var.xoffset == var->xoffset && - fbi->var.yoffset == var->yoffset) + if (mx3_fbi->cur_var.xoffset == var->xoffset && + mx3_fbi->cur_var.yoffset == var->yoffset) return 0; /* No change, do nothing */ y_bottom = var->yoffset; @@ -1127,6 +1144,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, else fbi->var.vmode &= ~FB_VMODE_YWRAP; + mx3_fbi->cur_var = fbi->var; + mutex_unlock(&mx3_fbi->mutex); dev_dbg(fbi->device, "Update complete\n"); -- GitLab From 41a490ec019fd172f5d0356e01d8101b4708aee2 Mon Sep 17 00:00:00 2001 From: Liu Ying Date: Mon, 11 Jun 2012 09:06:50 +0800 Subject: [PATCH 0033/2891] mx3fb: avoid screen flash when panning with fb_set_var Users may call the FBIOPUT_VSCREENINFO ioctl to do pan display. This ioctl relies on fb_set_var() to do the job. fb_set_var() calls the custom fb_set_par() method and then calls the custom fb_pan_display() method. The current implementation of mx3fb reinitializes the IPU display controller every time the custom fb_set_par() method is called, which makes the screen flash if fb_set_var() is called to do panning frequently. This patch compares the new var info with the cached old one to decide whether we really need to reinitialize the IPU display controller. We ignore xoffset and yoffset updates because they don't require reinitializing the controller. Users may set the activate field of the var info to FB_ACTIVATE_NOW and FB_ACTIVATE_FORCE to reinitialize the controller by force. Signed-off-by: Liu Ying Signed-off-by: Florian Tobias Schandinat --- drivers/video/mx3fb.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index da1d052a18a5..c89f8a8d36d2 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -698,6 +698,26 @@ static void mx3fb_dma_done(void *arg) complete(&mx3_fbi->flip_cmpl); } +static bool mx3fb_must_set_par(struct fb_info *fbi) +{ + struct mx3fb_info *mx3_fbi = fbi->par; + struct fb_var_screeninfo old_var = mx3_fbi->cur_var; + struct fb_var_screeninfo new_var = fbi->var; + + if ((fbi->var.activate & FB_ACTIVATE_FORCE) && + (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) + return true; + + /* + * Ignore xoffset and yoffset update, + * because pan display handles this case. + */ + old_var.xoffset = new_var.xoffset; + old_var.yoffset = new_var.yoffset; + + return !!memcmp(&old_var, &new_var, sizeof(struct fb_var_screeninfo)); +} + static int __set_par(struct fb_info *fbi, bool lock) { u32 mem_len, cur_xoffset, cur_yoffset; @@ -819,7 +839,7 @@ static int mx3fb_set_par(struct fb_info *fbi) mutex_lock(&mx3_fbi->mutex); - ret = __set_par(fbi, true); + ret = mx3fb_must_set_par(fbi) ?
__set_par(fbi, true) : 0; mutex_unlock(&mx3_fbi->mutex); -- GitLab From 3b9cc4ea4735d3c73ee27ef7b829b6fe4535eefe Mon Sep 17 00:00:00 2001 From: Aditya Nellutla Date: Wed, 23 May 2012 11:36:31 +0530 Subject: [PATCH 0034/2891] da8xx-fb: Rounding FB size to satisfy SGX buffer requirements In the real-time use case where SGX is used for rendering to FB buffers, it has been observed that the available memory from the framebuffer driver is not sufficient for SGX in certain cases (like 16-bit WVGA resolution). SGX requires 2 swap buffers, each aligned to lcm(line_length, PAGE_SIZE). In order to satisfy this requirement, we have two options: - Increase the number of FB buffers (LCD_NUM_BUFFERS) to 3. This is not recommended, as we end up wasting a large amount of memory in most cases. - Align FB buffers to lcm(line_length, PAGE_SIZE). This ensures the framebuffer size is increased to satisfy SGX requirements while keeping the alignment intact. This patch makes sure that the FB buffers are allocated aligned to the above formula. Signed-off-by: Aditya Nellutla Signed-off-by: Florian Tobias Schandinat --- drivers/video/da8xx-fb.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 47118c75a4c0..2f24c19d7d08 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -31,6 +31,7 @@ #include #include #include +#include <linux/lcm.h> #include