patches/0000775000077200007720000000000010655544577011543 5ustar mingomingopatches/radix-tree-optimistic.patch0000664000077200007720000002660610655544576017023 0ustar mingomingoSubject: radix-tree: optimistic locking Implement optimistic locking for the concurrent radix tree. Optimistic locking is aimed at avoiding taking higher level node locks. We decent the tree using an RCU lookup, looking for the lowest modification termination point. If found, we try to acquire the lock of that node. After we have obtained this lock, we will need to validate if the initial conditions still hold true. We do this by repeating the steps that found us this node in the first place. Signed-off-by: Peter Zijlstra --- include/linux/radix-tree.h | 27 +++++- init/Kconfig | 6 + lib/radix-tree.c | 194 +++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 206 insertions(+), 21 deletions(-) Index: linux-rt-rebase.q/include/linux/radix-tree.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/radix-tree.h +++ linux-rt-rebase.q/include/linux/radix-tree.h @@ -197,28 +197,47 @@ static inline void radix_tree_replace_sl rcu_assign_pointer(*pslot, item); } +#if defined(CONFIG_RADIX_TREE_OPTIMISTIC) +static inline void radix_tree_lock(struct radix_tree_context *context) +{ + rcu_read_lock(); + BUG_ON(context->locked); +} +#elif defined(CONFIG_RADIX_TREE_CONCURRENT) static inline void radix_tree_lock(struct radix_tree_context *context) { struct radix_tree_root *root = context->root; + rcu_read_lock(); spin_lock(&root->lock); -#ifdef CONFIG_RADIX_TREE_CONCURRENT BUG_ON(context->locked); context->locked = &root->lock; -#endif } +#else +static inline void radix_tree_lock(struct radix_tree_context *context) +{ + struct radix_tree_root *root = context->root; + + rcu_read_lock(); + spin_lock(&root->lock); +} +#endif +#if defined(CONFIG_RADIX_TREE_CONCURRENT) static inline void radix_tree_unlock(struct radix_tree_context *context) { -#ifdef CONFIG_RADIX_TREE_CONCURRENT BUG_ON(!context->locked); spin_unlock(context->locked); context->locked = NULL; + rcu_read_unlock(); +} #else +static inline void radix_tree_unlock(struct radix_tree_context *context) +{ spin_unlock(&context->root->lock); -#endif rcu_read_unlock(); } +#endif int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); void *radix_tree_lookup(struct radix_tree_root *, unsigned long); Index: linux-rt-rebase.q/init/Kconfig =================================================================== --- linux-rt-rebase.q.orig/init/Kconfig +++ linux-rt-rebase.q/init/Kconfig @@ -352,8 +352,14 @@ config SYSCTL config RADIX_TREE_CONCURRENT bool "Enable concurrent radix tree operations (EXPERIMENTAL)" + depends on EXPERIMENTAL default y if SMP +config RADIX_TREE_OPTIMISTIC + bool "Enabled optimistic locking (EXPERIMENTAL)" + depends on RADIX_TREE_CONCURRENT + default y + menuconfig EMBEDDED bool "Configure standard kernel features (for small systems)" help Index: linux-rt-rebase.q/lib/radix-tree.c =================================================================== --- linux-rt-rebase.q.orig/lib/radix-tree.c +++ linux-rt-rebase.q/lib/radix-tree.c @@ -368,6 +368,117 @@ static inline void radix_path_unlock(str #define radix_path_unlock(context, punlock) do { } while (0) #endif +#ifdef CONFIG_RADIX_TREE_OPTIMISTIC +typedef int (*radix_valid_fn)(struct radix_tree_node *, int, int); + +static struct radix_tree_node * +radix_optimistic_lookup(struct radix_tree_context *context, unsigned long index, + int tag, 
radix_valid_fn valid) +{ + unsigned int height, shift; + struct radix_tree_node *node, *ret = NULL, **slot; + struct radix_tree_root *root = context->root; + + node = rcu_dereference(root->rnode); + if (node == NULL) + return NULL; + + if (!radix_tree_is_indirect_ptr(node)) + return NULL; + + node = radix_tree_indirect_to_ptr(node); + + height = node->height; + if (index > radix_tree_maxindex(height)) + return NULL; + + shift = (height-1) * RADIX_TREE_MAP_SHIFT; + do { + int offset = (index >> shift) & RADIX_TREE_MAP_MASK; + if ((*valid)(node, offset, tag)) + ret = node; + slot = (struct radix_tree_node **)(node->slots + offset); + node = rcu_dereference(*slot); + if (!node) + break; + + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } while (height > 0); + + return ret; +} + +static struct radix_tree_node * +__radix_optimistic_lock(struct radix_tree_context *context, unsigned long index, + int tag, radix_valid_fn valid) +{ + struct radix_tree_node *node; + spinlock_t *locked; + unsigned int shift, offset; + + node = radix_optimistic_lookup(context, index, tag, valid); + if (!node) + goto out; + + locked = radix_node_lock(context->root, node); + if (!locked) + goto out; + +#if 0 + if (node != radix_optimistic_lookup(context, index, tag, valid)) + goto out_unlock; +#else + /* check if the node got freed */ + if (!node->count) + goto out_unlock; + + /* check if the node is still a valid termination point */ + shift = (node->height - 1) * RADIX_TREE_MAP_SHIFT; + offset = (index >> shift) & RADIX_TREE_MAP_MASK; + if (!(*valid)(node, offset, tag)) + goto out_unlock; +#endif + + context->locked = locked; + return node; + +out_unlock: + spin_unlock(locked); +out: + return NULL; +} + +static struct radix_tree_node * +radix_optimistic_lock(struct radix_tree_context *context, unsigned long index, + int tag, radix_valid_fn valid) +{ + struct radix_tree_node *node = NULL; + + if (context) { + node = __radix_optimistic_lock(context, index, tag, valid); + if (!node) { + BUG_ON(context->locked); + spin_lock(&context->root->lock); + context->locked = &context->root->lock; + } + } + return node; +} + +static int radix_valid_always(struct radix_tree_node *node, int offset, int tag) +{ + return 1; +} + +static int radix_valid_tag(struct radix_tree_node *node, int offset, int tag) +{ + return tag_get(node, tag, offset); +} +#else +#define radix_optimistic_lock(context, index, tag, valid) NULL +#endif + /** * radix_tree_insert - insert into a radix tree * @root: radix tree root @@ -388,6 +499,13 @@ int radix_tree_insert(struct radix_tree_ BUG_ON(radix_tree_is_indirect_ptr(item)); + node = radix_optimistic_lock(context, index, 0, radix_valid_always); + if (node) { + height = node->height; + shift = (height-1) * RADIX_TREE_MAP_SHIFT; + goto optimistic; + } + /* Make sure the tree is high enough. 
*/ if (index > radix_tree_maxindex(root->height)) { error = radix_tree_extend(root, index); @@ -396,7 +514,6 @@ int radix_tree_insert(struct radix_tree_ } slot = radix_tree_indirect_to_ptr(root->rnode); - height = root->height; shift = (height-1) * RADIX_TREE_MAP_SHIFT; @@ -415,11 +532,11 @@ int radix_tree_insert(struct radix_tree_ } /* Go a level down */ - offset = (index >> shift) & RADIX_TREE_MAP_MASK; node = slot; - radix_ladder_lock(context, node); +optimistic: + offset = (index >> shift) & RADIX_TREE_MAP_MASK; slot = node->slots[offset]; shift -= RADIX_TREE_MAP_SHIFT; height--; @@ -462,6 +579,10 @@ void **radix_tree_lookup_slot(struct rad struct radix_tree_node *node, **slot; RADIX_TREE_CONTEXT(context, root); + node = radix_optimistic_lock(context, index, 0, radix_valid_always); + if (node) + goto optimistic; + node = rcu_dereference(root->rnode); if (node == NULL) return NULL; @@ -473,6 +594,7 @@ void **radix_tree_lookup_slot(struct rad } node = radix_tree_indirect_to_ptr(node); +optimistic: height = node->height; if (index > radix_tree_maxindex(height)) return NULL; @@ -565,6 +687,13 @@ void *radix_tree_tag_set(struct radix_tr struct radix_tree_node *slot; RADIX_TREE_CONTEXT(context, root); + slot = radix_optimistic_lock(context, index, tag, radix_valid_tag); + if (slot) { + height = slot->height; + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + goto optimistic; + } + height = root->height; BUG_ON(index > radix_tree_maxindex(height)); @@ -580,6 +709,7 @@ void *radix_tree_tag_set(struct radix_tr radix_ladder_lock(context, slot); +optimistic: offset = (index >> shift) & RADIX_TREE_MAP_MASK; if (!tag_get(slot, tag, offset)) tag_set(slot, tag, offset); @@ -596,13 +726,13 @@ EXPORT_SYMBOL(radix_tree_tag_set); /* * the change can never propagate upwards from here. 
*/ -static inline int radix_tree_unlock_tag(struct radix_tree_root *root, - struct radix_tree_path *pathp, int tag) +static +int radix_valid_tag_clear(struct radix_tree_node *node, int offset, int tag) { int this, other; - this = tag_get(pathp->node, tag, pathp->offset); - other = any_tag_set_but(pathp->node, tag, pathp->offset); + this = tag_get(node, tag, offset); + other = any_tag_set_but(node, tag, offset); return !this || other; } @@ -627,9 +757,22 @@ void *radix_tree_tag_clear(struct radix_ struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; struct radix_tree_path *punlock = path, *piter; struct radix_tree_node *slot = NULL; - unsigned int height, shift; + unsigned int height, shift, offset; + RADIX_TREE_CONTEXT(context, root); + slot = radix_optimistic_lock(context, index, tag, + radix_valid_tag_clear); + if (slot) { + height = slot->height; + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + offset = (index >> shift) & RADIX_TREE_MAP_MASK; + pathp->offset = offset; + pathp->node = slot; + radix_path_init(context, pathp); + goto optimistic; + } + pathp->node = NULL; radix_path_init(context, pathp); @@ -641,8 +784,6 @@ void *radix_tree_tag_clear(struct radix_ slot = radix_tree_indirect_to_ptr(root->rnode); while (height > 0) { - int offset; - if (slot == NULL) goto out; @@ -652,11 +793,12 @@ void *radix_tree_tag_clear(struct radix_ pathp->node = slot; radix_path_lock(context, pathp, slot); - if (radix_tree_unlock_tag(root, pathp, tag)) { + if (radix_valid_tag_clear(slot, offset, tag)) { for (; punlock < pathp; punlock++) radix_path_unlock(context, punlock); } +optimistic: slot = slot->slots[offset]; shift -= RADIX_TREE_MAP_SHIFT; height--; @@ -1167,14 +1309,20 @@ static inline void radix_tree_shrink(str } } -static inline int radix_tree_unlock_all(struct radix_tree_root *root, - struct radix_tree_path *pathp) +static +int radix_valid_delete(struct radix_tree_node *node, int offset, int tag) { - int tag; - int unlock = 1; + /* + * we need to check for > 2, because nodes with a single child + * can still be deleted, see radix_tree_shrink(). 
+ */ + int unlock = (node->count > 2); + + if (!unlock) + return unlock; for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { - if (!radix_tree_unlock_tag(root, pathp, tag)) { + if (!radix_valid_tag_clear(node, offset, tag)) { unlock = 0; break; } @@ -1202,6 +1350,17 @@ void *radix_tree_delete(struct radix_tre int offset; RADIX_TREE_CONTEXT(context, root); + slot = radix_optimistic_lock(context, index, 0, radix_valid_delete); + if (slot) { + height = slot->height; + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + offset = (index >> shift) & RADIX_TREE_MAP_MASK; + pathp->offset = offset; + pathp->node = slot; + radix_path_init(context, pathp); + goto optimistic; + } + pathp->node = NULL; radix_path_init(context, pathp); @@ -1229,11 +1388,12 @@ void *radix_tree_delete(struct radix_tre pathp->node = slot; radix_path_lock(context, pathp, slot); - if (slot->count > 2 && radix_tree_unlock_all(root, pathp)) { + if (radix_valid_delete(slot, offset, 0)) { for (; punlock < pathp; punlock++) radix_path_unlock(context, punlock); } +optimistic: slot = slot->slots[offset]; shift -= RADIX_TREE_MAP_SHIFT; height--; patches/latency-tracer-one-off-fix.patch0000664000077200007720000000171710655544572017615 0ustar mingomingoFix a simple issue in latency_tracer.c Fix a simple issue in latency_tracer.c Signed-off-by: Jan Altenberg --- kernel/latency_trace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -1003,7 +1003,7 @@ static int min_idx(struct block_idx *bid idx = bidx->idx[cpu]; if (idx >= min(max_tr.traces[cpu].trace_idx, MAX_TRACE)) continue; - if (idx >= MAX_TRACE*NR_CPUS) { + if (idx > MAX_TRACE*NR_CPUS) { printk("huh: idx (%d) > %ld*%d!\n", idx, MAX_TRACE, NR_CPUS); WARN_ON(1); @@ -1150,7 +1150,7 @@ static void update_out_trace(void) *out_entry = *entry; out_entry++; sum++; - if (sum >= MAX_TRACE*NR_CPUS) { + if (sum > MAX_TRACE*NR_CPUS) { printk("huh: sum (%d) > %ld*%d!\n", sum, MAX_TRACE, NR_CPUS); WARN_ON(1); patches/print-might-sleep-hack.patch0000664000077200007720000000510310655544576017036 0ustar mingomingoTemporary HACK!!!! PREEMPT_RT suffers from the on going problem of running printk in atomic operations. It is very advantageous to do so but with PREEMPT_RT making spin_locks sleep, it can also be devastating. This patch does not solve the problem of printk sleeping in an atomic operation. This patch just makes printk not report that it is. Of course if printk does report that it's sleeping in an atomic operation, then that printing of the report will also print a report, and you go into recursive hell. We need to really sit down and solve the real issue here. --- include/linux/sched.h | 13 +++++++++++++ kernel/printk.c | 4 ++++ kernel/rtmutex.c | 4 +++- 3 files changed, 20 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -1393,8 +1393,21 @@ struct task_struct { #ifdef CONFIG_FAULT_INJECTION int make_it_fail; #endif +#ifdef CONFIG_PREEMPT_RT + /* + * Temporary hack, until we find a solution to + * handle printk in atomic operations. 
+ */ + int in_printk; +#endif }; +#ifdef CONFIG_PREEMPT_RT +# define set_printk_might_sleep(x) do { current->in_printk = x; } while(0) +#else +# define set_printk_might_sleep(x) do { } while(0) +#endif + /* * Priority of a process goes from 0..MAX_PRIO-1, valid RT * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH Index: linux-rt-rebase.q/kernel/printk.c =================================================================== --- linux-rt-rebase.q.orig/kernel/printk.c +++ linux-rt-rebase.q/kernel/printk.c @@ -339,10 +339,14 @@ static void __call_console_drivers(unsig int trace_save = trace_enabled; trace_enabled = 0; + set_printk_might_sleep(1); con->write(con, &LOG_BUF(start), end - start); + set_printk_might_sleep(0); trace_enabled = trace_save; #else + set_printk_might_sleep(1); con->write(con, &LOG_BUF(start), end - start); + set_printk_might_sleep(0); #endif } } Index: linux-rt-rebase.q/kernel/rtmutex.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rtmutex.c +++ linux-rt-rebase.q/kernel/rtmutex.c @@ -631,7 +631,9 @@ static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, void fastcall (*slowfn)(struct rt_mutex *lock)) { - might_sleep(); + /* Temporary HACK! */ + if (!current->in_printk) + might_sleep(); if (likely(rt_mutex_cmpxchg(lock, NULL, current))) rt_mutex_deadlock_account_lock(lock, current); patches/preempt-realtime-netconsole.patch0000664000077200007720000000141410655544575020205 0ustar mingomingo--- drivers/net/netconsole.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) Index: linux-rt-rebase.q/drivers/net/netconsole.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/netconsole.c +++ linux-rt-rebase.q/drivers/net/netconsole.c @@ -68,21 +68,16 @@ static int configured = 0; static void write_msg(struct console *con, const char *msg, unsigned int len) { int frag, left; - unsigned long flags; if (!np.dev) return; - local_irq_save(flags); - - for(left = len; left; ) { + for (left = len; left; ) { frag = min(left, MAX_PRINT_CHUNK); netpoll_send_udp(&np, msg, frag); msg += frag; left -= frag; } - - local_irq_restore(flags); } static struct console netconsole = { patches/2.6.21-rc6-lockless8-spinlock-tree_lock.patch0000664000077200007720000003161110655544576021475 0ustar mingomingoFrom: Nick Piggin Subject: [patch 8/9] mm: spinlock tree_lock mapping->tree_lock has no read lockers. convert the lock from an rwlock to a spinlock. Signed-off-by: Nick Piggin --- fs/buffer.c | 4 ++-- fs/inode.c | 2 +- include/asm-arm/cacheflush.h | 4 ++-- include/asm-parisc/cacheflush.h | 4 ++-- include/linux/fs.h | 2 +- mm/filemap.c | 10 +++++----- mm/migrate.c | 6 +++--- mm/page-writeback.c | 14 +++++++------- mm/swap_state.c | 10 +++++----- mm/swapfile.c | 4 ++-- mm/truncate.c | 6 +++--- mm/vmscan.c | 8 ++++---- 12 files changed, 37 insertions(+), 37 deletions(-) Index: linux-rt-rebase.q/fs/buffer.c =================================================================== --- linux-rt-rebase.q.orig/fs/buffer.c +++ linux-rt-rebase.q/fs/buffer.c @@ -684,7 +684,7 @@ static int __set_page_dirty(struct page if (TestSetPageDirty(page)) return 0; - write_lock_irq(&mapping->tree_lock); + spin_lock_irq(&mapping->tree_lock); if (page->mapping) { /* Race with truncate? 
*/ WARN_ON_ONCE(warn && !PageUptodate(page)); @@ -695,7 +695,7 @@ static int __set_page_dirty(struct page radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); return 1; Index: linux-rt-rebase.q/fs/inode.c =================================================================== --- linux-rt-rebase.q.orig/fs/inode.c +++ linux-rt-rebase.q/fs/inode.c @@ -193,7 +193,7 @@ void inode_init_once(struct inode *inode mutex_init(&inode->i_mutex); init_rwsem(&inode->i_alloc_sem); INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); - rwlock_init(&inode->i_data.tree_lock); + spin_lock_init(&inode->i_data.tree_lock); spin_lock_init(&inode->i_data.i_mmap_lock); INIT_LIST_HEAD(&inode->i_data.private_list); spin_lock_init(&inode->i_data.private_lock); Index: linux-rt-rebase.q/include/asm-arm/cacheflush.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/cacheflush.h +++ linux-rt-rebase.q/include/asm-arm/cacheflush.h @@ -413,9 +413,9 @@ static inline void flush_anon_page(struc } #define flush_dcache_mmap_lock(mapping) \ - write_lock_irq(&(mapping)->tree_lock) + spin_lock_irq(&(mapping)->tree_lock) #define flush_dcache_mmap_unlock(mapping) \ - write_unlock_irq(&(mapping)->tree_lock) + spin_unlock_irq(&(mapping)->tree_lock) #define flush_icache_user_range(vma,page,addr,len) \ flush_dcache_page(page) Index: linux-rt-rebase.q/include/asm-parisc/cacheflush.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-parisc/cacheflush.h +++ linux-rt-rebase.q/include/asm-parisc/cacheflush.h @@ -45,9 +45,9 @@ void flush_cache_mm(struct mm_struct *mm extern void flush_dcache_page(struct page *page); #define flush_dcache_mmap_lock(mapping) \ - write_lock_irq(&(mapping)->tree_lock) + spin_lock_irq(&(mapping)->tree_lock) #define flush_dcache_mmap_unlock(mapping) \ - write_unlock_irq(&(mapping)->tree_lock) + spin_unlock_irq(&(mapping)->tree_lock) #define flush_icache_page(vma,page) do { \ flush_kernel_dcache_page(page); \ Index: linux-rt-rebase.q/include/linux/fs.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/fs.h +++ linux-rt-rebase.q/include/linux/fs.h @@ -441,7 +441,7 @@ struct backing_dev_info; struct address_space { struct inode *host; /* owner: inode, block_device */ struct radix_tree_root page_tree; /* radix tree of all pages */ - rwlock_t tree_lock; /* and rwlock protecting it */ + spinlock_t tree_lock; /* and lock protecting it */ unsigned int i_mmap_writable;/* count VM_SHARED mappings */ struct prio_tree_root i_mmap; /* tree of private and shared mappings */ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ Index: linux-rt-rebase.q/mm/filemap.c =================================================================== --- linux-rt-rebase.q.orig/mm/filemap.c +++ linux-rt-rebase.q/mm/filemap.c @@ -110,7 +110,7 @@ generic_file_direct_IO(int rw, struct ki /* * Remove a page from the page cache and free it. Caller has to make * sure the page is locked and that nobody else uses it - or that usage - * is safe. The caller must hold a write_lock on the mapping's tree_lock. + * is safe. The caller must hold the mapping's tree_lock. 
*/ void __remove_from_page_cache(struct page *page) { @@ -129,9 +129,9 @@ void remove_from_page_cache(struct page BUG_ON(!PageLocked(page)); - write_lock_irq(&mapping->tree_lock); + spin_lock_irq(&mapping->tree_lock); __remove_from_page_cache(page); - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); } static int sync_page(void *word) @@ -442,7 +442,7 @@ int add_to_page_cache(struct page *page, if (error == 0) { set_page_nonewrefs(page); - write_lock_irq(&mapping->tree_lock); + spin_lock_irq(&mapping->tree_lock); error = radix_tree_insert(&mapping->page_tree, offset, page); if (!error) { page_cache_get(page); @@ -452,7 +452,7 @@ int add_to_page_cache(struct page *page, mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); } - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); clear_page_nonewrefs(page); radix_tree_preload_end(); } Index: linux-rt-rebase.q/mm/migrate.c =================================================================== --- linux-rt-rebase.q.orig/mm/migrate.c +++ linux-rt-rebase.q/mm/migrate.c @@ -303,14 +303,14 @@ static int migrate_page_move_mapping(str } set_page_nonewrefs(page); - write_lock_irq(&mapping->tree_lock); + spin_lock_irq(&mapping->tree_lock); pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page)); if (page_count(page) != 2 + !!PagePrivate(page) || (struct page *)radix_tree_deref_slot(pslot) != page) { - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); clear_page_nonewrefs(page); return -EAGAIN; } @@ -328,7 +328,7 @@ static int migrate_page_move_mapping(str radix_tree_replace_slot(pslot, newpage); page->mapping = NULL; - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); clear_page_nonewrefs(page); /* Index: linux-rt-rebase.q/mm/page-writeback.c =================================================================== --- linux-rt-rebase.q.orig/mm/page-writeback.c +++ linux-rt-rebase.q/mm/page-writeback.c @@ -809,7 +809,7 @@ int __set_page_dirty_no_writeback(struct * mapping is pinned by the vma's ->vm_file reference. * * We take care to handle the case where the page was truncated from the - * mapping by re-checking page_mapping() insode tree_lock. + * mapping by re-checking page_mapping() inside tree_lock. */ int __set_page_dirty_nobuffers(struct page *page) { @@ -820,7 +820,7 @@ int __set_page_dirty_nobuffers(struct pa if (!mapping) return 1; - write_lock_irq(&mapping->tree_lock); + spin_lock_irq(&mapping->tree_lock); mapping2 = page_mapping(page); if (mapping2) { /* Race with truncate? 
*/ BUG_ON(mapping2 != mapping); @@ -832,7 +832,7 @@ int __set_page_dirty_nobuffers(struct pa radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); if (mapping->host) { /* !PageAnon && !swapper_space */ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); @@ -977,13 +977,13 @@ int test_clear_page_writeback(struct pag if (mapping) { unsigned long flags; - write_lock_irqsave(&mapping->tree_lock, flags); + spin_lock_irqsave(&mapping->tree_lock, flags); ret = TestClearPageWriteback(page); if (ret) radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_WRITEBACK); - write_unlock_irqrestore(&mapping->tree_lock, flags); + spin_unlock_irqrestore(&mapping->tree_lock, flags); } else { ret = TestClearPageWriteback(page); } @@ -1000,7 +1000,7 @@ int test_set_page_writeback(struct page if (mapping) { unsigned long flags; - write_lock_irqsave(&mapping->tree_lock, flags); + spin_lock_irqsave(&mapping->tree_lock, flags); ret = TestSetPageWriteback(page); if (!ret) radix_tree_tag_set(&mapping->page_tree, @@ -1010,7 +1010,7 @@ int test_set_page_writeback(struct page radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); - write_unlock_irqrestore(&mapping->tree_lock, flags); + spin_unlock_irqrestore(&mapping->tree_lock, flags); } else { ret = TestSetPageWriteback(page); } Index: linux-rt-rebase.q/mm/swap_state.c =================================================================== --- linux-rt-rebase.q.orig/mm/swap_state.c +++ linux-rt-rebase.q/mm/swap_state.c @@ -38,7 +38,7 @@ static struct backing_dev_info swap_back struct address_space swapper_space = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), - .tree_lock = __RW_LOCK_UNLOCKED(swapper_space.tree_lock), + .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock), .a_ops = &swap_aops, .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), .backing_dev_info = &swap_backing_dev_info, @@ -80,7 +80,7 @@ static int __add_to_swap_cache(struct pa error = radix_tree_preload(gfp_mask); if (!error) { set_page_nonewrefs(page); - write_lock_irq(&swapper_space.tree_lock); + spin_lock_irq(&swapper_space.tree_lock); error = radix_tree_insert(&swapper_space.page_tree, entry.val, page); if (!error) { @@ -90,7 +90,7 @@ static int __add_to_swap_cache(struct pa total_swapcache_pages++; __inc_zone_page_state(page, NR_FILE_PAGES); } - write_unlock_irq(&swapper_space.tree_lock); + spin_unlock_irq(&swapper_space.tree_lock); clear_page_nonewrefs(page); radix_tree_preload_end(); } @@ -202,9 +202,9 @@ void delete_from_swap_cache(struct page entry.val = page_private(page); - write_lock_irq(&swapper_space.tree_lock); + spin_lock_irq(&swapper_space.tree_lock); __delete_from_swap_cache(page); - write_unlock_irq(&swapper_space.tree_lock); + spin_unlock_irq(&swapper_space.tree_lock); swap_free(entry); page_cache_release(page); Index: linux-rt-rebase.q/mm/swapfile.c =================================================================== --- linux-rt-rebase.q.orig/mm/swapfile.c +++ linux-rt-rebase.q/mm/swapfile.c @@ -367,13 +367,13 @@ int remove_exclusive_swap_page(struct pa retval = 0; if (p->swap_map[swp_offset(entry)] == 1) { /* Recheck the page count with the swapcache lock held.. 
*/ - write_lock_irq(&swapper_space.tree_lock); + spin_lock_irq(&swapper_space.tree_lock); if ((page_count(page) == 2) && !PageWriteback(page)) { __delete_from_swap_cache(page); SetPageDirty(page); retval = 1; } - write_unlock_irq(&swapper_space.tree_lock); + spin_unlock_irq(&swapper_space.tree_lock); } spin_unlock(&swap_lock); Index: linux-rt-rebase.q/mm/truncate.c =================================================================== --- linux-rt-rebase.q.orig/mm/truncate.c +++ linux-rt-rebase.q/mm/truncate.c @@ -347,18 +347,18 @@ invalidate_complete_page2(struct address if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL)) return 0; - write_lock_irq(&mapping->tree_lock); + spin_lock_irq(&mapping->tree_lock); if (PageDirty(page)) goto failed; BUG_ON(PagePrivate(page)); __remove_from_page_cache(page); - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); ClearPageUptodate(page); page_cache_release(page); /* pagecache ref */ return 1; failed: - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); return 0; } Index: linux-rt-rebase.q/mm/vmscan.c =================================================================== --- linux-rt-rebase.q.orig/mm/vmscan.c +++ linux-rt-rebase.q/mm/vmscan.c @@ -370,7 +370,7 @@ int remove_mapping(struct address_space BUG_ON(mapping != page_mapping(page)); set_page_nonewrefs(page); - write_lock_irq(&mapping->tree_lock); + spin_lock_irq(&mapping->tree_lock); /* * The non racy check for a busy page. * @@ -405,13 +405,13 @@ int remove_mapping(struct address_space if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; __delete_from_swap_cache(page); - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); swap_free(swap); goto free_it; } __remove_from_page_cache(page); - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); free_it: __clear_page_nonewrefs(page); @@ -419,7 +419,7 @@ free_it: return 1; cannot_free: - write_unlock_irq(&mapping->tree_lock); + spin_unlock_irq(&mapping->tree_lock); clear_page_nonewrefs(page); return 0; } patches/x86_64-convert-to-clockevents.patch0000664000077200007720000003047010655544570020133 0ustar mingomingoSubject: x86_64: convert to clock events Finally switch to the clockevents code. Share code with i386 for hpet and PIT. 
Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/Kconfig | 10 +++ arch/x86_64/kernel/Makefile | 4 + arch/x86_64/kernel/apic.c | 90 +++++++++++++++++++---------------- arch/x86_64/kernel/i8259.c | 46 ------------------ arch/x86_64/kernel/smpboot.c | 4 - arch/x86_64/kernel/time.c | 109 +++++-------------------------------------- include/asm-x86_64/hpet.h | 16 ------ 7 files changed, 76 insertions(+), 203 deletions(-) Index: linux/arch/x86_64/Kconfig =================================================================== --- linux.orig/arch/x86_64/Kconfig +++ linux/arch/x86_64/Kconfig @@ -28,7 +28,15 @@ config GENERIC_TIME bool default y -config GENERIC_CLOCKEVENTS_MIGR +config GENERIC_CLOCKEVENTS + bool + default y + +config GENERIC_CLOCKEVENTS_BROADCAST + bool + default y + +config NONIRQ_WAKEUP bool default y Index: linux/arch/x86_64/kernel/Makefile =================================================================== --- linux.orig/arch/x86_64/kernel/Makefile +++ linux/arch/x86_64/kernel/Makefile @@ -9,7 +9,7 @@ obj-y := process.o signal.o entry.o trap x8664_ksyms.o i387.o syscall.o vsyscall.o \ setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \ pci-dma.o pci-nommu.o alternative.o hpet.o tsc.o bugs.o \ - perfctr-watchdog.o + perfctr-watchdog.o i8253.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_X86_MCE) += mce.o therm_throt.o @@ -48,6 +48,8 @@ obj-y += pcspeaker.o CFLAGS_vsyscall.o := $(PROFILING) -g0 +i8253-y += ../../i386/kernel/i8253.o +hpet-y += ../../i386/kernel/hpet.o therm_throt-y += ../../i386/kernel/cpu/mcheck/therm_throt.o bootflag-y += ../../i386/kernel/bootflag.o cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -858,25 +858,12 @@ static void __setup_APIC_LVTT(unsigned i static void setup_APIC_timer(void) { - unsigned long flags; - int irqen; + struct clock_event_device *levt = &__get_cpu_var(lapic_events); - local_irq_save(flags); + memcpy(levt, &lapic_clockevent, sizeof(*levt)); + levt->cpumask = cpumask_of_cpu(smp_processor_id()); - irqen = ! cpu_isset(smp_processor_id(), - timer_interrupt_broadcast_ipi_mask); - __setup_APIC_LVTT(calibration_result, 0, irqen); - /* Turn off PIT interrupt if we use APIC timer as main timer. - Only works with the PM timer right now - TBD fix it for HPET too. */ - if ((pmtmr_ioport != 0) && - smp_processor_id() == boot_cpu_id && - apic_runs_main_timer == 1 && - !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) { - stop_timer_interrupt(); - apic_runs_main_timer++; - } - local_irq_restore(flags); + clockevents_register_device(levt); } /* @@ -951,18 +938,34 @@ static void __init calibrate_APIC_clock( void __init setup_boot_APIC_clock (void) { + /* + * The local apic timer can be disabled via the kernel commandline. + * Register the lapic timer as a dummy clock event source on SMP + * systems, so the broadcast mechanism is used. On UP systems simply + * ignore it. + */ if (disable_apic_timer) { printk(KERN_INFO "Disabling APIC timer\n"); + /* No broadcast on UP ! */ + if (num_possible_cpus() > 1) + setup_APIC_timer(); return; } printk(KERN_INFO "Using local APIC timer interrupts.\n"); - using_apic_timer = 1; - calibrate_APIC_clock(); + /* - * Now set up the timer for real. + * If nmi_watchdog is set to IO_APIC, we need the + * PIT/HPET going. 
Otherwise register lapic as a dummy + * device. */ + if (nmi_watchdog != NMI_IO_APIC) + lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; + else + printk(KERN_WARNING "APIC timer registered as dummy," + " due to nmi_watchdog=1!\n"); + setup_APIC_timer(); } @@ -1074,22 +1077,34 @@ void setup_APIC_extended_lvt(unsigned ch void smp_local_timer_interrupt(void) { - profile_tick(CPU_PROFILING); -#ifdef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif - if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id) - main_timer_handler(); + int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(lapic_events, cpu); + /* - * We take the 'long' return path, and there every subsystem - * grabs the appropriate locks (kernel lock/ irq lock). + * Normally we should not be here till LAPIC has been initialized but + * in some cases like kdump, its possible that there is a pending LAPIC + * timer interrupt from previous kernel's context and is delivered in + * new kernel the moment interrupts are enabled. * - * We might want to decouple profiling from the 'long path', - * and do the profiling totally in assembly. - * - * Currently this isn't too much of an issue (performance wise), - * we can take more than 100K local irqs per second on a 100 MHz P5. + * Interrupts are enabled early and LAPIC is setup much later, hence + * its possible that when we get here evt->event_handler is NULL. + * Check for event_handler being NULL and discard the interrupt as + * spurious. + */ + if (!evt->event_handler) { + printk(KERN_WARNING + "Spurious LAPIC timer interrupt on cpu %d\n", cpu); + /* Switch it off */ + lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt); + return; + } + + /* + * the NMI deadlock-detector uses this. */ + add_pda(apic_timer_irqs, 1); + + evt->event_handler(evt); } /* @@ -1105,11 +1120,6 @@ void smp_apic_timer_interrupt(struct pt_ struct pt_regs *old_regs = set_irq_regs(regs); /* - * the NMI deadlock-detector uses this. - */ - add_pda(apic_timer_irqs, 1); - - /* * NOTE! We'd better ACK the irq immediately, * because timer handling can be slow. 
*/ @@ -1292,7 +1302,7 @@ static __init int setup_noapictimer(char static __init int setup_apicmaintimer(char *str) { apic_runs_main_timer = 1; - nohpet = 1; + return 1; } __setup("apicmaintimer", setup_apicmaintimer); @@ -1308,7 +1318,7 @@ static __init int setup_apicpmtimer(char { apic_calibrate_pmtmr = 1; notsc_setup(NULL); - return setup_apicmaintimer(NULL); + return 0; } __setup("apicpmtimer", setup_apicpmtimer); Index: linux/arch/x86_64/kernel/i8259.c =================================================================== --- linux.orig/arch/x86_64/kernel/i8259.c +++ linux/arch/x86_64/kernel/i8259.c @@ -444,46 +444,6 @@ void __init init_ISA_irqs (void) } } -static void setup_timer_hardware(void) -{ - outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */ - udelay(10); - outb_p(LATCH & 0xff , 0x40); /* LSB */ - udelay(10); - outb(LATCH >> 8 , 0x40); /* MSB */ -} - -static int timer_resume(struct sys_device *dev) -{ - setup_timer_hardware(); - return 0; -} - -void i8254_timer_resume(void) -{ - setup_timer_hardware(); -} - -static struct sysdev_class timer_sysclass = { - set_kset_name("timer_pit"), - .resume = timer_resume, -}; - -static struct sys_device device_timer = { - .id = 0, - .cls = &timer_sysclass, -}; - -static int __init init_timer_sysfs(void) -{ - int error = sysdev_class_register(&timer_sysclass); - if (!error) - error = sysdev_register(&device_timer); - return error; -} - -device_initcall(init_timer_sysfs); - void __init init_IRQ(void) { int i; @@ -533,12 +493,6 @@ void __init init_IRQ(void) set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); set_intr_gate(ERROR_APIC_VECTOR, error_interrupt); - /* - * Set the clock to HZ Hz, we already have a valid - * vector now: - */ - setup_timer_hardware(); - if (!acpi_ioapic) setup_irq(2, &irq2); } Index: linux/arch/x86_64/kernel/smpboot.c =================================================================== --- linux.orig/arch/x86_64/kernel/smpboot.c +++ linux/arch/x86_64/kernel/smpboot.c @@ -223,8 +223,6 @@ void __cpuinit smp_callin(void) local_irq_disable(); Dprintk("Stack at about %p\n",&cpuid); - disable_APIC_timer(); - /* * Save our processor parameters */ @@ -348,8 +346,6 @@ void __cpuinit start_secondary(void) enable_8259A_irq(0); } - enable_APIC_timer(); - /* * The sibling maps must be set before turing the online map on for * this cpu Index: linux/arch/x86_64/kernel/time.c =================================================================== --- linux.orig/arch/x86_64/kernel/time.c +++ linux/arch/x86_64/kernel/time.c @@ -28,6 +28,8 @@ #include #include #include +#include + #ifdef CONFIG_ACPI #include /* for PM timer frequency */ #include @@ -46,12 +48,8 @@ #include #include -static char *timename = NULL; - DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); -DEFINE_SPINLOCK(i8253_lock); -EXPORT_SYMBOL(i8253_lock); volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; @@ -194,6 +192,13 @@ static irqreturn_t timer_interrupt(int i return IRQ_HANDLED; } +static irqreturn_t timer_event_interrupt(int irq, void *dev_id) +{ + global_clock_event->event_handler(global_clock_event); + + return IRQ_HANDLED; +} + unsigned long read_persistent_clock(void) { unsigned int year, mon, day, hour, min, sec; @@ -291,63 +296,19 @@ static unsigned int __init tsc_calibrate return pmc_now * tsc_khz / (tsc_now - tsc_start); } -static void __pit_init(int val, u8 mode) -{ - unsigned long flags; - - spin_lock_irqsave(&i8253_lock, flags); - outb_p(mode, PIT_MODE); - outb_p(val & 0xff, PIT_CH0); /* LSB */ - outb_p(val >> 8, PIT_CH0); /* MSB 
*/ - spin_unlock_irqrestore(&i8253_lock, flags); -} - -void __init pit_init(void) -{ - __pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */ -} - -void pit_stop_interrupt(void) -{ - __pit_init(0, 0x30); /* mode 0 */ -} - -void stop_timer_interrupt(void) -{ - char *name; - if (hpet_address) { - name = "HPET"; - hpet_timer_stop_set_go(0); - } else { - name = "PIT"; - pit_stop_interrupt(); - } - printk(KERN_INFO "timer: %s interrupt stopped.\n", name); -} - static struct irqaction irq0 = { - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_IRQPOLL, + .handler = timer_event_interrupt, + .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING, .mask = CPU_MASK_NONE, .name = "timer" }; void __init time_init(void) { - if (nohpet) - hpet_address = 0; - - if (hpet_arch_init()) - hpet_address = 0; + if (!hpet_enable()) + setup_pit_timer(); - if (hpet_use_timer) { - /* set tick_nsec to use the proper rate for HPET */ - tick_nsec = TICK_NSEC_HPET; - timename = "HPET"; - } else { - pit_init(); - timename = "PIT"; - } + setup_irq(0, &irq0); tsc_calibrate(); @@ -369,46 +330,4 @@ void __init time_init(void) printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000); init_tsc_clocksource(); - - setup_irq(0, &irq0); -} - -/* - * sysfs support for the timer. - */ - -static int timer_suspend(struct sys_device *dev, pm_message_t state) -{ - return 0; } - -static int timer_resume(struct sys_device *dev) -{ - if (hpet_address) - hpet_reenable(); - else - i8254_timer_resume(); - return 0; -} - -static struct sysdev_class timer_sysclass = { - .resume = timer_resume, - .suspend = timer_suspend, - set_kset_name("timer"), -}; - -/* XXX this sysfs stuff should probably go elsewhere later -john */ -static struct sys_device device_timer = { - .id = 0, - .cls = &timer_sysclass, -}; - -static int time_init_device(void) -{ - int error = sysdev_class_register(&timer_sysclass); - if (!error) - error = sysdev_register(&device_timer); - return error; -} - -device_initcall(time_init_device); Index: linux/include/asm-x86_64/hpet.h =================================================================== --- linux.orig/include/asm-x86_64/hpet.h +++ linux/include/asm-x86_64/hpet.h @@ -1,18 +1,2 @@ -#ifndef _ASM_X8664_HPET_H -#define _ASM_X8664_HPET_H 1 #include - -#define HPET_TICK_RATE (HZ * 100000UL) - -extern int hpet_rtc_timer_init(void); -extern int hpet_arch_init(void); -extern int hpet_timer_stop_set_go(unsigned long tick); -extern int hpet_reenable(void); -extern unsigned int hpet_calibrate_tsc(void); - -extern int hpet_use_timer; -extern unsigned long hpet_period; -extern unsigned long hpet_tick; - -#endif patches/ppc-gtod-notrace-fix.patch0000664000077200007720000000075010655544572016513 0ustar mingomingo--- arch/powerpc/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -922,7 +922,7 @@ void div128_by_32(u64 dividend_high, u64 #include -static cycle_t timebase_read(void) +static cycle_t notrace timebase_read(void) { return (cycle_t)get_tb(); } patches/i386-nmi-watchdog-show-regs.patch0000664000077200007720000000101610655544576017545 0ustar mingomingo--- arch/i386/kernel/nmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/i386/kernel/nmi.c =================================================================== --- 
linux-rt-rebase.q.orig/arch/i386/kernel/nmi.c +++ linux-rt-rebase.q/arch/i386/kernel/nmi.c @@ -392,7 +392,7 @@ notrace __kprobes int nmi_watchdog_tick( spin_lock(&lock); printk("NMI backtrace for cpu %d\n", cpu); - dump_stack(); + show_regs(regs); spin_unlock(&lock); cpu_clear(cpu, backtrace_mask); } patches/softlockup-use-cpu-clock.patch0000664000077200007720000000247210655544576017431 0ustar mingomingoSubject: softlockup: use cpu_clock() instead of sched_clock() From: Ingo Molnar sched_clock() is not a reliable time-source, use cpu_clock() instead. Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/kernel/softlockup.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softlockup.c +++ linux-rt-rebase.q/kernel/softlockup.c @@ -42,14 +42,16 @@ static struct notifier_block panic_block * resolution, and we don't need to waste time with a big divide when * 2^30ns == 1.074s. */ -static unsigned long get_timestamp(void) +static unsigned long get_timestamp(int this_cpu) { - return sched_clock() >> 30; /* 2^30 ~= 10^9 */ + return cpu_clock(this_cpu) >> 30; /* 2^30 ~= 10^9 */ } void touch_softlockup_watchdog(void) { - __raw_get_cpu_var(touch_timestamp) = get_timestamp(); + int this_cpu = raw_smp_processor_id(); + + per_cpu(touch_timestamp, this_cpu) = get_timestamp(this_cpu); } EXPORT_SYMBOL(touch_softlockup_watchdog); @@ -95,7 +97,7 @@ void softlockup_tick(void) return; } - now = get_timestamp(); + now = get_timestamp(this_cpu); /* Wake up the high-prio watchdog task every second: */ if (now > (touch_timestamp + 1)) patches/hpet-force-enable-on-ich34.patch0000664000077200007720000000554110655544571017365 0ustar mingomingoFrom us15@os.inf.tu-dresden.de Wed Jun 6 14:34:18 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.1 required=5.0 tests=AWL,MAILTO_TO_SPAM_ADDR autolearn=no version=3.1.7-deb Received: from os.inf.tu-dresden.de (os.inf.tu-dresden.de [141.76.48.99]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (No client certificate requested) by mail.tglx.de (Postfix) with ESMTP id CB67965C065 for ; Wed, 6 Jun 2007 14:34:18 +0200 (CEST) Received: from nova.inf.tu-dresden.de ([141.76.48.73] helo=laptop.hypervisor.org) by os.inf.tu-dresden.de with esmtpsa (TLSv1:AES256-SHA:256) (Exim 4.67) id 1HvuiQ-0000WF-8q; Wed, 06 Jun 2007 14:34:18 +0200 Date: Wed, 6 Jun 2007 14:34:14 +0200 From: "Udo A. Steinberg" To: Thomas Gleixner , Venkatesh Pallipadi Subject: [PATCH]: Enable HPET on ICH3 and ICH4 Message-ID: <20070606143414.6003edd0@laptop.hypervisor.org> X-Mailer: X-Mailer 5.0 Gold Mime-Version: 1.0 Content-Type: multipart/signed; boundary=Sig_TyoZ8hpf907DzN6.B9sCrGr; protocol="application/pgp-signature"; micalg=PGP-SHA1 X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ ICH3 and ICH4 have undocumented HPET capabilities. This patch enables HPET for platforms based around these ICHs. Tested on various ICH3 and ICH4 platforms. Because HPET is not officially documented for ICH3/4 and may not have been validated by chipset folks, we're on thin ice here. I'd recommend testing this patch in -hrt or -mm for a while and wait for success/failure reports before feeding it upstream. Signed-off-by: Udo A. 
Steinberg --- arch/i386/kernel/quirks.c | 8 ++++++++ 1 file changed, 8 insertions(+) Index: linux/arch/i386/kernel/quirks.c =================================================================== --- linux.orig/arch/i386/kernel/quirks.c +++ linux/arch/i386/kernel/quirks.c @@ -232,6 +232,14 @@ static void old_ich_force_enable_hpet(st printk(KERN_DEBUG "Failed to force enable HPET\n"); } +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, + old_ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, + old_ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, + old_ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, + old_ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, old_ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, patches/2.6.21-rc6-lockless5-lockless-probe.patch0000664000077200007720000000431110655544576020624 0ustar mingomingoFrom: Nick Piggin Subject: [patch 5/9] mm: lockless probe Probing pages and radix_tree_tagged are lockless operations with the lockless radix-tree. Convert these users to RCU locking rather than using tree_lock. Signed-off-by: Nick Piggin --- mm/page-writeback.c | 8 +++----- mm/readahead.c | 6 ++---- 2 files changed, 5 insertions(+), 9 deletions(-) Index: linux-rt-rebase.q/mm/page-writeback.c =================================================================== --- linux-rt-rebase.q.orig/mm/page-writeback.c +++ linux-rt-rebase.q/mm/page-writeback.c @@ -1022,17 +1022,15 @@ int test_set_page_writeback(struct page EXPORT_SYMBOL(test_set_page_writeback); /* - * Return true if any of the pages in the mapping are marged with the + * Return true if any of the pages in the mapping are marked with the * passed tag. */ int mapping_tagged(struct address_space *mapping, int tag) { - unsigned long flags; int ret; - - read_lock_irqsave(&mapping->tree_lock, flags); + rcu_read_lock(); ret = radix_tree_tagged(&mapping->page_tree, tag); - read_unlock_irqrestore(&mapping->tree_lock, flags); + rcu_read_unlock(); return ret; } EXPORT_SYMBOL(mapping_tagged); Index: linux-rt-rebase.q/mm/readahead.c =================================================================== --- linux-rt-rebase.q.orig/mm/readahead.c +++ linux-rt-rebase.q/mm/readahead.c @@ -156,20 +156,19 @@ __do_page_cache_readahead(struct address /* * Preallocate as many pages as we will need. */ - read_lock_irq(&mapping->tree_lock); for (page_idx = 0; page_idx < nr_to_read; page_idx++) { pgoff_t page_offset = offset + page_idx; if (page_offset > end_index) break; + rcu_read_lock(); page = radix_tree_lookup(&mapping->page_tree, page_offset); + rcu_read_unlock(); if (page) continue; - read_unlock_irq(&mapping->tree_lock); page = page_cache_alloc_cold(mapping); - read_lock_irq(&mapping->tree_lock); if (!page) break; page->index = page_offset; @@ -178,7 +177,6 @@ __do_page_cache_readahead(struct address SetPageReadahead(page); ret++; } - read_unlock_irq(&mapping->tree_lock); /* * Now start the IO. We ignore I/O errors - if the page is not patches/i386-prepare-sharing-pit-code.patch0000664000077200007720000000353310655544570020041 0ustar mingomingoSubject: i386: prepare sharing the PIT code PIT clock events work already and the PIT handling is the same for i386 and x86_64. x86_64 does not support PIT as a clock source, so disable the PIT clocksource for x86_64. 
Prepare i8253.h to be shared with x8664 Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/i386/kernel/i8253.c | 4 +++- include/asm-i386/i8253.h | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) Index: linux/arch/i386/kernel/i8253.c =================================================================== --- linux.orig/arch/i386/kernel/i8253.c +++ linux/arch/i386/kernel/i8253.c @@ -13,7 +13,6 @@ #include #include #include -#include DEFINE_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); @@ -120,6 +119,7 @@ void __init setup_pit_timer(void) global_clock_event = &pit_clockevent; } +#ifndef CONFIG_X86_64 /* * Since the PIT overflows every tick, its not very useful * to just read by itself. So use jiffies to emulate a free @@ -204,3 +204,5 @@ static int __init init_pit_clocksource(v return clocksource_register(&clocksource_pit); } arch_initcall(init_pit_clocksource); + +#endif Index: linux/include/asm-i386/i8253.h =================================================================== --- linux.orig/include/asm-i386/i8253.h +++ linux/include/asm-i386/i8253.h @@ -1,8 +1,6 @@ #ifndef __ASM_I8253_H__ #define __ASM_I8253_H__ -#include - /* i8253A PIT registers */ #define PIT_MODE 0x43 #define PIT_CH0 0x40 @@ -10,8 +8,12 @@ extern spinlock_t i8253_lock; +#ifdef CONFIG_GENERIC_CLOCKEVENTS + extern struct clock_event_device *global_clock_event; extern void setup_pit_timer(void); +#endif + #endif /* __ASM_I8253_H__ */ patches/mitigate-resched-flood.patch0000664000077200007720000001247510655544577017114 0ustar mingomingo[PATCH 1/3] mitigate-resched-interrupt-floods Mitigate rescheduling interrupt floods. Background: preempt-rt sends a resched interrupt to all other cpus whenever some realtime task gets preempted. This is to give that task a chance to continue running on some other cpu. Unfortunately this can cause 'resched interrupt floods' when there are large numbers of realtime tasks on the system that are continually being preempted. This patch reduces such interrupts by noting that it is not necessary to send rescheduling interrupts to every cpu in the system, just to those cpus in the affinity mask of the task to be migrated. This works well in the real world, as traditionally realtime tasks are carefully targeted to specific cpus or sets of cpus, meaning users often give such tasks reduced affinity masks. Signed-off-by: Joe Korty --- arch/i386/kernel/smp.c | 9 +++++++++ arch/x86_64/kernel/smp.c | 9 +++++++++ include/asm-i386/smp.h | 2 ++ include/asm-x86_64/smp.h | 3 +++ include/linux/smp.h | 9 +++++++++ kernel/sched.c | 4 ++-- 6 files changed, 34 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/arch/i386/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/smp.c +++ linux-rt-rebase.q/arch/i386/kernel/smp.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -486,6 +487,14 @@ void smp_send_reschedule_allbutself(void send_IPI_allbutself(RESCHEDULE_VECTOR); } +void smp_send_reschedule_allbutself_cpumask(cpumask_t mask) +{ + cpu_clear(smp_processor_id(), mask); + cpus_and(mask, mask, cpu_online_map); + if (!cpus_empty(mask)) + send_IPI_mask(mask, RESCHEDULE_VECTOR); +} + /* * Structure and data for smp_call_function(). This is designed to minimise * static memory requirements. It also looks cleaner. 
Index: linux-rt-rebase.q/arch/x86_64/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/smp.c +++ linux-rt-rebase.q/arch/x86_64/kernel/smp.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -303,6 +304,14 @@ void smp_send_reschedule_allbutself(void send_IPI_allbutself(RESCHEDULE_VECTOR); } +void smp_send_reschedule_allbutself_cpumask(cpumask_t mask) +{ + cpu_clear(smp_processor_id(), mask); + cpus_and(mask, mask, cpu_online_map); + if (!cpus_empty(mask)) + send_IPI_mask(mask, RESCHEDULE_VECTOR); +} + /* * Structure and data for smp_call_function(). This is designed to minimise * static memory requirements. It also looks cleaner. Index: linux-rt-rebase.q/include/asm-i386/smp.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/smp.h +++ linux-rt-rebase.q/include/asm-i386/smp.h @@ -179,4 +179,6 @@ static __inline int logical_smp_processo #endif #endif +#define HAVE_RESCHEDULE_ALLBUTSELF_CPUMASK 1 + #endif Index: linux-rt-rebase.q/include/asm-x86_64/smp.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/smp.h +++ linux-rt-rebase.q/include/asm-x86_64/smp.h @@ -113,5 +113,8 @@ static __inline int logical_smp_processo #else #define cpu_physical_id(cpu) boot_cpu_id #endif /* !CONFIG_SMP */ + +#define HAVE_RESCHEDULE_ALLBUTSELF_CPUMASK 1 + #endif Index: linux-rt-rebase.q/include/linux/smp.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/smp.h +++ linux-rt-rebase.q/include/linux/smp.h @@ -43,6 +43,14 @@ extern void smp_send_reschedule_allbutse */ extern void smp_send_reschedule_allbutself(void); +#ifdef HAVE_RESCHEDULE_ALLBUTSELF_CPUMASK +extern void smp_send_reschedule_allbutself_cpumask(cpumask_t); +#else +static inline void smp_send_reschedule_allbutself_cpumask(cpumask_t mask) { + smp_send_reschedule_allbutself(); +} +#endif + /* * Prepare machine for booting other CPUs. @@ -108,6 +116,7 @@ static inline int up_smp_call_function(v }) static inline void smp_send_reschedule(int cpu) { } static inline void smp_send_reschedule_allbutself(void) { } +static inline void smp_send_reschedule_allbutself_cpumask(cpumask_t) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_single(cpuid, func, info, retry, wait) \ Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -1846,7 +1846,7 @@ out_set_cpu: * nevertheless, maybe one of them can take * this task: */ - smp_send_reschedule_allbutself(); + smp_send_reschedule_allbutself_cpumask(p->cpus_allowed); schedstat_inc(this_rq, rto_wakeup); } @@ -2178,7 +2178,7 @@ static inline void finish_task_switch(st */ if (unlikely(rt_task(current) && prev->se.on_rq && rt_task(prev))) { schedstat_inc(rq, rto_schedule); - smp_send_reschedule_allbutself(); + smp_send_reschedule_allbutself_cpumask(current->cpus_allowed); } #endif prev_state = prev->state; patches/netfilter-more-debugging.patch0000664000077200007720000000160410655544571017444 0ustar mingomingo doing netfilter changes and turning on netfilter debug means we've got to interpret netfilter warning messages a bit more. 
--- include/net/netfilter/nf_conntrack.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) Index: linux/include/net/netfilter/nf_conntrack.h =================================================================== --- linux.orig/include/net/netfilter/nf_conntrack.h +++ linux/include/net/netfilter/nf_conntrack.h @@ -63,11 +63,14 @@ union nf_conntrack_help { #ifdef CONFIG_NETFILTER_DEBUG #define NF_CT_ASSERT(x) \ do { \ - if (!(x)) \ + if (!(x)) { \ /* Wooah! I'm tripping my conntrack in a frenzy of \ netplay... */ \ printk("NF_CT_ASSERT: %s:%i(%s)\n", \ __FILE__, __LINE__, __FUNCTION__); \ + if (printk_ratelimit()) \ + WARN_ON(1); \ + } \ } while(0) #else #define NF_CT_ASSERT(x) patches/rcu-preempt-fix-nmi-watchdog.patch0000664000077200007720000000262610655544573020174 0ustar mingomingoSubject: change die_chain from atomic to raw notifiers From: Ingo Molnar atomic notifier chains are using rcu_read_lock()/unlock(), but those are not NMI-safe in -rt - so switch these chains to raw notifiers. Signed-off-by: Ingo Molnar --- kernel/die_notifier.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) Index: linux/kernel/die_notifier.c =================================================================== --- linux.orig/kernel/die_notifier.c +++ linux/kernel/die_notifier.c @@ -5,7 +5,7 @@ #include -static ATOMIC_NOTIFIER_HEAD(die_chain); +static RAW_NOTIFIER_HEAD(die_chain); int notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) @@ -19,19 +19,19 @@ int notify_die(enum die_val val, const c }; - return atomic_notifier_call_chain(&die_chain, val, &args); + return raw_notifier_call_chain(&die_chain, val, &args); } int register_die_notifier(struct notifier_block *nb) { vmalloc_sync_all(); - return atomic_notifier_chain_register(&die_chain, nb); + return raw_notifier_chain_register(&die_chain, nb); } EXPORT_SYMBOL_GPL(register_die_notifier); int unregister_die_notifier(struct notifier_block *nb) { - return atomic_notifier_chain_unregister(&die_chain, nb); + return raw_notifier_chain_unregister(&die_chain, nb); } EXPORT_SYMBOL_GPL(unregister_die_notifier); patches/bh-state-lock.patch0000664000077200007720000000606110655544574015221 0ustar mingomingo I was compiling a kernel in a shell that I set to a priority of 20, and it locked up on the bit_spin_lock crap of jbd. This patch adds another spinlock to the buffer head and uses that instead of the bit_spins. 
From: Steven Rostedt Signed-off-by: Ingo Molnar -- fs/buffer.c | 3 ++- include/linux/buffer_head.h | 1 + include/linux/jbd.h | 12 ++++++------ 3 files changed, 9 insertions(+), 7 deletions(-) Index: linux-rt-rebase.q/fs/buffer.c =================================================================== --- linux-rt-rebase.q.orig/fs/buffer.c +++ linux-rt-rebase.q/fs/buffer.c @@ -40,7 +40,6 @@ #include #include #include -#include static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); @@ -2958,6 +2957,7 @@ struct buffer_head *alloc_buffer_head(gf if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); spin_lock_init(&ret->b_uptodate_lock); + spin_lock_init(&ret->b_state_lock); get_cpu_var(bh_accounting).nr++; recalc_bh_state(); put_cpu_var(bh_accounting); @@ -2970,6 +2970,7 @@ void free_buffer_head(struct buffer_head { BUG_ON(!list_empty(&bh->b_assoc_buffers)); BUG_ON(spin_is_locked(&bh->b_uptodate_lock)); + BUG_ON(spin_is_locked(&bh->b_state_lock)); kmem_cache_free(bh_cachep, bh); get_cpu_var(bh_accounting).nr--; recalc_bh_state(); Index: linux-rt-rebase.q/include/linux/buffer_head.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/buffer_head.h +++ linux-rt-rebase.q/include/linux/buffer_head.h @@ -70,6 +70,7 @@ struct buffer_head { associated with */ atomic_t b_count; /* users using this buffer_head */ spinlock_t b_uptodate_lock; + spinlock_t b_state_lock; }; /* Index: linux-rt-rebase.q/include/linux/jbd.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/jbd.h +++ linux-rt-rebase.q/include/linux/jbd.h @@ -331,32 +331,32 @@ static inline struct journal_head *bh2jh static inline void jbd_lock_bh_state(struct buffer_head *bh) { - bit_spin_lock(BH_State, &bh->b_state); + spin_lock(&bh->b_state_lock); } static inline int jbd_trylock_bh_state(struct buffer_head *bh) { - return bit_spin_trylock(BH_State, &bh->b_state); + return spin_trylock(&bh->b_state_lock); } static inline int jbd_is_locked_bh_state(struct buffer_head *bh) { - return bit_spin_is_locked(BH_State, &bh->b_state); + return spin_is_locked(&bh->b_state_lock); } static inline void jbd_unlock_bh_state(struct buffer_head *bh) { - bit_spin_unlock(BH_State, &bh->b_state); + spin_unlock(&bh->b_state_lock); } static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) { - bit_spin_lock(BH_JournalHead, &bh->b_state); + spin_lock_irq(&bh->b_uptodate_lock); } static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) { - bit_spin_unlock(BH_JournalHead, &bh->b_state); + spin_unlock_irq(&bh->b_uptodate_lock); } struct jbd_revoke_table_s; patches/version.patch0000664000077200007720000000175010655544577014254 0ustar mingomingoSubject: add -rt extra-version From: Ingo Molnar add -rt extra-version. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- Makefile | 2 +- kernel/workqueue.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/Makefile =================================================================== --- linux-rt-rebase.q.orig/Makefile +++ linux-rt-rebase.q/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 23 -EXTRAVERSION =-rc2 +EXTRAVERSION =-rc2-rt2 NAME = Holy Dancing Manatees, Batman! 
# *DOCUMENTATION* Index: linux-rt-rebase.q/kernel/workqueue.c =================================================================== --- linux-rt-rebase.q.orig/kernel/workqueue.c +++ linux-rt-rebase.q/kernel/workqueue.c @@ -647,6 +647,7 @@ out: return err; } +EXPORT_SYMBOL(schedule_on_each_cpu); /** * schedule_on_each_cpu_wq - call a function on each online CPU on a per-CPU wq patches/preempt-realtime-mips.patch0000664000077200007720000013103210655544574017003 0ustar mingomingo arch/mips/Kconfig | 13 +- arch/mips/kernel/asm-offsets.c | 2 arch/mips/kernel/entry.S | 22 ++-- arch/mips/kernel/i8259.c | 2 arch/mips/kernel/module.c | 2 arch/mips/kernel/process.c | 8 - arch/mips/kernel/scall32-o32.S | 2 arch/mips/kernel/scall64-64.S | 2 arch/mips/kernel/scall64-n32.S | 2 arch/mips/kernel/scall64-o32.S | 2 arch/mips/kernel/semaphore.c | 22 ++-- arch/mips/kernel/signal.c | 4 arch/mips/kernel/signal32.c | 4 arch/mips/kernel/smp.c | 27 ++++ arch/mips/kernel/time.c | 208 ++++++++++++++++++++++++++++++++++++-- arch/mips/kernel/traps.c | 2 arch/mips/mm/init.c | 2 arch/mips/sibyte/cfe/smp.c | 4 arch/mips/sibyte/sb1250/irq.c | 10 + arch/mips/sibyte/sb1250/smp.c | 2 arch/mips/sibyte/swarm/setup.c | 6 + include/asm-mips/asmmacro.h | 8 - include/asm-mips/atomic.h | 1 include/asm-mips/bitops.h | 5 include/asm-mips/hw_irq.h | 1 include/asm-mips/i8259.h | 2 include/asm-mips/io.h | 1 include/asm-mips/linkage.h | 5 include/asm-mips/m48t35.h | 2 include/asm-mips/rwsem.h | 176 ++++++++++++++++++++++++++++++++ include/asm-mips/semaphore.h | 33 +++--- include/asm-mips/spinlock.h | 18 +-- include/asm-mips/spinlock_types.h | 4 include/asm-mips/thread_info.h | 2 include/asm-mips/time.h | 2 include/asm-mips/timeofday.h | 5 include/asm-mips/uaccess.h | 12 -- 37 files changed, 534 insertions(+), 91 deletions(-) Index: linux-rt-rebase.q/arch/mips/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/mips/Kconfig +++ linux-rt-rebase.q/arch/mips/Kconfig @@ -609,18 +609,16 @@ source "arch/mips/philips/pnx8550/common endmenu + config RWSEM_GENERIC_SPINLOCK bool - depends on !PREEMPT_RT default y config RWSEM_XCHGADD_ALGORITHM bool - depends on !PREEMPT_RT config ASM_SEMAPHORES bool -# depends on !PREEMPT_RT default y config ARCH_HAS_ILOG2_U32 @@ -1755,6 +1753,15 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. 
+config GENERIC_TIME + bool + default y + +source "kernel/time/Kconfig" + +config CPU_SPEED + int "CPU speed used for clocksource/clockevent calculations" + default 600 endmenu config LOCKDEP_SUPPORT Index: linux-rt-rebase.q/arch/mips/kernel/asm-offsets.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/asm-offsets.c +++ linux-rt-rebase.q/arch/mips/kernel/asm-offsets.c @@ -10,9 +10,11 @@ */ #include #include +#include #include #include #include +#include #include #include Index: linux-rt-rebase.q/arch/mips/kernel/entry.S =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/entry.S +++ linux-rt-rebase.q/arch/mips/kernel/entry.S @@ -30,7 +30,7 @@ .align 5 #ifndef CONFIG_PREEMPT FEXPORT(ret_from_exception) - local_irq_disable # preempt stop + raw_local_irq_disable # preempt stop b __ret_from_irq #endif FEXPORT(ret_from_irq) @@ -41,7 +41,7 @@ FEXPORT(__ret_from_irq) beqz t0, resume_kernel resume_userspace: - local_irq_disable # make sure we dont miss an + raw_local_irq_disable # make sure we dont miss an # interrupt setting need_resched # between sampling and return LONG_L a2, TI_FLAGS($28) # current->work @@ -51,7 +51,9 @@ resume_userspace: #ifdef CONFIG_PREEMPT resume_kernel: - local_irq_disable + raw_local_irq_disable + lw t0, kernel_preemption + beqz t0, restore_all lw t0, TI_PRE_COUNT($28) bnez t0, restore_all need_resched: @@ -61,7 +63,9 @@ need_resched: LONG_L t0, PT_STATUS(sp) # Interrupts off? andi t0, 1 beqz t0, restore_all + raw_local_irq_disable jal preempt_schedule_irq + sw zero, TI_PRE_COUNT($28) b need_resched #endif @@ -69,7 +73,7 @@ FEXPORT(ret_from_fork) jal schedule_tail # a0 = struct task_struct *prev FEXPORT(syscall_exit) - local_irq_disable # make sure need_resched and + raw_local_irq_disable # make sure need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) # current->work @@ -142,19 +146,21 @@ FEXPORT(restore_partial) # restore part .set at work_pending: - andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS + # a2 is preloaded with TI_FLAGS + andi t0, a2, (_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) beqz t0, work_notifysig work_resched: + raw_local_irq_enable t0 jal schedule - local_irq_disable # make sure need_resched and + raw_local_irq_disable # make sure need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) andi t0, a2, _TIF_WORK_MASK # is there any work to be done # other than syscall tracing? beqz t0, restore_all - andi t0, a2, _TIF_NEED_RESCHED + andi t0, a2, (_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) bnez t0, work_resched work_notifysig: # deal with pending signals and @@ -170,7 +176,7 @@ syscall_exit_work: li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT and t0, a2 # a2 is preloaded with TI_FLAGS beqz t0, work_pending # trace bit set? - local_irq_enable # could let do_syscall_trace() + raw_local_irq_enable # could let do_syscall_trace() # call schedule() instead move a0, sp li a1, 1 Index: linux-rt-rebase.q/arch/mips/kernel/i8259.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/i8259.c +++ linux-rt-rebase.q/arch/mips/kernel/i8259.c @@ -29,9 +29,9 @@ */ static int i8259A_auto_eoi = -1; -DEFINE_SPINLOCK(i8259A_lock); /* some platforms call this... 
*/ void mask_and_ack_8259A(unsigned int); +DEFINE_RAW_SPINLOCK(i8259A_lock); static struct irq_chip i8259A_chip = { .name = "XT-PIC", Index: linux-rt-rebase.q/arch/mips/kernel/module.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/module.c +++ linux-rt-rebase.q/arch/mips/kernel/module.c @@ -40,7 +40,7 @@ struct mips_hi16 { static struct mips_hi16 *mips_hi16_list; static LIST_HEAD(dbe_list); -static DEFINE_SPINLOCK(dbe_lock); +static DEFINE_RAW_SPINLOCK(dbe_lock); void *module_alloc(unsigned long size) { Index: linux-rt-rebase.q/arch/mips/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/process.c +++ linux-rt-rebase.q/arch/mips/kernel/process.c @@ -52,7 +52,7 @@ void __noreturn cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { - while (!need_resched()) { + while (!need_resched() && !need_resched_delayed()) { #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG extern void smtc_idle_loop_hook(void); @@ -61,9 +61,11 @@ void __noreturn cpu_idle(void) if (cpu_wait) (*cpu_wait)(); } - preempt_enable_no_resched(); - schedule(); + local_irq_disable(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); + local_irq_enable(); } } Index: linux-rt-rebase.q/arch/mips/kernel/scall32-o32.S =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/scall32-o32.S +++ linux-rt-rebase.q/arch/mips/kernel/scall32-o32.S @@ -73,7 +73,7 @@ stack_done: 1: sw v0, PT_R2(sp) # result o32_syscall_exit: - local_irq_disable # make sure need_resched and + raw_local_irq_disable # make sure need_resched and # signals dont change between # sampling and return lw a2, TI_FLAGS($28) # current->work Index: linux-rt-rebase.q/arch/mips/kernel/scall64-64.S =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/scall64-64.S +++ linux-rt-rebase.q/arch/mips/kernel/scall64-64.S @@ -72,7 +72,7 @@ NESTED(handle_sys64, PT_SIZE, sp) 1: sd v0, PT_R2(sp) # result n64_syscall_exit: - local_irq_disable # make sure need_resched and + raw_local_irq_disable # make sure need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) # current->work Index: linux-rt-rebase.q/arch/mips/kernel/scall64-n32.S =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/scall64-n32.S +++ linux-rt-rebase.q/arch/mips/kernel/scall64-n32.S @@ -69,7 +69,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) sd v0, PT_R0(sp) # set flag for syscall restarting 1: sd v0, PT_R2(sp) # result - local_irq_disable # make sure need_resched and + raw_local_irq_disable # make sure need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) # current->work Index: linux-rt-rebase.q/arch/mips/kernel/scall64-o32.S =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/scall64-o32.S +++ linux-rt-rebase.q/arch/mips/kernel/scall64-o32.S @@ -98,7 +98,7 @@ NESTED(handle_sys, PT_SIZE, sp) 1: sd v0, PT_R2(sp) # result o32_syscall_exit: - local_irq_disable # make need_resched and + raw_local_irq_disable # make need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) Index: linux-rt-rebase.q/arch/mips/kernel/semaphore.c =================================================================== --- 
linux-rt-rebase.q.orig/arch/mips/kernel/semaphore.c +++ linux-rt-rebase.q/arch/mips/kernel/semaphore.c @@ -36,7 +36,7 @@ * sem->count and sem->waking atomic. Scalability isn't an issue because * this lock is used on UP only so it's just an empty variable. */ -static inline int __sem_update_count(struct semaphore *sem, int incr) +static inline int __sem_update_count(struct compat_semaphore *sem, int incr) { int old_count, tmp; @@ -67,7 +67,7 @@ static inline int __sem_update_count(str : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) : "r" (incr), "m" (sem->count)); } else { - static DEFINE_SPINLOCK(semaphore_lock); + static DEFINE_RAW_SPINLOCK(semaphore_lock); unsigned long flags; spin_lock_irqsave(&semaphore_lock, flags); @@ -80,7 +80,7 @@ static inline int __sem_update_count(str return old_count; } -void __up(struct semaphore *sem) +void __compat_up(struct compat_semaphore *sem) { /* * Note that we incremented count in up() before we came here, @@ -94,7 +94,7 @@ void __up(struct semaphore *sem) wake_up(&sem->wait); } -EXPORT_SYMBOL(__up); +EXPORT_SYMBOL(__compat_up); /* * Note that when we come in to __down or __down_interruptible, @@ -104,7 +104,7 @@ EXPORT_SYMBOL(__up); * Thus it is only when we decrement count from some value > 0 * that we have actually got the semaphore. */ -void __sched __down(struct semaphore *sem) +void __sched __compat_down(struct compat_semaphore *sem) { struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); @@ -133,9 +133,9 @@ void __sched __down(struct semaphore *se wake_up(&sem->wait); } -EXPORT_SYMBOL(__down); +EXPORT_SYMBOL(__compat_down); -int __sched __down_interruptible(struct semaphore * sem) +int __sched __compat_down_interruptible(struct compat_semaphore * sem) { int retval = 0; struct task_struct *tsk = current; @@ -165,4 +165,10 @@ int __sched __down_interruptible(struct return retval; } -EXPORT_SYMBOL(__down_interruptible); +EXPORT_SYMBOL(__compat_down_interruptible); + +int fastcall compat_sem_is_locked(struct compat_semaphore *sem) +{ + return (int) atomic_read(&sem->count) < 0; +} +EXPORT_SYMBOL(compat_sem_is_locked); Index: linux-rt-rebase.q/arch/mips/kernel/signal.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/signal.c +++ linux-rt-rebase.q/arch/mips/kernel/signal.c @@ -629,6 +629,10 @@ static void do_signal(struct pt_regs *re siginfo_t info; int signr; +#ifdef CONFIG_PREEMPT_RT + local_irq_enable(); + preempt_check_resched(); +#endif /* * We want the common case to go fast, which is why we may in certain * cases get here from kernel mode. Just return without doing anything Index: linux-rt-rebase.q/arch/mips/kernel/signal32.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/signal32.c +++ linux-rt-rebase.q/arch/mips/kernel/signal32.c @@ -655,6 +655,10 @@ static int setup_rt_frame_32(struct k_si if (err) goto give_sigsegv; +#ifdef CONFIG_PREEMPT_RT + local_irq_enable(); + preempt_check_resched(); +#endif /* * Arguments to signal handler: * Index: linux-rt-rebase.q/arch/mips/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/smp.c +++ linux-rt-rebase.q/arch/mips/kernel/smp.c @@ -89,7 +89,22 @@ asmlinkage __cpuinit void start_secondar cpu_idle(); } -DEFINE_SPINLOCK(smp_call_lock); +DEFINE_RAW_SPINLOCK(smp_call_lock); + +/* + * this function sends a 'reschedule' IPI to all other CPUs. 
+ * This is used when RT tasks are starving and other CPUs + * might be able to run them. + */ +void smp_send_reschedule_allbutself(void) +{ + int cpu = smp_processor_id(); + int i; + + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i) && i != cpu) + core_send_ipi(i, SMP_RESCHEDULE_YOURSELF); +} struct call_data_struct *call_data; @@ -331,6 +346,8 @@ int setup_profiling_timer(unsigned int m return 0; } +static DEFINE_RAW_SPINLOCK(tlbstate_lock); + static void flush_tlb_all_ipi(void *info) { local_flush_tlb_all(); @@ -388,6 +405,7 @@ static inline void smp_on_each_tlb(void void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); + spin_lock(&tlbstate_lock); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm); @@ -397,6 +415,7 @@ void flush_tlb_mm(struct mm_struct *mm) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } + spin_unlock(&tlbstate_lock); local_flush_tlb_mm(mm); preempt_enable(); @@ -420,6 +439,8 @@ void flush_tlb_range(struct vm_area_stru struct mm_struct *mm = vma->vm_mm; preempt_disable(); + spin_lock(&tlbstate_lock); + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { struct flush_tlb_data fd; @@ -433,6 +454,7 @@ void flush_tlb_range(struct vm_area_stru if (smp_processor_id() != i) cpu_context(i, mm) = 0; } + spin_unlock(&tlbstate_lock); local_flush_tlb_range(vma, start, end); preempt_enable(); } @@ -463,6 +485,8 @@ static void flush_tlb_page_ipi(void *inf void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { preempt_disable(); + spin_lock(&tlbstate_lock); + if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { struct flush_tlb_data fd; @@ -475,6 +499,7 @@ void flush_tlb_page(struct vm_area_struc if (smp_processor_id() != i) cpu_context(i, vma->vm_mm) = 0; } + spin_unlock(&tlbstate_lock); local_flush_tlb_page(vma, page); preempt_enable(); } Index: linux-rt-rebase.q/arch/mips/kernel/time.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/time.c +++ linux-rt-rebase.q/arch/mips/kernel/time.c @@ -10,6 +10,11 @@ * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. + * + * This implementation of High Res Timers uses two timers. One is the system + * timer. The second is used for the high res timers. The high res timers + * require the CPU to have count/compare registers. The mips_set_next_event() + * function schedules the next high res timer interrupt. 
*/ #include #include @@ -23,6 +28,7 @@ #include #include #include +#include #include #include @@ -47,7 +53,27 @@ /* * forward reference */ -DEFINE_SPINLOCK(rtc_lock); +DEFINE_RAW_SPINLOCK(rtc_lock); + +/* any missed timer interrupts */ +int missed_timer_count; + +#ifdef CONFIG_HIGH_RES_TIMERS +static void mips_set_next_event(unsigned long evt); +static void mips_set_mode(int mode, void *priv); + +static struct clock_event lapic_clockevent = { + .name = "mips clockevent interface", + .capabilities = CLOCK_CAP_NEXTEVT | CLOCK_CAP_PROFILE | + CLOCK_HAS_IRQHANDLER +#ifdef CONFIG_SMP + | CLOCK_CAP_UPDATE +#endif + , + .shift = 32, + .set_next_event = mips_set_next_event, +}; +#endif /* * By default we provide the null RTC ops @@ -56,6 +82,129 @@ static unsigned long null_rtc_get_time(v { return mktime(2000, 1, 1, 0, 0, 0); } +#ifdef CONFIG_SMP +/* + * We have to synchronize the master CPU with all the slave CPUs + */ +static atomic_t cpus_started; +static atomic_t cpus_ready; +static atomic_t cpus_count; +/* + * Master processor inits + */ +static void sync_cpus_init(int v) +{ + atomic_set(&cpus_count, 0); + mb(); + atomic_set(&cpus_started, v); + mb(); + atomic_set(&cpus_ready, v); + mb(); +} + +/* + * Called by the master processor + */ +static void sync_cpus_master(int v) +{ + atomic_set(&cpus_count, 0); + mb(); + atomic_set(&cpus_started, v); + mb(); + /* Wait here till all other CPUs are now ready */ + while (atomic_read(&cpus_count) != (num_online_cpus() -1) ) + mb(); + atomic_set(&cpus_ready, v); + mb(); +} +/* + * Called by the slave processors + */ +static void sync_cpus_slave(int v) +{ + /* Check if the master has been through this */ + while (atomic_read(&cpus_started) != v) + mb(); + atomic_inc(&cpus_count); + mb(); + while (atomic_read(&cpus_ready) != v) + mb(); +} +/* + * Called by the slave CPUs when done syncing the count register + * with the master processor + */ +static void sync_cpus_slave_exit(int v) +{ + while (atomic_read(&cpus_started) != v) + mb(); + atomic_inc(&cpus_count); + mb(); +} + +#define LOOPS 100 +static u32 c0_count[NR_CPUS]; /* Count register per CPU */ +static u32 c[NR_CPUS][LOOPS + 1]; /* Count register per CPU per loop for syncing */ + +/* + * Slave processors execute this via IPI + */ +static void sync_c0_count_slave(void *info) +{ + int cpus = 1, loop, prev_count = 0, cpu = smp_processor_id(); + unsigned long flags; + u32 diff_count; /* CPU count registers are 32-bit */ + local_irq_save(flags); + + for(loop = 0; loop <= LOOPS; loop++) { + /* Sync with the Master processor */ + sync_cpus_slave(cpus++); + c[cpu][loop] = c0_count[cpu] = read_c0_count(); + mb(); + sync_cpus_slave(cpus++); + diff_count = c0_count[0] - c0_count[cpu]; + diff_count += prev_count; + diff_count += read_c0_count(); + write_c0_count(diff_count); + prev_count = (prev_count >> 1) + + ((int)(c0_count[0] - c0_count[cpu]) >> 1); + } + + /* Slave processor is done syncing count register with Master */ + sync_cpus_slave_exit(cpus++); + printk("SMP: Slave processor %d done syncing count \n", cpu); + local_irq_restore(flags); +} + +/* + * Master kicks off the syncing process + */ +void sync_c0_count_master(void) +{ + int cpus = 0, loop, cpu = smp_processor_id(); + unsigned long flags; + + printk("SMP: Starting to sync the c0 count register ... 
\n"); + sync_cpus_init(cpus++); + + /* Kick off the slave processors to also start the syncing process */ + smp_call_function(sync_c0_count_slave, NULL, 0, 0); + local_irq_save(flags); + + for (loop = 0; loop <= LOOPS; loop++) { + /* Wait for all the CPUs here */ + sync_cpus_master(cpus++); + c[cpu][loop] = c0_count[cpu] = read_c0_count(); + mb(); + /* Do syncing once more */ + sync_cpus_master(cpus++); + } + sync_cpus_master(cpus++); + local_irq_restore(flags); + + printk("SMP: Syncing process completed accross CPUs ... \n"); +} +#endif /* CONFIG_SMP */ static int null_rtc_set_time(unsigned long sec) { @@ -66,19 +215,30 @@ unsigned long (*rtc_mips_get_time)(void) int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time; int (*rtc_mips_set_mmss)(unsigned long); - /* how many counter cycles in a jiffy */ static unsigned long cycles_per_jiffy __read_mostly; +static unsigned long hrt_cycles_per_jiffy __read_mostly; + + /* expirelo is the count value for next CPU timer interrupt */ static unsigned int expirelo; - /* * Null timer ack for systems not needing one (e.g. i8254). */ static void null_timer_ack(void) { /* nothing */ } +#ifdef CONFIG_HIGH_RES_TIMERS +/* + * Set the next event + */ +static void mips_set_next_event(unsigned long evt) +{ + write_c0_compare(read_c0_count() + evt); +} +#endif + /* * Null high precision timer functions for systems lacking one. */ @@ -95,13 +255,13 @@ static void c0_timer_ack(void) unsigned int count; /* Ack this timer interrupt and set the next one. */ - expirelo += cycles_per_jiffy; + expirelo += hrt_cycles_per_jiffy; write_c0_compare(expirelo); - /* Check to see if we have missed any timer interrupts. */ - while (((count = read_c0_count()) - expirelo) < 0x7fffffff) { - /* missed_timer_count++; */ - expirelo = count + cycles_per_jiffy; + count = read_c0_count(); + if ((count - expirelo) < 0x7fffffff) { + /* missed_timer_count++; */ + expirelo = count + hrt_cycles_per_jiffy; write_c0_compare(expirelo); } } @@ -160,7 +320,7 @@ irqreturn_t timer_interrupt(int irq, voi /* * If we have an externally synchronized Linux clock, then update - * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be + * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be * called as close as possible to 500 ms before the new second starts. */ if (ntp_synced() && @@ -228,6 +388,15 @@ static inline int handle_perf_irq (int r !r2; } +#ifdef CONFIG_HIGH_RES_TIMERS +void event_timer_handler(struct pt_regs *regs) +{ + c0_timer_ack(); + if (lapic_clockevent.event_handler) + lapic_clockevent.event_handler(regs,NULL); +} +#endif + asmlinkage void ll_timer_interrupt(int irq) { int r2 = cpu_has_mips_r2; @@ -235,6 +404,16 @@ asmlinkage void ll_timer_interrupt(int i irq_enter(); kstat_this_cpu.irqs[irq]++; + +#ifdef CONFIG_HIGH_RES_TIMERS + /* + * Run the event handler + */ + if (!r2 || (read_c0_cause() & (1 << 26))) + if (lapic_clockevent.event_handler) + lapic_clockevent.event_handler(regs,NULL); +#endif + if (handle_perf_irq(r2)) goto out; @@ -267,7 +446,7 @@ asmlinkage void ll_local_timer_interrupt * b) (optional) calibrate and set the mips_hpt_frequency * (only needed if you intended to use cpu counter as timer interrupt * source) - * 2) setup xtime based on rtc_mips_get_time(). + * 2) setup xtime based on rtc_get_time(). * 3) calculate a couple of cached variables for later usage * 4) plat_timer_setup() - * a) (optional) over-write any choices made above by time_init(). 
@@ -358,6 +537,9 @@ static void __init init_mips_clocksource void __init time_init(void) { +#ifdef CONFIG_HIGH_RES_TIMERS + u64 temp; +#endif if (board_time_init) board_time_init(); @@ -401,6 +583,12 @@ void __init time_init(void) if (!mips_hpt_frequency) mips_hpt_frequency = calibrate_hpt(); +#ifdef CONFIG_HIGH_RES_TIMERS + hrt_cycles_per_jiffy = ( (CONFIG_CPU_SPEED * 1000000) + HZ / 2) / HZ; +#else + hrt_cycles_per_jiffy = cycles_per_jiffy; +#endif + /* Report the high precision timer rate for a reference. */ printk("Using %u.%03u MHz high precision timer.\n", ((mips_hpt_frequency + 500) / 1000) / 1000, Index: linux-rt-rebase.q/arch/mips/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/traps.c +++ linux-rt-rebase.q/arch/mips/kernel/traps.c @@ -309,7 +309,7 @@ void show_registers(struct pt_regs *regs printk("\n"); } -static DEFINE_SPINLOCK(die_lock); +static DEFINE_RAW_SPINLOCK(die_lock); void __noreturn die(const char * str, struct pt_regs * regs) { Index: linux-rt-rebase.q/arch/mips/mm/init.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/mm/init.c +++ linux-rt-rebase.q/arch/mips/mm/init.c @@ -59,7 +59,7 @@ #endif /* CONFIG_MIPS_MT_SMTC */ -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); +DEFINE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); /* * We have up to 8 empty zeroed pages so we can map one of the right colour Index: linux-rt-rebase.q/arch/mips/sibyte/cfe/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/sibyte/cfe/smp.c +++ linux-rt-rebase.q/arch/mips/sibyte/cfe/smp.c @@ -107,4 +107,8 @@ void prom_smp_finish(void) */ void prom_cpus_done(void) { +#ifdef CONFIG_HIGH_RES_TIMERS + extern void sync_c0_count_master(void); + sync_c0_count_master(); +#endif } Index: linux-rt-rebase.q/arch/mips/sibyte/sb1250/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/sibyte/sb1250/irq.c +++ linux-rt-rebase.q/arch/mips/sibyte/sb1250/irq.c @@ -81,7 +81,7 @@ static struct irq_chip sb1250_irq_type = /* Store the CPU id (not the logical number) */ int sb1250_irq_owner[SB1250_NR_IRQS]; -DEFINE_SPINLOCK(sb1250_imr_lock); +DEFINE_RAW_SPINLOCK(sb1250_imr_lock); void sb1250_mask_irq(int cpu, int irq) { @@ -352,6 +352,10 @@ void __init arch_init_irq(void) #ifdef CONFIG_KGDB imask |= STATUSF_IP6; #endif + +#ifdef CONFIG_HIGH_RES_TIMERS + imask |= STATUSF_IP7; +#endif /* Enable necessary IPs, disable the rest */ change_c0_status(ST0_IM, imask); @@ -429,6 +433,10 @@ asmlinkage void plat_irq_dispatch(void) else #endif +#ifdef CONFIG_HIGH_RES_TIMERS + if (pending & CAUSEF_IP7) + event_timer_handler(regs); +#endif if (pending & CAUSEF_IP4) sb1250_timer_interrupt(); Index: linux-rt-rebase.q/arch/mips/sibyte/sb1250/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/sibyte/sb1250/smp.c +++ linux-rt-rebase.q/arch/mips/sibyte/sb1250/smp.c @@ -59,7 +59,7 @@ void sb1250_smp_finish(void) { extern void sb1250_time_init(void); sb1250_time_init(); - local_irq_enable(); + raw_local_irq_enable(); } /* Index: linux-rt-rebase.q/arch/mips/sibyte/swarm/setup.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/sibyte/swarm/setup.c +++ linux-rt-rebase.q/arch/mips/sibyte/swarm/setup.c @@ -131,6 +131,12 @@ void __init plat_mem_setup(void) rtc_mips_set_time = 
m41t81_set_time; } +#ifdef CONFIG_HIGH_RES_TIMERS + /* + * set the mips_hpt_frequency here + */ + mips_hpt_frequency = CONFIG_CPU_SPEED * 1000000; +#endif printk("This kernel optimized for " #ifdef CONFIG_SIMULATION "simulation" Index: linux-rt-rebase.q/include/asm-mips/asmmacro.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/asmmacro.h +++ linux-rt-rebase.q/include/asm-mips/asmmacro.h @@ -21,7 +21,7 @@ #endif #ifdef CONFIG_MIPS_MT_SMTC - .macro local_irq_enable reg=t0 + .macro raw_local_irq_enable reg=t0 mfc0 \reg, CP0_TCSTATUS ori \reg, \reg, TCSTATUS_IXMT xori \reg, \reg, TCSTATUS_IXMT @@ -29,21 +29,21 @@ _ehb .endm - .macro local_irq_disable reg=t0 + .macro raw_local_irq_disable reg=t0 mfc0 \reg, CP0_TCSTATUS ori \reg, \reg, TCSTATUS_IXMT mtc0 \reg, CP0_TCSTATUS _ehb .endm #else - .macro local_irq_enable reg=t0 + .macro raw_local_irq_enable reg=t0 mfc0 \reg, CP0_STATUS ori \reg, \reg, 1 mtc0 \reg, CP0_STATUS irq_enable_hazard .endm - .macro local_irq_disable reg=t0 + .macro raw_local_irq_disable reg=t0 mfc0 \reg, CP0_STATUS ori \reg, \reg, 1 xori \reg, \reg, 1 Index: linux-rt-rebase.q/include/asm-mips/atomic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/atomic.h +++ linux-rt-rebase.q/include/asm-mips/atomic.h @@ -573,7 +573,6 @@ static __inline__ long atomic64_add_retu raw_local_irq_restore(flags); } #endif -#endif smp_llsc_mb(); Index: linux-rt-rebase.q/include/asm-mips/bitops.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/bitops.h +++ linux-rt-rebase.q/include/asm-mips/bitops.h @@ -500,9 +500,6 @@ static inline unsigned long __ffs(unsign } /* - * fls - find last bit set. - * @word: The word to search - * * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
*/ @@ -520,6 +517,8 @@ static inline int fls64(__u64 word) return 64 - word; } +#define __bi_local_irq_save(x) raw_local_irq_save(x) +#define __bi_local_irq_restore(x) raw_local_irq_restore(x) #else #include #endif Index: linux-rt-rebase.q/include/asm-mips/hw_irq.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/hw_irq.h +++ linux-rt-rebase.q/include/asm-mips/hw_irq.h @@ -10,6 +10,7 @@ #include #include +#include extern void disable_8259A_irq(unsigned int irq); extern void enable_8259A_irq(unsigned int irq); Index: linux-rt-rebase.q/include/asm-mips/i8259.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/i8259.h +++ linux-rt-rebase.q/include/asm-mips/i8259.h @@ -35,7 +35,7 @@ #define SLAVE_ICW4_DEFAULT 0x01 #define PIC_ICW4_AEOI 2 -extern spinlock_t i8259A_lock; +extern raw_spinlock_t i8259A_lock; extern void init_8259A(int auto_eoi); extern void enable_8259A_irq(unsigned int irq); Index: linux-rt-rebase.q/include/asm-mips/io.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/io.h +++ linux-rt-rebase.q/include/asm-mips/io.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include Index: linux-rt-rebase.q/include/asm-mips/linkage.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/linkage.h +++ linux-rt-rebase.q/include/asm-mips/linkage.h @@ -3,6 +3,11 @@ #ifdef __ASSEMBLY__ #include + +/* FASTCALL stuff */ +#define FASTCALL(x) x +#define fastcall + #endif #endif Index: linux-rt-rebase.q/include/asm-mips/m48t35.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/m48t35.h +++ linux-rt-rebase.q/include/asm-mips/m48t35.h @@ -6,7 +6,7 @@ #include -extern spinlock_t rtc_lock; +extern raw_spinlock_t rtc_lock; struct m48t35_rtc { volatile u8 pad[0x7ff8]; /* starts at 0x7ff8 */ Index: linux-rt-rebase.q/include/asm-mips/rwsem.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/asm-mips/rwsem.h @@ -0,0 +1,176 @@ +/* + * include/asm-mips/rwsem.h: R/W semaphores for MIPS using the stuff + * in lib/rwsem.c. 
Adapted largely from include/asm-ppc/rwsem.h + * by john.cooper@timesys.com + */ + +#ifndef _MIPS_RWSEM_H +#define _MIPS_RWSEM_H + +#ifndef _LINUX_RWSEM_H +#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" +#endif + +#ifdef __KERNEL__ +#include +#include +#include +#include + +/* + * the semaphore definition + */ +struct compat_rw_semaphore { + /* XXX this should be able to be an atomic_t -- paulus */ + signed long count; +#define RWSEM_UNLOCKED_VALUE 0x00000000 +#define RWSEM_ACTIVE_BIAS 0x00000001 +#define RWSEM_ACTIVE_MASK 0x0000ffff +#define RWSEM_WAITING_BIAS (-0x00010000) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + raw_spinlock_t wait_lock; + struct list_head wait_list; +#if RWSEM_DEBUG + int debug; +#endif +}; + +/* + * initialisation + */ +#if RWSEM_DEBUG +#define __RWSEM_DEBUG_INIT , 0 +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __COMPAT_RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEBUG_INIT } + +#define COMPAT_DECLARE_RWSEM(name) \ + struct compat_rw_semaphore name = __COMPAT_RWSEM_INITIALIZER(name) + +extern struct compat_rw_semaphore *rwsem_down_read_failed(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_down_write_failed(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_wake(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_downgrade_wake(struct compat_rw_semaphore *sem); + +static inline void compat_init_rwsem(struct compat_rw_semaphore *sem) +{ + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +#if RWSEM_DEBUG + sem->debug = 0; +#endif +} + +/* + * lock for reading + */ +static inline void __down_read(struct compat_rw_semaphore *sem) +{ + if (atomic_inc_return((atomic_t *)(&sem->count)) > 0) + smp_wmb(); + else + rwsem_down_read_failed(sem); +} + +static inline int __down_read_trylock(struct compat_rw_semaphore *sem) +{ + int tmp; + + while ((tmp = sem->count) >= 0) { + if (tmp == cmpxchg(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + smp_wmb(); + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write(struct compat_rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)); + if (tmp == RWSEM_ACTIVE_WRITE_BIAS) + smp_wmb(); + else + rwsem_down_write_failed(sem); +} + +static inline int __down_write_trylock(struct compat_rw_semaphore *sem) +{ + int tmp; + + tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + smp_wmb(); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct compat_rw_semaphore *sem) +{ + int tmp; + + smp_wmb(); + tmp = atomic_dec_return((atomic_t *)(&sem->count)); + if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct compat_rw_semaphore *sem) +{ + smp_wmb(); + if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)) < 0) + rwsem_wake(sem); +} + +/* + * implement atomic add functionality + */ +static inline void rwsem_atomic_add(int delta, struct compat_rw_semaphore *sem) +{ + atomic_add(delta, (atomic_t *)(&sem->count)); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct 
compat_rw_semaphore *sem) +{ + int tmp; + + smp_wmb(); + tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +/* + * implement exchange and add functionality + */ +static inline int rwsem_atomic_update(int delta, struct compat_rw_semaphore *sem) +{ + smp_mb(); + return atomic_add_return(delta, (atomic_t *)(&sem->count)); +} + +#endif /* __KERNEL__ */ +#endif /* _MIPS_RWSEM_H */ Index: linux-rt-rebase.q/include/asm-mips/semaphore.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/semaphore.h +++ linux-rt-rebase.q/include/asm-mips/semaphore.h @@ -47,39 +47,42 @@ struct compat_semaphore { wait_queue_head_t wait; }; -#define __SEMAPHORE_INITIALIZER(name, n) \ +#define __COMPAT_SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } -#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) +#define __COMPAT_MUTEX_INITIALIZER(name) \ + __COMPAT_SEMAPHORE_INITIALIZER(name, 1) -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) -#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) +#define __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, count) \ + struct compat_semaphore name = __COMPAT_SEMAPHORE_INITIALIZER(name,count) -static inline void sema_init (struct semaphore *sem, int val) +#define COMPAT_DECLARE_MUTEX(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, 1) +#define COMPAT_DECLARE_MUTEX_LOCKED(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, 0) + +static inline void compat_sema_init (struct compat_semaphore *sem, int val) { atomic_set(&sem->count, val); init_waitqueue_head(&sem->wait); } -static inline void init_MUTEX (struct semaphore *sem) +static inline void compat_init_MUTEX (struct compat_semaphore *sem) { - sema_init(sem, 1); + compat_sema_init(sem, 1); } -static inline void init_MUTEX_LOCKED (struct semaphore *sem) +static inline void compat_init_MUTEX_LOCKED (struct compat_semaphore *sem) { - sema_init(sem, 0); + compat_sema_init(sem, 0); } -extern void __down(struct semaphore * sem); -extern int __down_interruptible(struct semaphore * sem); -extern void __up(struct semaphore * sem); +extern void __compat_down(struct compat_semaphore * sem); +extern int __compat_down_interruptible(struct compat_semaphore * sem); +extern void __compat_up(struct compat_semaphore * sem); -static inline void down(struct semaphore * sem) +static inline void compat_down(struct compat_semaphore * sem) { might_sleep(); @@ -112,6 +115,8 @@ static inline void compat_up(struct comp __compat_up(sem); } +extern int compat_sem_is_locked(struct compat_semaphore *sem); + #define compat_sema_count(sem) atomic_read(&(sem)->count) #include Index: linux-rt-rebase.q/include/asm-mips/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/spinlock.h +++ linux-rt-rebase.q/include/asm-mips/spinlock.h @@ -28,7 +28,7 @@ * We make no fairness assumptions. They have a cost. 
*/ -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(__raw_spinlock_t *lock) { unsigned int tmp; @@ -70,7 +70,7 @@ static inline void __raw_spin_lock(raw_s smp_llsc_mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(__raw_spinlock_t *lock) { smp_mb(); @@ -83,7 +83,7 @@ static inline void __raw_spin_unlock(raw : "memory"); } -static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) +static inline unsigned int __raw_spin_trylock(__raw_spinlock_t *lock) { unsigned int temp, res; @@ -144,7 +144,7 @@ static inline unsigned int __raw_spin_tr */ #define __raw_write_can_lock(rw) (!(rw)->lock) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(__raw_rwlock_t *rw) { unsigned int tmp; @@ -189,7 +189,7 @@ static inline void __raw_read_lock(raw_r /* Note the use of sub, not subu which will make the kernel die with an overflow exception if we ever try to unlock an rwlock that is already unlocked or is being held by a writer. */ -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(__raw_rwlock_t *rw) { unsigned int tmp; @@ -223,7 +223,7 @@ static inline void __raw_read_unlock(raw } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(__raw_rwlock_t *rw) { unsigned int tmp; @@ -265,7 +265,7 @@ static inline void __raw_write_lock(raw_ smp_llsc_mb(); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(__raw_rwlock_t *rw) { smp_mb(); @@ -277,7 +277,7 @@ static inline void __raw_write_unlock(ra : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(__raw_rwlock_t *rw) { unsigned int tmp; int ret; @@ -321,7 +321,7 @@ static inline int __raw_read_trylock(raw return ret; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(__raw_rwlock_t *rw) { unsigned int tmp; int ret; Index: linux-rt-rebase.q/include/asm-mips/spinlock_types.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/spinlock_types.h +++ linux-rt-rebase.q/include/asm-mips/spinlock_types.h @@ -7,13 +7,13 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} __raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { 0 } Index: linux-rt-rebase.q/include/asm-mips/thread_info.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/thread_info.h +++ linux-rt-rebase.q/include/asm-mips/thread_info.h @@ -112,6 +112,7 @@ register struct thread_info *__current_t #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ #define TIF_SECCOMP 4 /* secure computing */ +#define TIF_NEED_RESCHED_DELAYED 6 /* reschedule on return to userspace */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -129,6 +130,7 @@ register struct thread_info *__current_t #define _TIF_NEED_RESCHED (1< #include -extern spinlock_t rtc_lock; +extern raw_spinlock_t rtc_lock; /* * RTC ops. By default, they point to no-RTC functions. 
Index: linux-rt-rebase.q/include/asm-mips/timeofday.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/asm-mips/timeofday.h @@ -0,0 +1,5 @@ +#ifndef _ASM_MIPS_TIMEOFDAY_H +#define _ASM_MIPS_TIMEOFDAY_H +#include +#endif + Index: linux-rt-rebase.q/include/asm-mips/uaccess.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/uaccess.h +++ linux-rt-rebase.q/include/asm-mips/uaccess.h @@ -427,7 +427,6 @@ extern size_t __copy_user(void *__to, co const void *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ @@ -483,7 +482,6 @@ extern size_t __copy_user_inatomic(void const void *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ @@ -562,7 +560,6 @@ extern size_t __copy_user_inatomic(void const void __user *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ @@ -593,7 +590,6 @@ extern size_t __copy_user_inatomic(void const void __user *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ @@ -611,7 +607,6 @@ extern size_t __copy_user_inatomic(void const void __user *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ @@ -638,7 +633,6 @@ __clear_user(void __user *addr, __kernel { __kernel_size_t res; - might_sleep(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, $0\n\t" @@ -687,7 +681,6 @@ __strncpy_from_user(char *__to, const ch { long res; - might_sleep(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" @@ -724,7 +717,6 @@ strncpy_from_user(char *__to, const char { long res; - might_sleep(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" @@ -743,7 +735,6 @@ static inline long __strlen_user(const c { long res; - might_sleep(); __asm__ __volatile__( "move\t$4, %1\n\t" __MODULE_JAL(__strlen_user_nocheck_asm) @@ -773,7 +764,6 @@ static inline long strlen_user(const cha { long res; - might_sleep(); __asm__ __volatile__( "move\t$4, %1\n\t" __MODULE_JAL(__strlen_user_asm) @@ -790,7 +780,6 @@ static inline long __strnlen_user(const { long res; - might_sleep(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" @@ -821,7 +810,6 @@ static inline long strnlen_user(const ch { long res; - might_sleep(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" patches/neptune-no-at-keyboard.patch0000664000077200007720000000324210655544571017047 0ustar mingomingoneptune needs this to boot ... 
--- drivers/input/keyboard/atkbd.c | 14 ++++++++++++++ drivers/input/mouse/psmouse-base.c | 15 +++++++++++++++ 2 files changed, 29 insertions(+)
Index: linux/drivers/input/keyboard/atkbd.c =================================================================== --- linux.orig/drivers/input/keyboard/atkbd.c +++ linux/drivers/input/keyboard/atkbd.c @@ -1396,9 +1396,23 @@ static ssize_t atkbd_show_err_count(stru return sprintf(buf, "%lu\n", atkbd->err_count); } +static int __read_mostly noatkbd; + +static int __init noatkbd_setup(char *str) +{ + noatkbd = 1; + printk(KERN_INFO "debug: not setting up AT keyboard.\n"); + + return 1; +} + +__setup("noatkbd", noatkbd_setup); static int __init atkbd_init(void) { + if (noatkbd) + return 0; + return serio_register_driver(&atkbd_drv); }
Index: linux/drivers/input/mouse/psmouse-base.c =================================================================== --- linux.orig/drivers/input/mouse/psmouse-base.c +++ linux/drivers/input/mouse/psmouse-base.c @@ -1594,10 +1594,25 @@ static int psmouse_get_maxproto(char *bu return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); } +static int __read_mostly nopsmouse; + +static int __init nopsmouse_setup(char *str) +{ + nopsmouse = 1; + printk(KERN_INFO "debug: not setting up psmouse.\n"); + + return 1; +} + +__setup("nopsmouse", nopsmouse_setup); + static int __init psmouse_init(void) { int err; + if (nopsmouse) + return 0; + kpsmoused_wq = create_singlethread_workqueue("kpsmoused"); if (!kpsmoused_wq) { printk(KERN_ERR "psmouse: failed to create kpsmoused workqueue\n");
patches/ioapic-fix-too-fast-clocks.patch0000664000077200007720000000272010655544571017615 0ustar mingomingoFrom: Akira Tsukamoto This one-line patch adds upper-bound testing inside timer_irq_works() when evaluating whether the irq timer works or not on boot up. It fixes machines that have problems with the clock running too fast. What this patch does is: if timer interrupts run too fast through the IO-APIC IRQ, then fall back to the i8259A IRQ. I really appreciate the feedback from the ATI Xpress 200 chipset user; it should eliminate the need to add no_timer_check to the kernel options. I have an NEC laptop using the ATI Xpress 200 chipset with a Pentium M 1.8GHz, and its clock keeps running ahead when the kernel is compiled with local APIC support. Many machines based on the RS200 chipset seem to have the same problem, including the Acer Ferrari 400X AMD notebook and the Compaq R4000. I would also like comments on the upper bound limit, 16 ticks, which I chose in this patch. My laptop always reports around 20, which is double the normal value. arch/i386/kernel/io_apic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
Index: linux/arch/i386/kernel/io_apic.c =================================================================== --- linux.orig/arch/i386/kernel/io_apic.c +++ linux/arch/i386/kernel/io_apic.c @@ -1900,7 +1900,7 @@ static int __init timer_irq_works(void) * might have cached one ExtINT interrupt. Finally, at * least one tick may be lost due to delays. */ - if (jiffies - t1 > 4) + if (jiffies - t1 > 4 && jiffies - t1 < 16) return 1; return 0;
patches/softlockup-add-irq-regs-h.patch0000664000077200007720000000272410655544576017463 0ustar mingomingoSubject: core: make asm/irq_regs.h available on every platform From: Ingo Molnar the softlockup detector would like to use get_irq_regs(), so generalize the availability on every Linux architecture. (it is fine for an architecture to always return NULL to get_irq_regs(), which it does by default.)
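As an illustration of the calling convention this enables (this sketch is not part of the patch; the function below is hypothetical), a get_irq_regs() user simply has to tolerate a NULL return, which is what an architecture that never calls set_irq_regs() hands back:

#include <linux/kernel.h>	/* dump_stack() */
#include <linux/sched.h>	/* show_regs() */
#include <asm/irq_regs.h>	/* get_irq_regs() */

/* hypothetical example of a NULL-tolerant get_irq_regs() user */
static void report_stuck_context(void)
{
	struct pt_regs *regs = get_irq_regs();

	if (regs)
		show_regs(regs);	/* registers of the interrupted context */
	else
		dump_stack();		/* arch returned NULL: fall back */
}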
Signed-off-by: Ingo Molnar --- include/asm-arm26/irq_regs.h | 1 + include/asm-cris/irq_regs.h | 1 + include/asm-ppc/irq_regs.h | 1 + include/asm-v850/irq_regs.h | 1 + 4 files changed, 4 insertions(+) Index: linux-rt-rebase.q/include/asm-arm26/irq_regs.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/asm-arm26/irq_regs.h @@ -0,0 +1 @@ +#include Index: linux-rt-rebase.q/include/asm-cris/irq_regs.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/asm-cris/irq_regs.h @@ -0,0 +1 @@ +#include Index: linux-rt-rebase.q/include/asm-ppc/irq_regs.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/asm-ppc/irq_regs.h @@ -0,0 +1 @@ +#include Index: linux-rt-rebase.q/include/asm-v850/irq_regs.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/asm-v850/irq_regs.h @@ -0,0 +1 @@ +#include patches/preempt-realtime-irqs.patch0000664000077200007720000001202110655544575017006 0ustar mingomingo--- include/linux/irq.h | 10 ++++------ kernel/irq/handle.c | 13 +++++++++++-- kernel/irq/manage.c | 18 ++++++++++++++---- kernel/irq/spurious.c | 3 +-- 4 files changed, 30 insertions(+), 14 deletions(-) Index: linux-rt-rebase.q/include/linux/irq.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/irq.h +++ linux-rt-rebase.q/include/linux/irq.h @@ -146,7 +146,6 @@ struct irq_chip { * @last_unhandled: aging timer for unhandled count * @thread: Thread pointer for threaded preemptible irq handling * @wait_for_handler: Waitqueue to wait for a running preemptible handler - * @cycles: Timestamp for stats and debugging * @lock: locking for SMP * @affinity: IRQ affinity on SMP * @cpu: cpu index useful for balancing @@ -169,10 +168,10 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned int irqs_unhandled; unsigned long last_unhandled; /* Aging timer for unhandled count */ - struct task_struct *thread; - wait_queue_head_t wait_for_handler; - cycles_t timestamp; - spinlock_t lock; + struct task_struct *thread; + wait_queue_head_t wait_for_handler; + cycles_t timestamp; + raw_spinlock_t lock; #ifdef CONFIG_SMP cpumask_t affinity; unsigned int cpu; @@ -398,7 +397,6 @@ extern int set_irq_msi(unsigned int irq, /* Early initialization of irqs */ extern void early_init_hardirqs(void); -extern cycles_t irq_timestamp(unsigned int irq); #if defined(CONFIG_PREEMPT_HARDIRQS) extern void init_hardirqs(void); Index: linux-rt-rebase.q/kernel/irq/handle.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/handle.c +++ linux-rt-rebase.q/kernel/irq/handle.c @@ -54,12 +54,13 @@ struct irq_desc irq_desc[NR_IRQS] __cach .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = RAW_SPIN_LOCK_UNLOCKED(irq_desc), #ifdef CONFIG_SMP .affinity = CPU_MASK_ALL #endif } }; +EXPORT_SYMBOL_GPL(irq_desc); /* * What should we do if we get a hw irq event on an illegal vector? 
@@ -151,6 +152,7 @@ irqreturn_t handle_IRQ_event(unsigned in ret = action->handler(irq, action->dev_id); if (preempt_count() != preempt_count) { + stop_trace(); print_symbol("BUG: unbalanced irq-handler preempt count in %s!\n", (unsigned long) action->handler); printk("entered with %08x, exited with %08x.\n", preempt_count, preempt_count()); dump_stack(); @@ -225,7 +227,7 @@ int redirect_hardirq(struct irq_desc *de * This is the original x86 implementation which is used for every * interrupt type. */ -fastcall unsigned int __do_IRQ(unsigned int irq) +fastcall notrace unsigned int __do_IRQ(unsigned int irq) { struct irq_desc *desc = irq_desc + irq; struct irqaction *action; @@ -246,6 +248,13 @@ fastcall unsigned int __do_IRQ(unsigned desc->chip->end(irq); return 1; } + /* + * If the task is currently running in user mode, don't + * detect soft lockups. If CONFIG_DETECT_SOFTLOCKUP is not + * configured, this should be optimized out. + */ + if (user_mode(get_irq_regs())) + touch_softlockup_watchdog(); spin_lock(&desc->lock); if (desc->chip->ack) Index: linux-rt-rebase.q/kernel/irq/manage.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/manage.c +++ linux-rt-rebase.q/kernel/irq/manage.c @@ -578,9 +578,9 @@ int request_irq(unsigned int irq, irq_ha if (irqflags & IRQF_DISABLED) { unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); handler(irq, dev_id); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else handler(irq, dev_id); } @@ -600,6 +600,11 @@ int hardirq_preemption = 1; EXPORT_SYMBOL(hardirq_preemption); +/* + * Real-Time Preemption depends on hardirq threading: + */ +#ifndef CONFIG_PREEMPT_RT + static int __init hardirq_preempt_setup (char *str) { if (!strncmp(str, "off", 3)) @@ -614,6 +619,7 @@ static int __init hardirq_preempt_setup __setup("hardirq-preempt=", hardirq_preempt_setup); +#endif /* * threaded simple handler @@ -773,12 +779,16 @@ static int do_irqd(void * __desc) sys_sched_setscheduler(current->pid, SCHED_FIFO, ¶m); while (!kthread_should_stop()) { - local_irq_disable(); + local_irq_disable_nort(); set_current_state(TASK_INTERRUPTIBLE); +#ifndef CONFIG_PREEMPT_RT irq_enter(); +#endif do_hardirq(desc); +#ifndef CONFIG_PREEMPT_RT irq_exit(); - local_irq_enable(); +#endif + local_irq_enable_nort(); cond_resched(); #ifdef CONFIG_SMP /* Index: linux-rt-rebase.q/kernel/irq/spurious.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/spurious.c +++ linux-rt-rebase.q/kernel/irq/spurious.c @@ -59,9 +59,8 @@ static int misrouted_irq(int irq) } action = action->next; } - local_irq_disable(); /* Now clean up the flags */ - spin_lock(&desc->lock); + spin_lock_irq(&desc->lock); action = desc->action; /* patches/x86_64-tsc-sync-irqflags-fix.patch0000664000077200007720000000136610655544571017655 0ustar mingomingo--- arch/x86_64/kernel/tsc_sync.c | 4 ++++ 1 file changed, 4 insertions(+) Index: linux/arch/x86_64/kernel/tsc_sync.c =================================================================== --- linux.orig/arch/x86_64/kernel/tsc_sync.c +++ linux/arch/x86_64/kernel/tsc_sync.c @@ -97,6 +97,7 @@ static __cpuinit void check_tsc_warp(voi */ void __cpuinit check_tsc_sync_source(int cpu) { + unsigned long flags; int cpus = 2; /* @@ -117,8 +118,11 @@ void __cpuinit check_tsc_sync_source(int /* * Wait for the target to arrive: */ + local_save_flags(flags); + local_irq_enable(); while (atomic_read(&start_count) != cpus-1) cpu_relax(); + 
local_irq_restore(flags); /* * Trigger the target to continue into the measurement too: */ patches/preempt-irqs-direct-debug-keyboard.patch0000664000077200007720000000477310655544573021355 0ustar mingomingo--- include/linux/sched.h | 6 ++++++ init/main.c | 2 ++ kernel/irq/handle.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+) Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -267,6 +267,12 @@ extern void trap_init(void); extern void update_process_times(int user); extern void scheduler_tick(void); +#ifdef CONFIG_GENERIC_HARDIRQS +extern int debug_direct_keyboard; +#else +# define debug_direct_keyboard 0 +#endif + #ifdef CONFIG_DETECT_SOFTLOCKUP extern void softlockup_tick(void); extern void spawn_softlockup_task(void); Index: linux-rt-rebase.q/init/main.c =================================================================== --- linux-rt-rebase.q.orig/init/main.c +++ linux-rt-rebase.q/init/main.c @@ -867,5 +867,7 @@ static int __init kernel_init(void * unu * initmem segments and start the user-mode stuff.. */ init_post(); + WARN_ON(debug_direct_keyboard); + return 0; } Index: linux-rt-rebase.q/kernel/irq/handle.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/handle.c +++ linux-rt-rebase.q/kernel/irq/handle.c @@ -132,6 +132,11 @@ irqreturn_t handle_IRQ_event(unsigned in irqreturn_t ret, retval = IRQ_NONE; unsigned int status = 0; +#ifdef __i386__ + if (debug_direct_keyboard && irq == 1) + lockdep_off(); +#endif + handle_dynamic_tick(action); /* @@ -163,9 +168,30 @@ irqreturn_t handle_IRQ_event(unsigned in } local_irq_disable(); +#ifdef __i386__ + if (debug_direct_keyboard && irq == 1) + lockdep_on(); +#endif return retval; } +/* + * Hack - used for development only. + */ +int __read_mostly debug_direct_keyboard = 0; + +int __init debug_direct_keyboard_setup(char *str) +{ + debug_direct_keyboard = 1; + printk(KERN_INFO "Switching IRQ 1 (keyboard) to to direct!\n"); +#ifdef CONFIG_PREEMPT_RT + printk(KERN_INFO "WARNING: kernel may easily crash this way!\n"); +#endif + return 1; +} + +__setup("debug_direct_keyboard", debug_direct_keyboard_setup); + int redirect_hardirq(struct irq_desc *desc) { /* @@ -175,6 +201,11 @@ int redirect_hardirq(struct irq_desc *de !desc->thread) return 0; +#ifdef __i386__ + if (debug_direct_keyboard && (desc - irq_desc == 1)) + return 0; +#endif + BUG_ON(!irqs_disabled()); if (desc->thread && desc->thread->state != TASK_RUNNING) wake_up_process(desc->thread); patches/ns2cyc-result-fix.patch0000664000077200007720000000530510655544571016062 0ustar mingomingoFrom sshtylyov@ru.mvista.com Wed May 16 18:11:13 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from imap.sh.mvista.com (unknown [63.81.120.155]) by mail.tglx.de (Postfix) with ESMTP id B7F0D65C065 for ; Wed, 16 May 2007 18:11:13 +0200 (CEST) Received: from wasted.dev.rtsoft.ru (unknown [10.150.0.9]) by imap.sh.mvista.com (Postfix) with ESMTP id 11FC13EC9 for ; Wed, 16 May 2007 08:38:17 -0700 (PDT) From: Sergei Shtylyov Organization: MontaVista Software Inc. 
To: tglx@linutronix.de Subject: [PATCH 2.6.21-rt1] ns2cyc() result fix Date: Wed, 16 May 2007 18:39:50 +0300 User-Agent: KMail/1.5 MIME-Version: 1.0 Content-Disposition: inline Content-Type: text/plain; charset="iso-8859-1" Message-Id: <200705161939.50242.sshtylyov@ru.mvista.com> X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Fix the dubious use of cycles_t where cycle_t was appropriate. On the machines with 32-bit cycles_t (like ARM/PPC) it caused these warnings: In file included from arch/powerpc/kernel/time.c:1045: include/linux/clocksource.h: In function `ns2cyc': include/linux/clocksource.h:213: warning: comparison of distinct pointer types lacks a cast include/linux/clocksource.h:213: warning: right shift count >= width of type include/linux/clocksource.h:213: warning: passing argument 1 of `__div64_32' from incompatible pointer type This function and therefore usecs_to_cycles() was unlikely to return a correct result on such machines because of the shift result truncation. Signed-off-by: Sergei Shtylyov --- I'm also uncertain about 'preempt_max_latency' and 'preempt_thresh' variables being declared as 'unsigned long' -- however, looks like those are unlikely to overflow... yet it's unclear why there's casts to 'cycle_t' (which is always 64-bit) when initializing/comparing them... --- include/linux/clocksource.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux/include/linux/clocksource.h =================================================================== --- linux.orig/include/linux/clocksource.h +++ linux/include/linux/clocksource.h @@ -189,9 +189,9 @@ static inline s64 cyc2ns(struct clocksou * @cs: Pointer to clocksource * @nsecs: Nanoseconds */ -static inline cycles_t ns2cyc(struct clocksource *cs, u64 nsecs) +static inline cycle_t ns2cyc(struct clocksource *cs, u64 nsecs) { - cycles_t ret = nsecs << cs->shift; + cycle_t ret = nsecs << cs->shift; do_div(ret, cs->mult + 1); patches/2.6.21-rc6-lockless1-prep-find_lock_page.patch0000664000077200007720000000261510655544576021571 0ustar mingomingoFrom: Nick Piggin Subject: [patch 1/9] mm: prep find_lock_page find_lock_page does not need to recheck ->index because if the page is in the right mapping then the index must be the same. Also, tree_lock does not need to be retaken after the page is locked in order to test ->mapped has not changed. Signed-off-by: Nick Piggin --- mm/filemap.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/mm/filemap.c =================================================================== --- linux-rt-rebase.q.orig/mm/filemap.c +++ linux-rt-rebase.q/mm/filemap.c @@ -621,26 +621,26 @@ struct page *find_lock_page(struct addre { struct page *page; - read_lock_irq(&mapping->tree_lock); repeat: + read_lock_irq(&mapping->tree_lock); page = radix_tree_lookup(&mapping->page_tree, offset); if (page) { page_cache_get(page); if (TestSetPageLocked(page)) { read_unlock_irq(&mapping->tree_lock); __lock_page(page); - read_lock_irq(&mapping->tree_lock); /* Has the page been truncated while we slept? 
*/ - if (unlikely(page->mapping != mapping || - page->index != offset)) { + if (unlikely(page->mapping != mapping)) { unlock_page(page); page_cache_release(page); goto repeat; } + goto out; } } read_unlock_irq(&mapping->tree_lock); +out: return page; } EXPORT_SYMBOL(find_lock_page); patches/futex-performance-hack.patch0000664000077200007720000000330410655544576017121 0ustar mingomingo--- kernel/futex.c | 6 ++++-- kernel/sysctl.c | 9 +++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/kernel/futex.c =================================================================== --- linux-rt-rebase.q.orig/kernel/futex.c +++ linux-rt-rebase.q/kernel/futex.c @@ -120,12 +120,14 @@ static struct futex_hash_bucket futex_qu /* Futex-fs vfsmount entry: */ static struct vfsmount *futex_mnt; +int futex_performance_hack; + /* * Take mm->mmap_sem, when futex is shared */ static inline void futex_lock_mm(struct rw_semaphore *fshared) { - if (fshared) + if (fshared && !futex_performance_hack) down_read(fshared); } @@ -134,7 +136,7 @@ static inline void futex_lock_mm(struct */ static inline void futex_unlock_mm(struct rw_semaphore *fshared) { - if (fshared) + if (fshared && !futex_performance_hack) up_read(fshared); } Index: linux-rt-rebase.q/kernel/sysctl.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sysctl.c +++ linux-rt-rebase.q/kernel/sysctl.c @@ -66,6 +66,7 @@ extern int print_fatal_signals; extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern int sysctl_panic_on_oom; +extern int futex_performance_hack; extern int max_threads; extern int core_uses_pid; extern int suid_dumpable; @@ -324,6 +325,14 @@ static ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, { + .ctl_name = CTL_UNNUMBERED, + .procname = "futex_performance_hack", + .data = &futex_performance_hack, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { .ctl_name = KERN_PANIC, .procname = "prof_pid", .data = &prof_pid, patches/percpu_list.patch0000664000077200007720000000537710655544576015130 0ustar mingomingoSubject: percpu_list give the lock_list a percpu_head to in order to decrease list head contention due to list adding. 
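
For illustration only: the sketch below is not part of the patch. The my_* helpers, the struct my_object layout and the linux/lock_list.h header name are assumptions made up for this example; only the percpu_list_* calls are provided by the header introduced here. A caller would batch additions on the local CPU's sub-list and fold the sub-lists back into the global head before walking it:

        #include <linux/percpu_list.h>
        #include <linux/lock_list.h>    /* assumed location of lock_list_head */

        struct my_object {
                struct lock_list_head entry;    /* hypothetical list linkage */
        };

        static struct percpu_list my_list;

        static void my_subsys_init(void)
        {
                /* allocates and initializes the per-cpu heads */
                percpu_list_init(&my_list);
        }

        static void my_add(struct my_object *obj)
        {
                /*
                 * Queues on this CPU's sub-list under its private lock;
                 * the helper folds the sub-list into the global head once
                 * 16 entries have piled up.
                 */
                percpu_list_add(&my_list, &obj->entry);
        }

        static struct lock_list_head *my_walk_prepare(void)
        {
                /* pull in whatever still sits on the per-cpu heads */
                percpu_list_fold(&my_list);
                return percpu_list_head(&my_list);
        }

The fold threshold of 16 in percpu_list_add() keeps the common case contained to the per-cpu lock while bounding how much the global head can lag behind.
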
Signed-off-by: Peter Zijlstra --- include/linux/percpu_list.h | 119 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) Index: linux-rt-rebase.q/include/linux/percpu_list.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/linux/percpu_list.h @@ -0,0 +1,119 @@ +#ifndef _LINUX_PERCPU_LIST_H +#define _LINUX_PERCPU_LIST_H + +#include +#include + +#ifdef CONFIG_SMP + +struct percpu_list_element { + spinlock_t lock; + unsigned long nr; + struct lock_list_head list; +}; + +struct percpu_list { + struct lock_list_head list; + struct percpu_list_element *percpu_list; +}; + +static inline +void percpu_list_init(struct percpu_list *pcl) +{ + int cpu; + + INIT_LOCK_LIST_HEAD(&pcl->list); + pcl->percpu_list = alloc_percpu(struct percpu_list_element); + + for_each_possible_cpu(cpu) { + struct percpu_list_element *pcle; + + pcle = per_cpu_ptr(pcl->percpu_list, cpu); + spin_lock_init(&pcle->lock); + pcle->nr = 0; + INIT_LOCK_LIST_HEAD(&pcle->list); + } +} + +static inline +void percpu_list_destroy(struct percpu_list *pcl) +{ + free_percpu(pcl->percpu_list); +} + +static inline +void percpu_list_fold_cpu(struct percpu_list *pcl, int cpu) +{ + struct percpu_list_element *pcle = per_cpu_ptr(pcl->percpu_list, cpu); + + spin_lock(&pcle->lock); + if (pcle->nr) { + pcle->nr = 0; + lock_list_splice_init(&pcle->list, &pcl->list); + } + spin_unlock(&pcle->lock); +} + +static inline +void percpu_list_add(struct percpu_list *pcl, struct lock_list_head *elm) +{ + struct percpu_list_element *pcle; + int cpu = raw_smp_processor_id(); + unsigned long nr; + + pcle = per_cpu_ptr(pcl->percpu_list, cpu); + spin_lock(&pcle->lock); + nr = ++pcle->nr; + lock_list_add(elm, &pcle->list); + spin_unlock(&pcle->lock); + + if (nr >= 16) + percpu_list_fold_cpu(pcl, cpu); +} + +static inline +void percpu_list_fold(struct percpu_list *pcl) +{ + int cpu; + + for_each_possible_cpu(cpu) + percpu_list_fold_cpu(pcl, cpu); +} + +#else /* CONFIG_SMP */ + +struct percpu_list { + struct lock_list_head list; +} + +static inline +void percpu_list_init(struct percpu_list *pcl) +{ + INIT_LOCK_LIST_HEAD(&pcl->list); +} + +static inline +void percpu_list_destroy(struct percpu_list *pcl) +{ +} + +static inline +void percpu_list_add(struct percpu_list *pcl, struct lock_list_head *elm) +{ + lock_list_add(elm, &pcl->list); +} + +static inline +void percpu_list_fold(struct percpu_list *pcl) +{ +} + +#endif + +static inline +struct lock_list_head *percpu_list_head(struct percpu_list *pcl) +{ + return &pcl->list; +} + +#endif /* _LINUX_PERCPU_LIST_H */ patches/spinlock-trylock-cleanup-sungem.patch0000664000077200007720000000113310655544571021004 0ustar mingomingo--- drivers/net/sungem.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) Index: linux/drivers/net/sungem.c =================================================================== --- linux.orig/drivers/net/sungem.c +++ linux/drivers/net/sungem.c @@ -1034,10 +1034,8 @@ static int gem_start_xmit(struct sk_buff (csum_stuff_off << 21)); } - local_irq_save(flags); - if (!spin_trylock(&gp->tx_lock)) { + if (!spin_trylock_irqsave(&gp->tx_lock, flags)) { /* Tell upper layer to requeue */ - local_irq_restore(flags); return NETDEV_TX_LOCKED; } /* We raced with gem_do_stop() */ patches/preempt-realtime-acpi.patch0000664000077200007720000001316610655544575016757 0ustar mingomingo--- drivers/acpi/ec.c | 12 ++++++++++++ drivers/acpi/hardware/hwregs.c | 16 ++++++++-------- drivers/acpi/processor_idle.c | 2 +- 
drivers/acpi/utilities/utmutex.c | 2 +- include/acpi/acglobal.h | 7 ++++++- include/acpi/acpiosxf.h | 2 +- 6 files changed, 29 insertions(+), 12 deletions(-) Index: linux-rt-rebase.q/drivers/acpi/ec.c =================================================================== --- linux-rt-rebase.q.orig/drivers/acpi/ec.c +++ linux-rt-rebase.q/drivers/acpi/ec.c @@ -483,7 +483,19 @@ static u32 acpi_ec_gpe_handler(void *dat atomic_inc(&ec->event_count); if (acpi_ec_mode == EC_INTR) { +#if 0 wake_up(&ec->wait); +#else + // hack ... + if (waitqueue_active(&ec->wait)) { + struct task_struct *task; + + task = list_entry(ec->wait.task_list.next, + wait_queue_t, task_list)->private; + if (task) + wake_up_process(task); + } +#endif } value = acpi_ec_read_status(ec); Index: linux-rt-rebase.q/drivers/acpi/hardware/hwregs.c =================================================================== --- linux-rt-rebase.q.orig/drivers/acpi/hardware/hwregs.c +++ linux-rt-rebase.q/drivers/acpi/hardware/hwregs.c @@ -73,7 +73,7 @@ acpi_status acpi_hw_clear_acpi_status(vo ACPI_BITMASK_ALL_FIXED_STATUS, (u16) acpi_gbl_FADT.xpm1a_event_block.address)); - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_STATUS, @@ -98,7 +98,7 @@ acpi_status acpi_hw_clear_acpi_status(vo status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block); unlock_and_exit: - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); return_ACPI_STATUS(status); } @@ -331,7 +331,7 @@ acpi_status acpi_set_register(u32 regist return_ACPI_STATUS(AE_BAD_PARAMETER); } - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); /* Always do a register read first so we can insert the new bits */ @@ -441,7 +441,7 @@ acpi_status acpi_set_register(u32 regist unlock_and_exit: - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); /* Normalize the value that was read */ @@ -481,7 +481,7 @@ acpi_hw_register_read(u8 use_lock, u32 r ACPI_FUNCTION_TRACE(hw_register_read); if (ACPI_MTX_LOCK == use_lock) { - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); } switch (register_id) { @@ -560,7 +560,7 @@ acpi_hw_register_read(u8 use_lock, u32 r unlock_and_exit: if (ACPI_MTX_LOCK == use_lock) { - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); } if (ACPI_SUCCESS(status)) { @@ -606,7 +606,7 @@ acpi_status acpi_hw_register_write(u8 us ACPI_FUNCTION_TRACE(hw_register_write); if (ACPI_MTX_LOCK == use_lock) { - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); } switch (register_id) { @@ -730,7 +730,7 @@ acpi_status acpi_hw_register_write(u8 us unlock_and_exit: if (ACPI_MTX_LOCK == use_lock) { - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); } return_ACPI_STATUS(status); Index: linux-rt-rebase.q/drivers/acpi/processor_idle.c =================================================================== --- linux-rt-rebase.q.orig/drivers/acpi/processor_idle.c +++ linux-rt-rebase.q/drivers/acpi/processor_idle.c @@ -948,7 +948,7 @@ static int acpi_idle_enter_c2(struct cpu } static int c3_cpu_count; -static 
DEFINE_SPINLOCK(c3_lock); +static DEFINE_RAW_SPINLOCK(c3_lock); /** * acpi_idle_enter_c3 - enters an ACPI C3 state-type Index: linux-rt-rebase.q/drivers/acpi/utilities/utmutex.c =================================================================== --- linux-rt-rebase.q.orig/drivers/acpi/utilities/utmutex.c +++ linux-rt-rebase.q/drivers/acpi/utilities/utmutex.c @@ -116,7 +116,7 @@ void acpi_ut_mutex_terminate(void) /* Delete the spinlocks */ acpi_os_delete_lock(acpi_gbl_gpe_lock); - acpi_os_delete_lock(acpi_gbl_hardware_lock); +// acpi_os_delete_lock(acpi_gbl_hardware_lock); return_VOID; } Index: linux-rt-rebase.q/include/acpi/acglobal.h =================================================================== --- linux-rt-rebase.q.orig/include/acpi/acglobal.h +++ linux-rt-rebase.q/include/acpi/acglobal.h @@ -184,7 +184,12 @@ ACPI_EXTERN acpi_semaphore acpi_gbl_glob * interrupt level */ ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ -ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ + +/* + * Need to be raw because it might be used in acpi_processor_idle(): + */ +ACPI_EXTERN raw_spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ + #define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock #define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock Index: linux-rt-rebase.q/include/acpi/acpiosxf.h =================================================================== --- linux-rt-rebase.q.orig/include/acpi/acpiosxf.h +++ linux-rt-rebase.q/include/acpi/acpiosxf.h @@ -61,7 +61,7 @@ typedef enum { OSL_EC_BURST_HANDLER } acpi_execute_type; -#define ACPI_NO_UNIT_LIMIT ((u32) -1) +#define ACPI_NO_UNIT_LIMIT (INT_MAX/2) #define ACPI_MUTEX_SEM 1 /* Functions for acpi_os_signal */ patches/panic-dont-stop-box.patch0000664000077200007720000000101310655544575016362 0ustar mingomingo--- kernel/panic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/kernel/panic.c =================================================================== --- linux-rt-rebase.q.orig/kernel/panic.c +++ linux-rt-rebase.q/kernel/panic.c @@ -96,7 +96,7 @@ NORET_TYPE void panic(const char * fmt, * unfortunately means it may not be hardened to work in a panic * situation. 
*/ - smp_send_stop(); +// smp_send_stop(); #endif atomic_notifier_call_chain(&panic_notifier_list, 0, buf); patches/latency-tracing.patch0000664000077200007720000034262710655544571015660 0ustar mingomingo Makefile | 11 arch/i386/lib/delay.c | 6 arch/x86_64/kernel/tsc.c | 4 drivers/clocksource/acpi_pm.c | 8 fs/proc/proc_misc.c | 17 include/linux/clocksource.h | 23 include/linux/kernel.h | 2 include/linux/latency_hist.h | 32 include/linux/preempt.h | 20 include/linux/sched.h | 109 + init/main.c | 2 kernel/Makefile | 5 kernel/fork.c | 2 kernel/latency_hist.c | 267 ++++ kernel/latency_trace.c | 2742 ++++++++++++++++++++++++++++++++++++++++++ kernel/lockdep.c | 33 kernel/panic.c | 2 kernel/printk.c | 2 kernel/sched.c | 82 - kernel/sysctl.c | 128 + kernel/time/timekeeping.c | 27 lib/Kconfig.debug | 186 ++ lib/debug_locks.c | 8 scripts/Makefile | 1 scripts/trace-it.c | 79 + 25 files changed, 3737 insertions(+), 61 deletions(-) Index: linux/Makefile =================================================================== --- linux.orig/Makefile +++ linux/Makefile @@ -491,10 +491,15 @@ endif include $(srctree)/arch/$(ARCH)/Makefile -ifdef CONFIG_FRAME_POINTER -CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls + +ifdef CONFIG_MCOUNT +CFLAGS += -pg -fno-omit-frame-pointer -fno-optimize-sibling-calls else -CFLAGS += -fomit-frame-pointer + ifdef CONFIG_FRAME_POINTER + CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls + else + CFLAGS += -fomit-frame-pointer + endif endif ifdef CONFIG_DEBUG_INFO Index: linux/arch/i386/lib/delay.c =================================================================== --- linux.orig/arch/i386/lib/delay.c +++ linux/arch/i386/lib/delay.c @@ -23,7 +23,7 @@ #endif /* simple loop based delay: */ -static void delay_loop(unsigned long loops) +static notrace void delay_loop(unsigned long loops) { int d0; @@ -38,7 +38,7 @@ static void delay_loop(unsigned long loo } /* TSC based delay: */ -static void delay_tsc(unsigned long loops) +static notrace void delay_tsc(unsigned long loops) { unsigned long bclock, now; @@ -69,7 +69,7 @@ int read_current_timer(unsigned long *ti return -1; } -void __delay(unsigned long loops) +void notrace __delay(unsigned long loops) { delay_fn(loops); } Index: linux/arch/x86_64/kernel/tsc.c =================================================================== --- linux.orig/arch/x86_64/kernel/tsc.c +++ linux/arch/x86_64/kernel/tsc.c @@ -247,13 +247,13 @@ __setup("notsc", notsc_setup); /* clock source code: */ -static cycle_t read_tsc(void) +static notrace cycle_t read_tsc(void) { cycle_t ret = (cycle_t)get_cycles_sync(); return ret; } -static cycle_t __vsyscall_fn vread_tsc(void) +static notrace cycle_t __vsyscall_fn vread_tsc(void) { cycle_t ret = (cycle_t)get_cycles_sync(); return ret; Index: linux/drivers/clocksource/acpi_pm.c =================================================================== --- linux.orig/drivers/clocksource/acpi_pm.c +++ linux/drivers/clocksource/acpi_pm.c @@ -30,13 +30,13 @@ */ u32 pmtmr_ioport __read_mostly; -static inline u32 read_pmtmr(void) +static notrace inline u32 read_pmtmr(void) { /* mask the output to 24 bits */ return inl(pmtmr_ioport) & ACPI_PM_MASK; } -u32 acpi_pm_read_verified(void) +u32 notrace acpi_pm_read_verified(void) { u32 v1 = 0, v2 = 0, v3 = 0; @@ -56,12 +56,12 @@ u32 acpi_pm_read_verified(void) return v2; } -static cycle_t acpi_pm_read_slow(void) +static notrace cycle_t acpi_pm_read_slow(void) { return (cycle_t)acpi_pm_read_verified(); } -static cycle_t acpi_pm_read(void) +static notrace cycle_t 
acpi_pm_read(void) { return (cycle_t)read_pmtmr(); } Index: linux/fs/proc/proc_misc.c =================================================================== --- linux.orig/fs/proc/proc_misc.c +++ linux/fs/proc/proc_misc.c @@ -634,6 +634,20 @@ static int execdomains_read_proc(char *p return proc_calc_metrics(page, start, off, count, eof, len); } +#ifdef CONFIG_EVENT_TRACE +extern struct seq_operations latency_trace_op; +static int latency_trace_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &latency_trace_op); +} +static struct file_operations proc_latency_trace_operations = { + .open = latency_trace_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; +#endif + #ifdef CONFIG_MAGIC_SYSRQ /* * writing 'C' to /proc/sysrq-trigger is like sysrq-C @@ -727,6 +741,9 @@ void __init proc_misc_init(void) #ifdef CONFIG_SCHEDSTATS create_seq_entry("schedstat", 0, &proc_schedstat_operations); #endif +#ifdef CONFIG_EVENT_TRACE + create_seq_entry("latency_trace", 0, &proc_latency_trace_operations); +#endif #ifdef CONFIG_PROC_KCORE proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL); if (proc_root_kcore) { Index: linux/include/linux/clocksource.h =================================================================== --- linux.orig/include/linux/clocksource.h +++ linux/include/linux/clocksource.h @@ -21,6 +21,9 @@ typedef u64 cycle_t; struct clocksource; +extern unsigned long preempt_max_latency; +extern unsigned long preempt_thresh; + /** * struct clocksource - hardware abstraction for a free running counter * Provides mostly state-free accessors to the underlying hardware. @@ -178,8 +181,20 @@ static inline cycle_t clocksource_read(s */ static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles) { - u64 ret = (u64)cycles; - ret = (ret * cs->mult) >> cs->shift; + return ((u64)cycles * cs->mult) >> cs->shift; +} + +/** + * ns2cyc - converts nanoseconds to clocksource cycles + * @cs: Pointer to clocksource + * @nsecs: Nanoseconds + */ +static inline cycles_t ns2cyc(struct clocksource *cs, u64 nsecs) +{ + cycles_t ret = nsecs << cs->shift; + + do_div(ret, cs->mult + 1); + return ret; } @@ -227,4 +242,8 @@ static inline void update_vsyscall(struc } #endif +extern cycle_t get_monotonic_cycles(void); +extern unsigned long cycles_to_usecs(cycle_t); +extern cycle_t usecs_to_cycles(unsigned long); + #endif /* _LINUX_CLOCKSOURCE_H */ Index: linux/include/linux/kernel.h =================================================================== --- linux.orig/include/linux/kernel.h +++ linux/include/linux/kernel.h @@ -156,6 +156,8 @@ asmlinkage int vprintk(const char *fmt, __attribute__ ((format (printf, 1, 0))); asmlinkage int printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))) __cold; +extern void early_printk(const char *fmt, ...) + __attribute__ ((format (printf, 1, 2))); #else static inline int vprintk(const char *s, va_list args) __attribute__ ((format (printf, 1, 0))); Index: linux/include/linux/latency_hist.h =================================================================== --- /dev/null +++ linux/include/linux/latency_hist.h @@ -0,0 +1,32 @@ +/* + * kernel/latency_hist.h + * + * Add support for histograms of preemption-off latency and + * interrupt-off latency and wakeup latency, it depends on + * Real-Time Preemption Support. + * + * Copyright (C) 2005 MontaVista Software, Inc. 
+ * Yi Yang + * + */ +#ifndef _LINUX_LATENCY_HIST_H_ +#define _LINUX_LATENCY_HIST_H_ + +enum { + INTERRUPT_LATENCY = 0, + PREEMPT_LATENCY, + WAKEUP_LATENCY +}; + +#define MAX_ENTRY_NUM 10240 +#define LATENCY_TYPE_NUM 3 + +#ifdef CONFIG_LATENCY_HIST +extern void latency_hist(int latency_type, int cpu, unsigned long latency); +# define latency_hist_flag 1 +#else +# define latency_hist(a,b,c) do { (void)(cpu); } while (0) +# define latency_hist_flag 0 +#endif /* CONFIG_LATENCY_HIST */ + +#endif /* ifndef _LINUX_LATENCY_HIST_H_ */ Index: linux/include/linux/preempt.h =================================================================== --- linux.orig/include/linux/preempt.h +++ linux/include/linux/preempt.h @@ -10,12 +10,26 @@ #include #include -#ifdef CONFIG_DEBUG_PREEMPT - extern void fastcall add_preempt_count(int val); - extern void fastcall sub_preempt_count(int val); +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_CRITICAL_TIMING) + extern void notrace add_preempt_count(unsigned int val); + extern void notrace sub_preempt_count(unsigned int val); + extern void notrace mask_preempt_count(unsigned int mask); + extern void notrace unmask_preempt_count(unsigned int mask); #else # define add_preempt_count(val) do { preempt_count() += (val); } while (0) # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0) +# define mask_preempt_count(mask) \ + do { preempt_count() |= (mask); } while (0) +# define unmask_preempt_count(mask) \ + do { preempt_count() &= ~(mask); } while (0) +#endif + +#ifdef CONFIG_CRITICAL_TIMING + extern void touch_critical_timing(void); + extern void stop_critical_timing(void); +#else +# define touch_critical_timing() do { } while (0) +# define stop_critical_timing() do { } while (0) #endif #define inc_preempt_count() add_preempt_count(1) Index: linux/include/linux/sched.h =================================================================== --- linux.orig/include/linux/sched.h +++ linux/include/linux/sched.h @@ -239,6 +239,7 @@ static inline void show_state(void) } extern void show_regs(struct pt_regs *); +extern void irq_show_regs_callback(int cpu, struct pt_regs *regs); /* * TASK is a pointer to the task whose backtrace we want to see (or NULL for current @@ -275,6 +276,107 @@ static inline void touch_all_softlockup_ } #endif +#if defined(CONFIG_PREEMPT_TRACE) || defined(CONFIG_EVENT_TRACE) + extern void print_traces(struct task_struct *task); +#else +# define print_traces(task) do { } while (0) +#endif + +#ifdef CONFIG_FRAME_POINTER +# ifndef CONFIG_ARM +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) +# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) +# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) +# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) +# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) +# else + extern unsigned long arm_return_addr(int level); +# define CALLER_ADDR0 arm_return_addr(0) +# define CALLER_ADDR1 arm_return_addr(1) +# define CALLER_ADDR2 arm_return_addr(2) +# define CALLER_ADDR3 arm_return_addr(3) +# define CALLER_ADDR4 arm_return_addr(4) +# define CALLER_ADDR5 arm_return_addr(5) +#endif +#else +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 0UL +# define CALLER_ADDR2 0UL +# define CALLER_ADDR3 0UL +# define CALLER_ADDR4 0UL +# define CALLER_ADDR5 0UL +#endif + +#ifdef CONFIG_MCOUNT + extern void notrace mcount(void); +#else +# 
define mcount() do { } while (0) +#endif + +#ifdef CONFIG_EVENT_TRACE + extern int mcount_enabled, trace_enabled, trace_user_triggered, + trace_user_trigger_irq, trace_freerunning, trace_verbose, + trace_print_on_crash, trace_all_cpus, print_functions, + syscall_tracing, stackframe_tracing, trace_use_raw_cycles, + trace_all_runnable; + extern void notrace trace_special(unsigned long v1, unsigned long v2, unsigned long v3); + extern void notrace trace_special_pid(int pid, unsigned long v1, unsigned long v2); + extern void notrace trace_special_u64(unsigned long long v1, unsigned long v2); + extern void notrace trace_special_sym(void); + extern void stop_trace(void); +# define start_trace() do { trace_enabled = 1; } while (0) + extern void print_last_trace(void); + extern void nmi_trace(unsigned long eip, unsigned long parent_eip, + unsigned long flags); + extern long user_trace_start(void); + extern long user_trace_stop(void); + extern void trace_cmdline(void); + extern void init_tracer(void); +#else +# define mcount_enabled 0 +# define trace_enabled 0 +# define syscall_tracing 0 +# define stackframe_tracing 0 +# define trace_user_triggered 0 +# define trace_freerunning 0 +# define trace_all_cpus 0 +# define trace_verbose 0 +# define trace_special(v1,v2,v3) do { } while (0) +# define trace_special_pid(pid,v1,v2) do { } while (0) +# define trace_special_u64(v1,v2) do { } while (0) +# define trace_special_sym() do { } while (0) +# define stop_trace() do { } while (0) +# define start_trace() do { } while (0) +# define print_last_trace() do { } while (0) +# define nmi_trace(eip, parent_eip, flags) do { } while (0) +# define user_trace_start() do { } while (0) +# define user_trace_stop() do { } while (0) +# define trace_cmdline() do { } while (0) +# define init_tracer() do { } while (0) +#endif + +extern int timeofday_API_hacks(void *tv, void *tz); + +#ifdef CONFIG_WAKEUP_TIMING + extern int wakeup_timing; + extern void __trace_start_sched_wakeup(struct task_struct *p); + extern void trace_stop_sched_switched(struct task_struct *p); + extern void trace_change_sched_cpu(struct task_struct *p, int new_cpu); +#else +# define wakeup_timing 0 +# define __trace_start_sched_wakeup(p) do { } while (0) +# define trace_stop_sched_switched(p) do { } while (0) +# define trace_change_sched_cpu(p, cpu) do { } while (0) +#endif + +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + extern void notrace time_hardirqs_on(unsigned long a0, unsigned long a1); + extern void notrace time_hardirqs_off(unsigned long a0, unsigned long a1); +#else +# define time_hardirqs_on(a0, a1) do { } while (0) +# define time_hardirqs_off(a0, a1) do { } while (0) +#endif /* Attach to any functions which should be ignored in wchan output. 
*/ #define __sched __attribute__((__section__(".sched.text"))) @@ -1134,6 +1236,13 @@ struct task_struct { unsigned int lockdep_recursion; #endif +#define MAX_PREEMPT_TRACE 16 + +#ifdef CONFIG_PREEMPT_TRACE + unsigned long preempt_trace_eip[MAX_PREEMPT_TRACE]; + unsigned long preempt_trace_parent_eip[MAX_PREEMPT_TRACE]; +#endif + /* journalling filesystem info */ void *journal_info; Index: linux/init/main.c =================================================================== --- linux.orig/init/main.c +++ linux/init/main.c @@ -592,6 +592,8 @@ asmlinkage void __init start_kernel(void if (panic_later) panic(panic_later, panic_param); + init_tracer(); + lockdep_info(); /* Index: linux/kernel/Makefile =================================================================== --- linux.orig/kernel/Makefile +++ linux/kernel/Makefile @@ -39,6 +39,11 @@ obj-$(CONFIG_COMPAT) += compat.o obj-$(CONFIG_CPUSETS) += cpuset.o obj-$(CONFIG_IKCONFIG) += configs.o obj-$(CONFIG_STOP_MACHINE) += stop_machine.o +obj-$(CONFIG_DEBUG_PREEMPT) += latency_trace.o +obj-$(CONFIG_WAKEUP_TIMING) += latency_trace.o +obj-$(CONFIG_EVENT_TRACE) += latency_trace.o +obj-$(CONFIG_CRITICAL_TIMING) += latency_trace.o +obj-$(CONFIG_LATENCY_HIST) += latency_hist.o obj-$(CONFIG_AUDIT) += audit.o auditfilter.o obj-$(CONFIG_AUDITSYSCALL) += auditsc.o obj-$(CONFIG_KPROBES) += kprobes.o Index: linux/kernel/fork.c =================================================================== --- linux.orig/kernel/fork.c +++ linux/kernel/fork.c @@ -996,7 +996,7 @@ static struct task_struct *copy_process( rt_mutex_init_task(p); -#ifdef CONFIG_TRACE_IRQFLAGS +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_LOCKDEP) DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif Index: linux/kernel/latency_hist.c =================================================================== --- /dev/null +++ linux/kernel/latency_hist.c @@ -0,0 +1,267 @@ +/* + * kernel/latency_hist.c + * + * Add support for histograms of preemption-off latency and + * interrupt-off latency and wakeup latency, it depends on + * Real-Time Preemption Support. + * + * Copyright (C) 2005 MontaVista Software, Inc. 
+ * Yi Yang + * + */ +#include +#include +#include +#include +#include +#include +#include + +typedef struct hist_data_struct { + atomic_t hist_mode; /* 0 log, 1 don't log */ + unsigned long min_lat; + unsigned long avg_lat; + unsigned long max_lat; + unsigned long long beyond_hist_bound_samples; + unsigned long long accumulate_lat; + unsigned long long total_samples; + unsigned long long hist_array[MAX_ENTRY_NUM]; +} hist_data_t; + +static struct proc_dir_entry * latency_hist_root = NULL; +static char * latency_hist_proc_dir_root = "latency_hist"; + +static char * percpu_proc_name = "CPU"; + +#ifdef CONFIG_INTERRUPT_OFF_HIST +static DEFINE_PER_CPU(hist_data_t, interrupt_off_hist); +static char * interrupt_off_hist_proc_dir = "interrupt_off_latency"; +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST +static DEFINE_PER_CPU(hist_data_t, preempt_off_hist); +static char * preempt_off_hist_proc_dir = "preempt_off_latency"; +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST +static DEFINE_PER_CPU(hist_data_t, wakeup_latency_hist); +static char * wakeup_latency_hist_proc_dir = "wakeup_latency"; +#endif + +static struct proc_dir_entry *entry[LATENCY_TYPE_NUM][NR_CPUS]; + +static inline u64 u64_div(u64 x, u64 y) +{ + do_div(x, y); + return x; +} + +void latency_hist(int latency_type, int cpu, unsigned long latency) +{ + hist_data_t * my_hist; + + if ((cpu < 0) || (cpu >= NR_CPUS) || (latency_type < INTERRUPT_LATENCY) + || (latency_type > WAKEUP_LATENCY) || (latency < 0)) + return; + + switch(latency_type) { +#ifdef CONFIG_INTERRUPT_OFF_HIST + case INTERRUPT_LATENCY: + my_hist = (hist_data_t *)&per_cpu(interrupt_off_hist, cpu); + break; +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST + case PREEMPT_LATENCY: + my_hist = (hist_data_t *)&per_cpu(preempt_off_hist, cpu); + break; +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + my_hist = (hist_data_t *)&per_cpu(wakeup_latency_hist, cpu); + break; +#endif + default: + return; + } + + if (atomic_read(&my_hist->hist_mode) == 0) + return; + + if (latency >= MAX_ENTRY_NUM) + my_hist->beyond_hist_bound_samples++; + else + my_hist->hist_array[latency]++; + + if (latency < my_hist->min_lat) + my_hist->min_lat = latency; + else if (latency > my_hist->max_lat) + my_hist->max_lat = latency; + + my_hist->total_samples++; + my_hist->accumulate_lat += latency; + my_hist->avg_lat = (unsigned long) u64_div(my_hist->accumulate_lat, + my_hist->total_samples); + return; +} + +static void *l_start(struct seq_file *m, loff_t * pos) +{ + loff_t *index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); + loff_t index = *pos; + hist_data_t *my_hist = (hist_data_t *) m->private; + + if (!index_ptr) + return NULL; + + if (index == 0) { + atomic_dec(&my_hist->hist_mode); + seq_printf(m, "#Minimum latency: %lu microseconds.\n" + "#Average latency: %lu microseconds.\n" + "#Maximum latency: %lu microseconds.\n" + "#Total samples: %llu\n" + "#There are %llu samples greater or equal than %d microseconds\n" + "#usecs\t%16s\n" + , my_hist->min_lat + , my_hist->avg_lat + , my_hist->max_lat + , my_hist->total_samples + , my_hist->beyond_hist_bound_samples + , MAX_ENTRY_NUM, "samples"); + } + if (index >= MAX_ENTRY_NUM) + return NULL; + + *index_ptr = index; + return index_ptr; +} + +static void *l_next(struct seq_file *m, void *p, loff_t * pos) +{ + loff_t *index_ptr = p; + hist_data_t *my_hist = (hist_data_t *) m->private; + + if (++*pos >= MAX_ENTRY_NUM) { + atomic_inc(&my_hist->hist_mode); + return NULL; + } + *index_ptr = *pos; + return index_ptr; +} + +static void l_stop(struct seq_file *m, 
void *p) +{ + kfree(p); +} + +static int l_show(struct seq_file *m, void *p) +{ + int index = *(loff_t *) p; + hist_data_t *my_hist = (hist_data_t *) m->private; + + seq_printf(m, "%5d\t%16llu\n", index, my_hist->hist_array[index]); + return 0; +} + +static struct seq_operations latency_hist_seq_op = { + .start = l_start, + .next = l_next, + .stop = l_stop, + .show = l_show +}; + +static int latency_hist_seq_open(struct inode *inode, struct file *file) +{ + struct proc_dir_entry *entry_ptr = NULL; + int ret, i, j, break_flags = 0; + struct seq_file *seq; + + entry_ptr = PDE(file->f_dentry->d_inode); + for (i = 0; i < LATENCY_TYPE_NUM; i++) { + for (j = 0; j < NR_CPUS; j++) { + if (entry[i][j] == NULL) + continue; + if (entry_ptr->low_ino == entry[i][j]->low_ino) { + break_flags = 1; + break; + } + } + if (break_flags == 1) + break; + } + ret = seq_open(file, &latency_hist_seq_op); + if (break_flags == 1) { + seq = (struct seq_file *)file->private_data; + seq->private = entry[i][j]->data; + } + return ret; +} + +static struct file_operations latency_hist_seq_fops = { + .open = latency_hist_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static __init int latency_hist_init(void) +{ + struct proc_dir_entry *tmp_parent_proc_dir; + int i = 0, len = 0; + hist_data_t *my_hist; + char procname[64]; + + latency_hist_root = proc_mkdir(latency_hist_proc_dir_root, NULL); + + +#ifdef CONFIG_INTERRUPT_OFF_HIST + tmp_parent_proc_dir = proc_mkdir(interrupt_off_hist_proc_dir, latency_hist_root); + for (i = 0; i < NR_CPUS; i++) { + len = sprintf(procname, "%s%d", percpu_proc_name, i); + procname[len] = '\0'; + entry[INTERRUPT_LATENCY][i] = + create_proc_entry(procname, 0, tmp_parent_proc_dir); + entry[INTERRUPT_LATENCY][i]->data = (void *)&per_cpu(interrupt_off_hist, i); + entry[INTERRUPT_LATENCY][i]->proc_fops = &latency_hist_seq_fops; + my_hist = (hist_data_t *) entry[INTERRUPT_LATENCY][i]->data; + atomic_set(&my_hist->hist_mode,1); + my_hist->min_lat = 0xFFFFFFFFUL; + } +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST + tmp_parent_proc_dir = proc_mkdir(preempt_off_hist_proc_dir, latency_hist_root); + for (i = 0; i < NR_CPUS; i++) { + len = sprintf(procname, "%s%d", percpu_proc_name, i); + procname[len] = '\0'; + entry[PREEMPT_LATENCY][i] = + create_proc_entry(procname, 0, tmp_parent_proc_dir); + entry[PREEMPT_LATENCY][i]->data = (void *)&per_cpu(preempt_off_hist, i); + entry[PREEMPT_LATENCY][i]->proc_fops = &latency_hist_seq_fops; + my_hist = (hist_data_t *) entry[PREEMPT_LATENCY][i]->data; + atomic_set(&my_hist->hist_mode,1); + my_hist->min_lat = 0xFFFFFFFFUL; + } +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST + tmp_parent_proc_dir = proc_mkdir(wakeup_latency_hist_proc_dir, latency_hist_root); + for (i = 0; i < NR_CPUS; i++) { + len = sprintf(procname, "%s%d", percpu_proc_name, i); + procname[len] = '\0'; + entry[WAKEUP_LATENCY][i] = + create_proc_entry(procname, 0, tmp_parent_proc_dir); + entry[WAKEUP_LATENCY][i]->data = (void *)&per_cpu(wakeup_latency_hist, i); + entry[WAKEUP_LATENCY][i]->proc_fops = &latency_hist_seq_fops; + my_hist = (hist_data_t *) entry[WAKEUP_LATENCY][i]->data; + atomic_set(&my_hist->hist_mode,1); + my_hist->min_lat = 0xFFFFFFFFUL; + } +#endif + return 0; + +} + +__initcall(latency_hist_init); + Index: linux/kernel/latency_trace.c =================================================================== --- /dev/null +++ linux/kernel/latency_trace.c @@ -0,0 +1,2742 @@ +/* + * kernel/latency_trace.c + * + * Copyright (C) 2004-2006 Ingo Molnar + * Copyright 
(C) 2004 William Lee Irwin III + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef DEFINE_RAW_SPINLOCK +# define DEFINE_RAW_SPINLOCK DEFINE_SPINLOCK +#endif + +#ifndef RAW_SPIN_LOCK_UNLOCKED +# define RAW_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED +#endif + +int trace_use_raw_cycles = 0; + +#define __raw_spinlock_t raw_spinlock_t +#define need_resched_delayed() 0 + +#ifdef CONFIG_EVENT_TRACE +/* + * Convert raw cycles to usecs. + * Note: this is not the 'clocksource cycles' value, it's the raw + * cycle counter cycles. We use GTOD to timestamp latency start/end + * points, but the trace entries inbetween are timestamped with + * get_cycles(). + */ +static unsigned long notrace cycles_to_us(cycle_t delta) +{ + if (!trace_use_raw_cycles) + return cycles_to_usecs(delta); +#ifdef CONFIG_X86 + do_div(delta, cpu_khz/1000+1); +#elif defined(CONFIG_PPC) + delta = mulhwu(tb_to_us, delta); +#elif defined(CONFIG_ARM) + delta = mach_cycles_to_usecs(delta); +#else + #error Implement cycles_to_usecs. +#endif + + return (unsigned long) delta; +} +#endif + +static notrace inline cycle_t now(void) +{ + if (trace_use_raw_cycles) + return get_cycles(); + return get_monotonic_cycles(); +} + +#ifndef irqs_off +# define irqs_off() 0 +#endif + +#ifndef DEBUG_WARN_ON +static inline int DEBUG_WARN_ON(int cond) +{ + WARN_ON(cond); + return 0; +} +#endif + +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING +# ifdef CONFIG_CRITICAL_PREEMPT_TIMING +# define irqs_off_preempt_count() preempt_count() +# else +# define irqs_off_preempt_count() 0 +# endif +#endif + +#ifdef CONFIG_WAKEUP_TIMING +struct sch_struct { + __raw_spinlock_t trace_lock; + struct task_struct *task; + int cpu; + struct cpu_trace *tr; +} ____cacheline_aligned_in_smp; + +static __cacheline_aligned_in_smp struct sch_struct sch = + { trace_lock: __RAW_SPIN_LOCK_UNLOCKED }; + +int wakeup_timing = 1; +#endif + +/* + * Track maximum latencies and save the trace: + */ + +/* + * trace_stop_sched_switched must not be called with runqueue locks held! + */ +static __cacheline_aligned_in_smp DECLARE_MUTEX(max_mutex); + +/* + * Sequence count - we record it when starting a measurement and + * skip the latency if the sequence has changed - some other section + * did a maximum and could disturb our measurement with serial console + * printouts, etc. Truly coinciding maximum latencies should be rare + * and what happens together happens separately as well, so this doesnt + * decrease the validity of the maximum found: + */ +static __cacheline_aligned_in_smp unsigned long max_sequence; + +enum trace_type +{ + __TRACE_FIRST_TYPE = 0, + + TRACE_FN, + TRACE_SPECIAL, + TRACE_SPECIAL_PID, + TRACE_SPECIAL_U64, + TRACE_SPECIAL_SYM, + TRACE_CMDLINE, + TRACE_SYSCALL, + TRACE_SYSRET, + + __TRACE_LAST_TYPE +}; + +enum trace_flag_type +{ + TRACE_FLAG_IRQS_OFF = 0x01, + TRACE_FLAG_NEED_RESCHED = 0x02, + TRACE_FLAG_NEED_RESCHED_DELAYED = 0x04, + TRACE_FLAG_HARDIRQ = 0x08, + TRACE_FLAG_SOFTIRQ = 0x10, + TRACE_FLAG_IRQS_HARD_OFF = 0x20, +}; + +/* + * Maximum preemption latency measured. Initialize to maximum, + * we clear it after bootup. + */ +#ifdef CONFIG_LATENCY_HIST +unsigned long preempt_max_latency = (cycle_t)0UL; +#else +unsigned long preempt_max_latency = (cycle_t)ULONG_MAX; +#endif + +unsigned long preempt_thresh; + +/* + * Should this new latency be reported/recorded? 
+ */ +static int report_latency(cycle_t delta) +{ + if (latency_hist_flag && !trace_user_triggered) + return 1; + + if (preempt_thresh) { + if (delta < preempt_thresh) + return 0; + } else { + if (delta <= preempt_max_latency) + return 0; + } + return 1; +} + +#ifdef CONFIG_EVENT_TRACE + +/* + * Number of per-CPU trace entries: + */ +#define MAX_TRACE (65536UL*16UL) + +#define CMDLINE_BYTES 16 + +/* + * 32 bytes on 32-bit platforms: + */ +struct trace_entry { + char type; + char cpu; + char flags; + char preempt_count; // assumes PREEMPT_MASK is 8 bits or less + int pid; + cycle_t timestamp; + union { + struct { + unsigned long eip; + unsigned long parent_eip; + } fn; + struct { + unsigned long eip; + unsigned long v1, v2, v3; + } special; + struct { + unsigned char str[CMDLINE_BYTES]; + } cmdline; + struct { + unsigned long nr; // highest bit: compat call + unsigned long p1, p2, p3; + } syscall; + struct { + unsigned long ret; + } sysret; + struct { + unsigned long __pad3[4]; + } pad; + } u; +} __attribute__((packed)); + +#endif + +struct cpu_trace { + atomic_t disabled; + unsigned long trace_idx; + cycle_t preempt_timestamp; + unsigned long critical_start, critical_end; + unsigned long critical_sequence; + atomic_t underrun; + atomic_t overrun; + int early_warning; + int latency_type; + int cpu; + +#ifdef CONFIG_EVENT_TRACE + struct trace_entry *trace; + char comm[CMDLINE_BYTES]; + pid_t pid; + unsigned long uid; + unsigned long nice; + unsigned long policy; + unsigned long rt_priority; + unsigned long saved_latency; +#endif +#ifdef CONFIG_DEBUG_STACKOVERFLOW + unsigned long stack_check; +#endif +} ____cacheline_aligned_in_smp; + +static struct cpu_trace cpu_traces[NR_CPUS] ____cacheline_aligned_in_smp = +{ [0 ... NR_CPUS-1] = { +#ifdef CONFIG_DEBUG_STACKOVERFLOW + .stack_check = 1 +#endif + } }; + +#ifdef CONFIG_EVENT_TRACE + +int trace_enabled = 0; +int syscall_tracing = 1; +int stackframe_tracing = 0; +int mcount_enabled = 0; +int trace_freerunning = 0; +int trace_print_on_crash = 0; +int trace_verbose = 0; +int trace_all_cpus = 0; +int print_functions = 0; +int trace_all_runnable = 0; + +/* + * user-triggered via gettimeofday(0,1)/gettimeofday(0,0) + */ +int trace_user_triggered = 0; +int trace_user_trigger_irq = -1; + +struct saved_trace_struct { + int cpu; + cycle_t first_timestamp, last_timestamp; + struct cpu_trace traces[NR_CPUS]; +} ____cacheline_aligned_in_smp; + +/* + * The current worst-case trace: + */ +static struct saved_trace_struct max_tr; + +/* + * /proc/latency_trace atomicity: + */ +static DECLARE_MUTEX(out_mutex); + +static struct saved_trace_struct out_tr; + +static void notrace printk_name(unsigned long eip) +{ + char namebuf[KSYM_NAME_LEN+1]; + unsigned long size, offset; + const char *sym_name; + char *modname; + + sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf); + if (sym_name) + printk("%s+%#lx/%#lx", sym_name, offset, size); + else + printk("<%08lx>", eip); +} + +#ifdef CONFIG_DEBUG_STACKOVERFLOW + +#ifndef STACK_WARN +# define STACK_WARN (THREAD_SIZE/8) +#endif + +#define MIN_STACK_NEEDED (sizeof(struct thread_info) + STACK_WARN) +#define MAX_STACK (THREAD_SIZE - sizeof(struct thread_info)) + +#if (defined(__i386__) || defined(__x86_64__)) && defined(CONFIG_FRAME_POINTER) +# define PRINT_EXACT_STACKFRAME +#endif + +#ifdef PRINT_EXACT_STACKFRAME +static unsigned long *worst_stack_bp; +#endif +static DEFINE_RAW_SPINLOCK(worst_stack_lock); +unsigned long worst_stack_left = THREAD_SIZE; +static unsigned long worst_stack_printed = THREAD_SIZE; 
+static char worst_stack_comm[TASK_COMM_LEN+1]; +static int worst_stack_pid; +static unsigned long worst_stack_sp; +static char worst_stack[THREAD_SIZE]; + +static notrace void fill_worst_stack(unsigned long stack_left) +{ + unsigned long flags; + + /* + * On x64, we must not read the PDA during early bootup: + */ +#ifdef CONFIG_X86_64 + if (system_state == SYSTEM_BOOTING) + return; +#endif + spin_lock_irqsave(&worst_stack_lock, flags); + if (likely(stack_left < worst_stack_left)) { + worst_stack_left = stack_left; + memcpy(worst_stack, current_thread_info(), THREAD_SIZE); + worst_stack_sp = (unsigned long)&stack_left; + memcpy(worst_stack_comm, current->comm, TASK_COMM_LEN); + worst_stack_pid = current->pid; +#ifdef PRINT_EXACT_STACKFRAME +# ifdef __i386__ + asm ("mov %%ebp, %0\n" :"=g"(worst_stack_bp)); +# elif defined(__x86_64__) + asm ("mov %%rbp, %0\n" :"=g"(worst_stack_bp)); +# else +# error Poke the author of above asm code lines ! +# endif +#endif + } + spin_unlock_irqrestore(&worst_stack_lock, flags); +} + +#ifdef PRINT_EXACT_STACKFRAME + +/* + * This takes a BP offset to point the BP back into the saved stack, + * the original stack might be long gone (but the stackframe within + * the saved copy still contains references to it). + */ +#define CONVERT_TO_SAVED_STACK(bp) \ + ((void *)worst_stack + ((unsigned long)bp & (THREAD_SIZE-1))) + +static void show_stackframe(void) +{ + unsigned long addr, frame_size, *bp, *prev_bp, sum = 0; + + bp = CONVERT_TO_SAVED_STACK(worst_stack_bp); + + while (bp[0]) { + addr = bp[1]; + if (!kernel_text_address(addr)) + break; + + prev_bp = bp; + bp = CONVERT_TO_SAVED_STACK((unsigned long *)bp[0]); + + frame_size = (bp - prev_bp) * sizeof(long); + + if (frame_size < THREAD_SIZE) { + printk("{ %4ld} ", frame_size); + sum += frame_size; + } else + printk("{=%4ld} ", sum); + + printk("[<%08lx>] ", addr); + printk_name(addr); + printk("\n"); + } +} + +#else + +static inline int valid_stack_ptr(void *p) +{ + return p > (void *)worst_stack && + p < (void *)worst_stack + THREAD_SIZE - 3; +} + +static void show_stackframe(void) +{ + unsigned long prev_frame, addr; + unsigned long *stack; + + prev_frame = (unsigned long)(worst_stack + + (worst_stack_sp & (THREAD_SIZE-1))); + stack = (unsigned long *)prev_frame; + + while (valid_stack_ptr(stack)) { + addr = *stack++; + if (__kernel_text_address(addr)) { + printk("(%4ld) ", (unsigned long)stack - prev_frame); + printk("[<%08lx>] ", addr); + print_symbol("%s\n", addr); + prev_frame = (unsigned long)stack; + } + if ((char *)stack >= worst_stack + THREAD_SIZE) + break; + } +} + +#endif + +static notrace void __print_worst_stack(void) +{ + unsigned long fill_ratio; + printk("----------------------------->\n"); + printk("| new stack fill maximum: %s/%d, %ld bytes (out of %ld bytes).\n", + worst_stack_comm, worst_stack_pid, + MAX_STACK-worst_stack_left, (long)MAX_STACK); + fill_ratio = (MAX_STACK-worst_stack_left)*100/(long)MAX_STACK; + printk("| Stack fill ratio: %02ld%%", fill_ratio); + if (fill_ratio >= 90) + printk(" - BUG: that's quite high, please report this!\n"); + else + printk(" - that's still OK, no need to report this.\n"); + printk("------------|\n"); + + show_stackframe(); + printk("<---------------------------\n\n"); +} + +static notrace void print_worst_stack(void) +{ + unsigned long flags; + + if (irqs_disabled() || preempt_count()) + return; + + spin_lock_irqsave(&worst_stack_lock, flags); + if (worst_stack_printed == worst_stack_left) { + spin_unlock_irqrestore(&worst_stack_lock, flags); + return; 
+ } + worst_stack_printed = worst_stack_left; + spin_unlock_irqrestore(&worst_stack_lock, flags); + + __print_worst_stack(); +} + +static notrace void debug_stackoverflow(struct cpu_trace *tr) +{ + long stack_left; + + if (unlikely(tr->stack_check <= 0)) + return; + atomic_inc(&tr->disabled); + + /* Debugging check for stack overflow: is there less than 1KB free? */ +#ifdef __i386__ + __asm__ __volatile__("and %%esp,%0" : + "=r" (stack_left) : "0" (THREAD_SIZE - 1)); +#elif defined(__x86_64__) + __asm__ __volatile__("and %%rsp,%0" : + "=r" (stack_left) : "0" (THREAD_SIZE - 1)); +#else +# error Poke the author of above asm code lines ! +#endif + if (unlikely(stack_left < MIN_STACK_NEEDED)) { + tr->stack_check = 0; + printk(KERN_ALERT "BUG: stack overflow: only %ld bytes left! [%08lx...(%08lx-%08lx)]\n", + stack_left - sizeof(struct thread_info), + (long)&stack_left, + (long)current_thread_info(), + (long)current_thread_info() + THREAD_SIZE); + fill_worst_stack(stack_left); + __print_worst_stack(); + goto out; + } + if (unlikely(stack_left < worst_stack_left)) { + tr->stack_check--; + fill_worst_stack(stack_left); + print_worst_stack(); + tr->stack_check++; + } else + if (worst_stack_printed != worst_stack_left) { + tr->stack_check--; + print_worst_stack(); + tr->stack_check++; + } +out: + atomic_dec(&tr->disabled); +} + +#endif + +#ifdef CONFIG_EARLY_PRINTK +static void notrace early_printk_name(unsigned long eip) +{ + char namebuf[KSYM_NAME_LEN+1]; + unsigned long size, offset; + const char *sym_name; + char *modname; + + sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf); + if (sym_name) + early_printk("%s <%08lx>", sym_name, eip); + else + early_printk("<%08lx>", eip); +} + +static __raw_spinlock_t early_print_lock = __RAW_SPIN_LOCK_UNLOCKED; + +static void notrace early_print_entry(struct trace_entry *entry) +{ + int hardirq, softirq; + + __raw_spin_lock(&early_print_lock); + early_printk("%-5d ", entry->pid); + + early_printk("%d%c%c", + entry->cpu, + (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : + (entry->flags & TRACE_FLAG_IRQS_HARD_OFF) ? 'D' : '.', + (entry->flags & TRACE_FLAG_NEED_RESCHED_DELAYED) ? 'n' : + ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 
'N' : '.')); + + hardirq = entry->flags & TRACE_FLAG_HARDIRQ; + softirq = entry->flags & TRACE_FLAG_SOFTIRQ; + if (hardirq && softirq) + early_printk("H"); + else { + if (hardirq) + early_printk("h"); + else { + if (softirq) + early_printk("s"); + else + early_printk("."); + } + } + + early_printk(":%d: ", entry->preempt_count); + + if (entry->type == TRACE_FN) { + early_printk_name(entry->u.fn.eip); + early_printk(" <= ("); + early_printk_name(entry->u.fn.parent_eip); + early_printk(")\n"); + } else { + /* special entries: */ + early_printk_name(entry->u.special.eip); + early_printk(": <%08lx> <%08lx> <%08lx>\n", + entry->u.special.v1, + entry->u.special.v2, + entry->u.special.v3); + } + __raw_spin_unlock(&early_print_lock); +} +#else +# define early_print_entry(x) do { } while(0) +#endif + +static void notrace +____trace(int cpu, enum trace_type type, struct cpu_trace *tr, + unsigned long eip, unsigned long parent_eip, + unsigned long v1, unsigned long v2, unsigned long v3, + unsigned long flags) +{ + struct trace_entry *entry; + unsigned long idx, idx_next; + cycle_t timestamp; + u32 pc; + +#ifdef CONFIG_DEBUG_PREEMPT +// WARN_ON(!atomic_read(&tr->disabled)); +#endif + if (!tr->critical_start && !trace_user_triggered && !trace_all_cpus && + !trace_print_on_crash && !print_functions) + goto out; + /* + * Allocate the next index. Make sure an NMI (or interrupt) + * has not taken it away. Potentially redo the timestamp as + * well to make sure the trace timestamps are in chronologic + * order. + */ +again: + idx = tr->trace_idx; + idx_next = idx + 1; + timestamp = now(); + + if (unlikely((trace_freerunning || print_functions || atomic_read(&tr->underrun)) && + (idx_next >= MAX_TRACE) && !atomic_read(&tr->overrun))) { + atomic_inc(&tr->underrun); + idx_next = 0; + } + if (unlikely(idx >= MAX_TRACE)) { + atomic_inc(&tr->overrun); + goto out; + } +#ifdef __HAVE_ARCH_CMPXCHG + if (unlikely(cmpxchg(&tr->trace_idx, idx, idx_next) != idx)) { + if (idx_next == 0) + atomic_dec(&tr->underrun); + goto again; + } +#else +# ifdef CONFIG_SMP +# error CMPXCHG missing +# else + /* No worry, we are protected by the atomic_incr(&tr->disabled) + * in __trace further down + */ + tr->trace_idx = idx_next; +# endif +#endif + if (unlikely(idx_next != 0 && atomic_read(&tr->underrun))) + atomic_inc(&tr->underrun); + + pc = preempt_count(); + + if (unlikely(!tr->trace)) + goto out; + entry = tr->trace + idx; + entry->type = type; +#ifdef CONFIG_SMP + entry->cpu = cpu; +#endif + entry->flags = (irqs_off() ? TRACE_FLAG_IRQS_OFF : 0) | + (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_HARD_OFF : 0)| + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | + (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | + (need_resched_delayed() ? 
TRACE_FLAG_NEED_RESCHED_DELAYED : 0); + entry->preempt_count = pc & 0xff; + entry->pid = current->pid; + entry->timestamp = timestamp; + + switch (type) { + case TRACE_FN: + entry->u.fn.eip = eip; + entry->u.fn.parent_eip = parent_eip; + if (unlikely(print_functions && !in_interrupt())) + early_print_entry(entry); + break; + case TRACE_SPECIAL: + case TRACE_SPECIAL_PID: + case TRACE_SPECIAL_U64: + case TRACE_SPECIAL_SYM: + entry->u.special.eip = eip; + entry->u.special.v1 = v1; + entry->u.special.v2 = v2; + entry->u.special.v3 = v3; + if (unlikely(print_functions && !in_interrupt())) + early_print_entry(entry); + break; + case TRACE_SYSCALL: + entry->u.syscall.nr = eip; + entry->u.syscall.p1 = v1; + entry->u.syscall.p2 = v2; + entry->u.syscall.p3 = v3; + break; + case TRACE_SYSRET: + entry->u.sysret.ret = eip; + break; + case TRACE_CMDLINE: + memcpy(entry->u.cmdline.str, current->comm, CMDLINE_BYTES); + break; + default: + break; + } +out: + ; +} + +static inline void notrace +___trace(enum trace_type type, unsigned long eip, unsigned long parent_eip, + unsigned long v1, unsigned long v2, + unsigned long v3) +{ + struct cpu_trace *tr; + unsigned long flags; + int cpu; + + if (unlikely(trace_enabled <= 0)) + return; + +#if defined(CONFIG_DEBUG_STACKOVERFLOW) && defined(CONFIG_X86) + debug_stackoverflow(cpu_traces + raw_smp_processor_id()); +#endif + + raw_local_irq_save(flags); + cpu = raw_smp_processor_id(); + /* + * Trace on the CPU where the current highest-prio task + * is waiting to become runnable: + */ +#ifdef CONFIG_WAKEUP_TIMING + if (wakeup_timing && !trace_all_cpus && !trace_print_on_crash && + !print_functions) { + if (!sch.tr || cpu != sch.cpu) + goto out; + tr = sch.tr; + } else + tr = cpu_traces + cpu; +#else + tr = cpu_traces + cpu; +#endif + atomic_inc(&tr->disabled); + if (likely(atomic_read(&tr->disabled) == 1)) { +//#define DEBUG_STACK_POISON +#ifdef DEBUG_STACK_POISON + char stack; + + memset(&stack - 128, 0x34, 128); +#endif + ____trace(cpu, type, tr, eip, parent_eip, v1, v2, v3, flags); + } + atomic_dec(&tr->disabled); +#ifdef CONFIG_WAKEUP_TIMING +out: +#endif + raw_local_irq_restore(flags); +} + +/* + * Special, ad-hoc tracepoints: + */ +void notrace trace_special(unsigned long v1, unsigned long v2, unsigned long v3) +{ + ___trace(TRACE_SPECIAL, CALLER_ADDR0, 0, v1, v2, v3); +} + +EXPORT_SYMBOL(trace_special); + +void notrace trace_special_pid(int pid, unsigned long v1, unsigned long v2) +{ + ___trace(TRACE_SPECIAL_PID, CALLER_ADDR0, 0, pid, v1, v2); +} + +EXPORT_SYMBOL(trace_special_pid); + +void notrace trace_special_u64(unsigned long long v1, unsigned long v2) +{ + ___trace(TRACE_SPECIAL_U64, CALLER_ADDR0, 0, + (unsigned long) (v1 >> 32), (unsigned long) (v1 & 0xFFFFFFFF), + v2); +} + +EXPORT_SYMBOL(trace_special_u64); + +void notrace trace_special_sym(void) +{ +#define STACK_ENTRIES 8 + unsigned long entries[STACK_ENTRIES]; + struct stack_trace trace; + + if (!trace_enabled || !stackframe_tracing) + return; + + trace.entries = entries; + trace.skip = 3; + trace.max_entries = STACK_ENTRIES; + trace.nr_entries = 0; + + save_stack_trace(&trace); + /* + * clear out the rest: + */ + while (trace.nr_entries < trace.max_entries) + entries[trace.nr_entries++] = 0; + + ___trace(TRACE_SPECIAL_SYM, entries[0], 0, + entries[1], entries[2], entries[3]); + ___trace(TRACE_SPECIAL_SYM, entries[4], 0, + entries[5], entries[6], entries[7]); +} + +EXPORT_SYMBOL(trace_special_sym); + +/* + * Non-inlined function: + */ +void notrace __trace(unsigned long eip, unsigned long 
parent_eip) +{ + ___trace(TRACE_FN, eip, parent_eip, 0, 0, 0); +} + +#ifdef CONFIG_MCOUNT + +extern void mcount(void); + +EXPORT_SYMBOL(mcount); + +void notrace __mcount(void) +{ + ___trace(TRACE_FN, CALLER_ADDR1, CALLER_ADDR2, 0, 0, 0); +} + +#endif + +void notrace +sys_call(unsigned long nr, unsigned long p1, unsigned long p2, unsigned long p3) +{ + if (syscall_tracing) + ___trace(TRACE_SYSCALL, nr, 0, p1, p2, p3); +} + +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86) + +void notrace +sys_ia32_call(unsigned long nr, unsigned long p1, unsigned long p2, + unsigned long p3) +{ + if (syscall_tracing) + ___trace(TRACE_SYSCALL, nr | 0x80000000, 0, p1, p2, p3); +} + +#endif + +void notrace sys_ret(unsigned long ret) +{ + if (syscall_tracing) + ___trace(TRACE_SYSRET, ret, 0, 0, 0, 0); +} + +static void notrace print_name(struct seq_file *m, unsigned long eip) +{ + char namebuf[KSYM_NAME_LEN+1]; + unsigned long size, offset; + const char *sym_name; + char *modname; + + /* + * Special trace values: + */ + if (((long)eip < 100000L) && ((long)eip > -100000L)) { + seq_printf(m, "(%5ld)", eip); + return; + } + sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf); + if (sym_name) + seq_puts(m, sym_name); + else + seq_printf(m, "<%08lx>", eip); +} + +static void notrace print_name_offset(struct seq_file *m, unsigned long eip) +{ + char namebuf[KSYM_NAME_LEN+1]; + unsigned long size, offset; + const char *sym_name; + char *modname; + + sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf); + if (sym_name) + seq_printf(m, "%s+%#lx/%#lx <%08lx>", + sym_name, offset, size, eip); + else + seq_printf(m, "<%08lx>", eip); +} + +static unsigned long out_sequence = -1; + +static int pid_to_cmdline_array[PID_MAX_DEFAULT+1]; + +static void notrace _trace_cmdline(int cpu, struct cpu_trace *tr) +{ + unsigned long flags; + + local_save_flags(flags); + ____trace(cpu, TRACE_CMDLINE, tr, 0, 0, 0, 0, 0, flags); +} + +void notrace trace_cmdline(void) +{ + ___trace(TRACE_CMDLINE, 0, 0, 0, 0, 0); +} + +static void construct_pid_to_cmdline(struct cpu_trace *tr) +{ + unsigned int i, j, entries, pid; + + if (tr->critical_sequence == out_sequence) + return; + out_sequence = tr->critical_sequence; + + memset(pid_to_cmdline_array, -1, sizeof(int) * (PID_MAX_DEFAULT + 1)); + + if (!tr->trace) + return; + + entries = min(tr->trace_idx, MAX_TRACE); + + for (i = 0; i < entries; i++) { + struct trace_entry *entry = tr->trace + i; + + if (entry->type != TRACE_CMDLINE) + continue; + pid = entry->pid; + if (pid < PID_MAX_DEFAULT) { + pid_to_cmdline_array[pid] = i; + /* + * Replace space with underline - makes it easier + * to process for tools: + */ + for (j = 0; j < CMDLINE_BYTES; j++) + if (entry->u.cmdline.str[j] == ' ') + entry->u.cmdline.str[j] = '_'; + } + } +} + +char *pid_to_cmdline(unsigned long pid) +{ + struct cpu_trace *tr = out_tr.traces + 0; + char *cmdline = "<...>"; + int idx; + + pid = min(pid, (unsigned long)PID_MAX_DEFAULT); + if (!pid) + return ""; + + if (pid_to_cmdline_array[pid] != -1) { + idx = pid_to_cmdline_array[pid]; + if (tr->trace[idx].type == TRACE_CMDLINE) + cmdline = tr->trace[idx].u.cmdline.str; + } + return cmdline; +} + +static void copy_trace(struct cpu_trace *save, struct cpu_trace *tr, int reorder) +{ + if (!save->trace || !tr->trace) + return; + /* free-running needs reordering */ + if (reorder && atomic_read(&tr->underrun)) { + int i, idx, idx0 = tr->trace_idx; + + for (i = 0; i < MAX_TRACE; i++) { + idx = (idx0 + i) % MAX_TRACE; + save->trace[i] = tr->trace[idx]; + } + 
save->trace_idx = MAX_TRACE; + } else { + save->trace_idx = tr->trace_idx; + + memcpy(save->trace, tr->trace, + min(save->trace_idx, MAX_TRACE) * + sizeof(struct trace_entry)); + } + save->underrun = tr->underrun; + save->overrun = tr->overrun; +} + + +struct block_idx { + int idx[NR_CPUS]; +}; + +/* + * return the trace entry (position) of the smallest-timestamp + * one (that is still in the valid idx range): + */ +static int min_idx(struct block_idx *bidx) +{ + cycle_t min_stamp = (cycle_t) -1; + struct trace_entry *entry; + int cpu, min_cpu = -1, idx; + + for_each_online_cpu(cpu) { + idx = bidx->idx[cpu]; + if (idx >= min(max_tr.traces[cpu].trace_idx, MAX_TRACE)) + continue; + if (idx >= MAX_TRACE*NR_CPUS) { + printk("huh: idx (%d) > %ld*%d!\n", idx, MAX_TRACE, + NR_CPUS); + WARN_ON(1); + break; + } + entry = max_tr.traces[cpu].trace + bidx->idx[cpu]; + if (entry->timestamp < min_stamp) { + min_cpu = cpu; + min_stamp = entry->timestamp; + } + } + + return min_cpu; +} + +/* + * This code is called to construct an output trace from + * the maximum trace. Having separate traces serves both + * atomicity (a new max might be saved while we are busy + * accessing /proc/latency_trace) and it is also used to + * delay the (expensive) sorting of the output trace by + * timestamps, in the trace_all_cpus case. + */ +static void update_out_trace(void) +{ + struct trace_entry *out_entry, *entry, *tmp; + cycle_t stamp, first_stamp, last_stamp; + struct block_idx bidx = { { 0, }, }; + struct cpu_trace *tmp_max, *tmp_out; + int cpu, sum, entries, underrun_sum, overrun_sum; + + /* + * For out_tr we only have the first array's trace entries + * allocated - and they have are larger on SMP to make room + * for all trace entries from all CPUs. + */ + tmp_out = out_tr.traces + 0; + tmp_max = max_tr.traces + max_tr.cpu; + /* + * Easier to copy this way. Note: the trace buffer is private + * to the output buffer, so preserve it: + */ + copy_trace(tmp_out, tmp_max, 0); + tmp = tmp_out->trace; + *tmp_out = *tmp_max; + tmp_out->trace = tmp; + + out_tr.cpu = max_tr.cpu; + + if (!tmp_out->trace) + return; + + out_entry = tmp_out->trace + 0; + + if (!trace_all_cpus) { + entries = min(tmp_out->trace_idx, MAX_TRACE); + if (!entries) + return; + out_tr.first_timestamp = tmp_out->trace[0].timestamp; + out_tr.last_timestamp = tmp_out->trace[entries-1].timestamp; + return; + } + /* + * Find the range of timestamps that are fully traced in + * all CPU traces. (since CPU traces can cover a variable + * range of time, we have to find the best range.) + */ + first_stamp = 0; + for_each_online_cpu(cpu) { + tmp_max = max_tr.traces + cpu; + stamp = tmp_max->trace[0].timestamp; + if (stamp > first_stamp) + first_stamp = stamp; + } + /* + * Save the timestamp range: + */ + tmp_max = max_tr.traces + max_tr.cpu; + entries = min(tmp_max->trace_idx, MAX_TRACE); + /* + * No saved trace yet? + */ + if (!entries) { + out_tr.traces[0].trace_idx = 0; + return; + } + + last_stamp = tmp_max->trace[entries-1].timestamp; + + if (last_stamp < first_stamp) { + WARN_ON(1); + + for_each_online_cpu(cpu) { + tmp_max = max_tr.traces + cpu; + entries = min(tmp_max->trace_idx, MAX_TRACE); + printk("CPU%d: %016Lx (%016Lx) ... 
#%d (%016Lx) %016Lx\n", + cpu, + tmp_max->trace[0].timestamp, + tmp_max->trace[1].timestamp, + entries, + tmp_max->trace[entries-2].timestamp, + tmp_max->trace[entries-1].timestamp); + } + tmp_max = max_tr.traces + max_tr.cpu; + entries = min(tmp_max->trace_idx, MAX_TRACE); + + printk("CPU%d entries: %d\n", max_tr.cpu, entries); + printk("first stamp: %016Lx\n", first_stamp); + printk(" last stamp: %016Lx\n", first_stamp); + } + +#if 0 + printk("first_stamp: %Ld [%016Lx]\n", first_stamp, first_stamp); + printk(" last_stamp: %Ld [%016Lx]\n", last_stamp, last_stamp); + printk(" +1 stamp: %Ld [%016Lx]\n", + tmp_max->trace[entries].timestamp, + tmp_max->trace[entries].timestamp); + printk(" +2 stamp: %Ld [%016Lx]\n", + tmp_max->trace[entries+1].timestamp, + tmp_max->trace[entries+1].timestamp); + printk(" delta: %Ld\n", last_stamp-first_stamp); + printk(" entries: %d\n", entries); +#endif + + out_tr.first_timestamp = first_stamp; + out_tr.last_timestamp = last_stamp; + + /* + * Fetch trace entries one by one, in increasing timestamp + * order. Start at first_stamp, stop at last_stamp: + */ + sum = 0; + for (;;) { + cpu = min_idx(&bidx); + if (cpu == -1) + break; + entry = max_tr.traces[cpu].trace + bidx.idx[cpu]; + if (entry->timestamp > last_stamp) + break; + + bidx.idx[cpu]++; + if (entry->timestamp < first_stamp) + continue; + *out_entry = *entry; + out_entry++; + sum++; + if (sum >= MAX_TRACE*NR_CPUS) { + printk("huh: sum (%d) > %ld*%d!\n", sum, MAX_TRACE, + NR_CPUS); + WARN_ON(1); + break; + } + } + + sum = 0; + underrun_sum = 0; + overrun_sum = 0; + for_each_online_cpu(cpu) { + sum += max_tr.traces[cpu].trace_idx; + underrun_sum += atomic_read(&max_tr.traces[cpu].underrun); + overrun_sum += atomic_read(&max_tr.traces[cpu].overrun); + } + tmp_out->trace_idx = sum; + atomic_set(&tmp_out->underrun, underrun_sum); + atomic_set(&tmp_out->overrun, overrun_sum); +} + +static void notrace print_help_header(struct seq_file *m) +{ + seq_puts(m, " _------=> CPU# \n"); + seq_puts(m, " / _-----=> irqs-off \n"); + seq_puts(m, " | / _----=> need-resched \n"); + seq_puts(m, " || / _---=> hardirq/softirq \n"); + seq_puts(m, " ||| / _--=> preempt-depth \n"); + seq_puts(m, " |||| / \n"); + seq_puts(m, " ||||| delay \n"); + seq_puts(m, " cmd pid ||||| time | caller \n"); + seq_puts(m, " \\ / ||||| \\ | / \n"); +} + +static void * notrace l_start(struct seq_file *m, loff_t *pos) +{ + loff_t n = *pos; + unsigned long entries; + struct cpu_trace *tr = out_tr.traces + 0; + + down(&out_mutex); + /* + * if the file is being read newly, update the output trace: + */ + if (!n) { + // TODO: use the sequence counter here to optimize + down(&max_mutex); + update_out_trace(); + up(&max_mutex); +#if 0 + if (!tr->trace_idx) { + up(&out_mutex); + return NULL; + } +#endif + construct_pid_to_cmdline(tr); + } + entries = min(tr->trace_idx, MAX_TRACE); + + if (!n) { + seq_printf(m, "preemption latency trace v1.1.5 on %s\n", + UTS_RELEASE); + seq_puts(m, "--------------------------------------------------------------------\n"); + seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d | (M:%s VP:%d, KP:%d, SP:%d HP:%d", + cycles_to_usecs(tr->saved_latency), + entries, + (entries + atomic_read(&tr->underrun) + + atomic_read(&tr->overrun)), + out_tr.cpu, +#if defined(CONFIG_PREEMPT_NONE) + "server", +#elif defined(CONFIG_PREEMPT_VOLUNTARY) + "desktop", +#elif defined(CONFIG_PREEMPT_DESKTOP) + "preempt", +#else + "rt", +#endif + 0, 0, +#ifdef CONFIG_PREEMPT_SOFTIRQS + softirq_preemption +#else + 0 +#endif + , +#ifdef 
CONFIG_PREEMPT_HARDIRQS + hardirq_preemption +#else + 0 +#endif + ); +#ifdef CONFIG_SMP + seq_printf(m, " #P:%d)\n", num_online_cpus()); +#else + seq_puts(m, ")\n"); +#endif + seq_puts(m, " -----------------\n"); + seq_printf(m, " | task: %.16s-%d (uid:%ld nice:%ld policy:%ld rt_prio:%ld)\n", + tr->comm, tr->pid, tr->uid, tr->nice, + tr->policy, tr->rt_priority); + seq_puts(m, " -----------------\n"); + if (trace_user_triggered) { + seq_puts(m, " => started at: "); + print_name_offset(m, tr->critical_start); + seq_puts(m, "\n => ended at: "); + print_name_offset(m, tr->critical_end); + seq_puts(m, "\n"); + } + seq_puts(m, "\n"); + + if (!trace_verbose) + print_help_header(m); + } + if (n >= entries || !tr->trace) + return NULL; + + return tr->trace + n; +} + +static void * notrace l_next(struct seq_file *m, void *p, loff_t *pos) +{ + struct cpu_trace *tr = out_tr.traces; + unsigned long entries = min(tr->trace_idx, MAX_TRACE); + + WARN_ON(!tr->trace); + + if (++*pos >= entries) { + if (*pos == entries) + seq_puts(m, "\n\nvim:ft=help\n"); + return NULL; + } + return tr->trace + *pos; +} + +static void notrace l_stop(struct seq_file *m, void *p) +{ + up(&out_mutex); +} + +static void print_timestamp(struct seq_file *m, unsigned long abs_usecs, + unsigned long rel_usecs) +{ + seq_printf(m, " %4ldus", abs_usecs); + if (rel_usecs > 100) + seq_puts(m, "!: "); + else if (rel_usecs > 1) + seq_puts(m, "+: "); + else + seq_puts(m, " : "); +} + +static void +print_timestamp_short(struct seq_file *m, unsigned long abs_usecs, + unsigned long rel_usecs) +{ + seq_printf(m, " %4ldus", abs_usecs); + if (rel_usecs > 100) + seq_putc(m, '!'); + else if (rel_usecs > 1) + seq_putc(m, '+'); + else + seq_putc(m, ' '); +} + +static void +print_generic(struct seq_file *m, struct trace_entry *entry) +{ + int hardirq, softirq; + + seq_printf(m, "%8.8s-%-5d ", pid_to_cmdline(entry->pid), entry->pid); + seq_printf(m, "%d", entry->cpu); + seq_printf(m, "%c%c", + (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : + (entry->flags & TRACE_FLAG_IRQS_HARD_OFF) ? 'D' : '.', + (entry->flags & TRACE_FLAG_NEED_RESCHED_DELAYED) ? 'n' : + ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 
'N' : '.')); + + hardirq = entry->flags & TRACE_FLAG_HARDIRQ; + softirq = entry->flags & TRACE_FLAG_SOFTIRQ; + if (hardirq && softirq) + seq_putc(m, 'H'); + else { + if (hardirq) + seq_putc(m, 'h'); + else { + if (softirq) + seq_putc(m, 's'); + else + seq_putc(m, '.'); + } + } + + if (entry->preempt_count) + seq_printf(m, "%x", entry->preempt_count); + else + seq_puts(m, "."); +} + + +static int notrace l_show_fn(struct seq_file *m, unsigned long trace_idx, + struct trace_entry *entry, struct trace_entry *entry0, + struct trace_entry *next_entry) +{ + unsigned long abs_usecs, rel_usecs; + + abs_usecs = cycles_to_us(entry->timestamp - entry0->timestamp); + rel_usecs = cycles_to_us(next_entry->timestamp - entry->timestamp); + + if (trace_verbose) { + seq_printf(m, "%16s %5d %d %d %08x %08lx [%016Lx] %ld.%03ldms (+%ld.%03ldms): ", + pid_to_cmdline(entry->pid), + entry->pid, entry->cpu, entry->flags, + entry->preempt_count, trace_idx, + entry->timestamp, abs_usecs/1000, + abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000); + print_name_offset(m, entry->u.fn.eip); + seq_puts(m, " ("); + print_name_offset(m, entry->u.fn.parent_eip); + seq_puts(m, ")\n"); + } else { + print_generic(m, entry); + print_timestamp(m, abs_usecs, rel_usecs); + print_name(m, entry->u.fn.eip); + seq_puts(m, " ("); + print_name(m, entry->u.fn.parent_eip); + seq_puts(m, ")\n"); + } + return 0; +} + +static int notrace l_show_special(struct seq_file *m, unsigned long trace_idx, + struct trace_entry *entry, struct trace_entry *entry0, + struct trace_entry *next_entry, int mode64) +{ + unsigned long abs_usecs, rel_usecs; + + abs_usecs = cycles_to_us(entry->timestamp - entry0->timestamp); + rel_usecs = cycles_to_us(next_entry->timestamp - entry->timestamp); + + print_generic(m, entry); + print_timestamp(m, abs_usecs, rel_usecs); + if (trace_verbose) + print_name_offset(m, entry->u.special.eip); + else + print_name(m, entry->u.special.eip); + + if (!mode64) { + /* + * For convenience, print small numbers in decimal: + */ + if (abs((int)entry->u.special.v1) < 100000) + seq_printf(m, " (%5ld ", entry->u.special.v1); + else + seq_printf(m, " (%lx ", entry->u.special.v1); + if (abs((int)entry->u.special.v2) < 100000) + seq_printf(m, "%5ld ", entry->u.special.v2); + else + seq_printf(m, "%lx ", entry->u.special.v2); + if (abs((int)entry->u.special.v3) < 100000) + seq_printf(m, "%5ld)\n", entry->u.special.v3); + else + seq_printf(m, "%lx)\n", entry->u.special.v3); + } else { + seq_printf(m, " (%13Ld %ld)\n", + ((u64)entry->u.special.v1 << 32) + + (u64)entry->u.special.v2, entry->u.special.v3); + } + return 0; +} + +static int notrace +l_show_special_pid(struct seq_file *m, unsigned long trace_idx, + struct trace_entry *entry, struct trace_entry *entry0, + struct trace_entry *next_entry) +{ + unsigned long abs_usecs, rel_usecs; + unsigned int pid; + + pid = entry->u.special.v1; + + abs_usecs = cycles_to_us(entry->timestamp - entry0->timestamp); + rel_usecs = cycles_to_us(next_entry->timestamp - entry->timestamp); + + print_generic(m, entry); + print_timestamp(m, abs_usecs, rel_usecs); + if (trace_verbose) + print_name_offset(m, entry->u.special.eip); + else + print_name(m, entry->u.special.eip); + seq_printf(m, " <%.8s-%d> (%ld %ld)\n", + pid_to_cmdline(pid), pid, + entry->u.special.v2, entry->u.special.v3); + + return 0; +} + +static int notrace +l_show_special_sym(struct seq_file *m, unsigned long trace_idx, + struct trace_entry *entry, struct trace_entry *entry0, + struct trace_entry *next_entry, int mode64) +{ + unsigned long 
abs_usecs, rel_usecs; + + abs_usecs = cycles_to_us(entry->timestamp - entry0->timestamp); + rel_usecs = cycles_to_us(next_entry->timestamp - entry->timestamp); + + print_generic(m, entry); + print_timestamp(m, abs_usecs, rel_usecs); + if (trace_verbose) + print_name_offset(m, entry->u.special.eip); + else + print_name(m, entry->u.special.eip); + + seq_puts(m, "()<-"); + print_name(m, entry->u.special.v1); + seq_puts(m, "()<-"); + print_name(m, entry->u.special.v2); + seq_puts(m, "()<-"); + print_name(m, entry->u.special.v3); + seq_puts(m, "()\n"); + + return 0; +} + + +static int notrace l_show_cmdline(struct seq_file *m, unsigned long trace_idx, + struct trace_entry *entry, struct trace_entry *entry0, + struct trace_entry *next_entry) +{ + unsigned long abs_usecs, rel_usecs; + + if (!trace_verbose) + return 0; + + abs_usecs = cycles_to_us(entry->timestamp - entry0->timestamp); + rel_usecs = cycles_to_us(next_entry->timestamp - entry->timestamp); + + seq_printf(m, + "[ => %16s ] %ld.%03ldms (+%ld.%03ldms)\n", + entry->u.cmdline.str, + abs_usecs/1000, abs_usecs % 1000, + rel_usecs/1000, rel_usecs % 1000); + + return 0; +} + +extern unsigned long sys_call_table[NR_syscalls]; + +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86) +extern unsigned long ia32_sys_call_table[], ia32_syscall_end[]; +#define IA32_NR_syscalls (ia32_syscall_end - ia32_sys_call_table) +#endif + +static int notrace l_show_syscall(struct seq_file *m, unsigned long trace_idx, + struct trace_entry *entry, struct trace_entry *entry0, + struct trace_entry *next_entry) +{ + unsigned long abs_usecs, rel_usecs; + unsigned long nr; + + abs_usecs = cycles_to_us(entry->timestamp - entry0->timestamp); + rel_usecs = cycles_to_us(next_entry->timestamp - entry->timestamp); + + print_generic(m, entry); + print_timestamp_short(m, abs_usecs, rel_usecs); + + seq_puts(m, "> "); + nr = entry->u.syscall.nr; +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86) + if (nr & 0x80000000) { + nr &= ~0x80000000; + if (nr < IA32_NR_syscalls) + print_name(m, ia32_sys_call_table[nr]); + else + seq_printf(m, "", nr); + } else +#endif + if (nr < NR_syscalls) + print_name(m, sys_call_table[nr]); + else + seq_printf(m, "", nr); + +#ifdef CONFIG_64BIT + seq_printf(m, " (%016lx %016lx %016lx)\n", + entry->u.syscall.p1, entry->u.syscall.p2, entry->u.syscall.p3); +#else + seq_printf(m, " (%08lx %08lx %08lx)\n", + entry->u.syscall.p1, entry->u.syscall.p2, entry->u.syscall.p3); +#endif + + return 0; +} + +static int notrace l_show_sysret(struct seq_file *m, unsigned long trace_idx, + struct trace_entry *entry, struct trace_entry *entry0, + struct trace_entry *next_entry) +{ + unsigned long abs_usecs, rel_usecs; + + abs_usecs = cycles_to_us(entry->timestamp - entry0->timestamp); + rel_usecs = cycles_to_us(next_entry->timestamp - entry->timestamp); + + print_generic(m, entry); + print_timestamp_short(m, abs_usecs, rel_usecs); + + seq_printf(m, "< (%ld)\n", entry->u.sysret.ret); + + return 0; +} + + +static int notrace l_show(struct seq_file *m, void *p) +{ + struct cpu_trace *tr = out_tr.traces; + struct trace_entry *entry, *entry0, *next_entry; + unsigned long trace_idx; + + cond_resched(); + entry = p; + if (entry->timestamp < out_tr.first_timestamp) + return 0; + if (entry->timestamp > out_tr.last_timestamp) + return 0; + + entry0 = tr->trace; + trace_idx = entry - entry0; + + if (trace_idx + 1 < tr->trace_idx) + next_entry = entry + 1; + else + next_entry = entry; + + if (trace_verbose) + seq_printf(m, "(T%d/#%ld) ", entry->type, trace_idx); + + switch 
(entry->type) { + case TRACE_FN: + l_show_fn(m, trace_idx, entry, entry0, next_entry); + break; + case TRACE_SPECIAL: + l_show_special(m, trace_idx, entry, entry0, next_entry, 0); + break; + case TRACE_SPECIAL_PID: + l_show_special_pid(m, trace_idx, entry, entry0, next_entry); + break; + case TRACE_SPECIAL_U64: + l_show_special(m, trace_idx, entry, entry0, next_entry, 1); + break; + case TRACE_SPECIAL_SYM: + l_show_special_sym(m, trace_idx, entry, entry0, + next_entry, 1); + break; + case TRACE_CMDLINE: + l_show_cmdline(m, trace_idx, entry, entry0, next_entry); + break; + case TRACE_SYSCALL: + l_show_syscall(m, trace_idx, entry, entry0, next_entry); + break; + case TRACE_SYSRET: + l_show_sysret(m, trace_idx, entry, entry0, next_entry); + break; + default: + seq_printf(m, "unknown trace type %d\n", entry->type); + } + return 0; +} + +struct seq_operations latency_trace_op = { + .start = l_start, + .next = l_next, + .stop = l_stop, + .show = l_show +}; + +/* + * Copy the new maximum trace into the separate maximum-trace + * structure. (this way the maximum trace is permanently saved, + * for later retrieval via /proc/latency_trace) + */ +static void update_max_tr(struct cpu_trace *tr) +{ + struct cpu_trace *save; + int cpu, all_cpus = 0; + +#ifdef CONFIG_PREEMPT + WARN_ON(!preempt_count() && !irqs_disabled()); +#endif + + max_tr.cpu = tr->cpu; + save = max_tr.traces + tr->cpu; + + if ((wakeup_timing || trace_user_triggered || trace_print_on_crash || + print_functions) && trace_all_cpus) { + all_cpus = 1; + for_each_online_cpu(cpu) + atomic_inc(&cpu_traces[cpu].disabled); + } + + save->saved_latency = preempt_max_latency; + save->preempt_timestamp = tr->preempt_timestamp; + save->critical_start = tr->critical_start; + save->critical_end = tr->critical_end; + save->critical_sequence = tr->critical_sequence; + + memcpy(save->comm, current->comm, CMDLINE_BYTES); + save->pid = current->pid; + save->uid = current->uid; + save->nice = current->static_prio - 20 - MAX_RT_PRIO; + save->policy = current->policy; + save->rt_priority = current->rt_priority; + + if (all_cpus) { + for_each_online_cpu(cpu) { + copy_trace(max_tr.traces + cpu, cpu_traces + cpu, 1); + atomic_dec(&cpu_traces[cpu].disabled); + } + } else + copy_trace(save, tr, 1); +} + +#else /* !EVENT_TRACE */ + +static inline void notrace +____trace(int cpu, enum trace_type type, struct cpu_trace *tr, + unsigned long eip, unsigned long parent_eip, + unsigned long v1, unsigned long v2, unsigned long v3, + unsigned long flags) +{ +} + +static inline void notrace +___trace(enum trace_type type, unsigned long eip, unsigned long parent_eip, + unsigned long v1, unsigned long v2, + unsigned long v3) +{ +} + +static inline void notrace __trace(unsigned long eip, unsigned long parent_eip) +{ +} + +static inline void update_max_tr(struct cpu_trace *tr) +{ +} + +static inline void notrace _trace_cmdline(int cpu, struct cpu_trace *tr) +{ +} + +#endif + +static int setup_preempt_thresh(char *s) +{ + int thresh; + + get_option(&s, &thresh); + if (thresh > 0) { + preempt_thresh = usecs_to_cycles(thresh); + printk("Preemption threshold = %u us\n", thresh); + } + return 1; +} +__setup("preempt_thresh=", setup_preempt_thresh); + +static inline void notrace reset_trace_idx(int cpu, struct cpu_trace *tr) +{ + if (trace_all_cpus) + for_each_online_cpu(cpu) { + tr = cpu_traces + cpu; + tr->trace_idx = 0; + atomic_set(&tr->underrun, 0); + atomic_set(&tr->overrun, 0); + } + else{ + tr->trace_idx = 0; + atomic_set(&tr->underrun, 0); + atomic_set(&tr->overrun, 0); + } 
+} + +#ifdef CONFIG_CRITICAL_TIMING + +static void notrace +check_critical_timing(int cpu, struct cpu_trace *tr, unsigned long parent_eip) +{ + unsigned long latency, t0, t1; + cycle_t T0, T1, T2, delta; + unsigned long flags; + + if (trace_user_triggered) + return; + /* + * usecs conversion is slow so we try to delay the conversion + * as long as possible: + */ + T0 = tr->preempt_timestamp; + T1 = get_monotonic_cycles(); + delta = T1-T0; + + local_save_flags(flags); + + if (!report_latency(delta)) + goto out; + + ____trace(cpu, TRACE_FN, tr, CALLER_ADDR0, parent_eip, 0, 0, 0, flags); + /* + * Update the timestamp, because the trace entry above + * might change it (it can only get larger so the latency + * is fair to be reported): + */ + T2 = get_monotonic_cycles(); + + delta = T2-T0; + + latency = cycles_to_usecs(delta); + latency_hist(tr->latency_type, cpu, latency); + + if (latency_hist_flag) { + if (preempt_max_latency >= delta) + goto out; + } + + if (tr->critical_sequence != max_sequence || down_trylock(&max_mutex)) + goto out; + +#ifndef CONFIG_CRITICAL_LATENCY_HIST + if (!preempt_thresh && preempt_max_latency > delta) { + printk("bug: updating %016Lx > %016Lx?\n", + preempt_max_latency, delta); + printk(" [%016Lx %016Lx %016Lx]\n", T0, T1, T2); + } +#endif + + preempt_max_latency = delta; + t0 = cycles_to_usecs(T0); + t1 = cycles_to_usecs(T1); + + tr->critical_end = parent_eip; + + update_max_tr(tr); + +#ifndef CONFIG_CRITICAL_LATENCY_HIST + if (preempt_thresh) + printk("(%16s-%-5d|#%d): %lu us critical section " + "violates %lu us threshold.\n" + " => started at timestamp %lu: ", + current->comm, current->pid, + raw_smp_processor_id(), + latency, cycles_to_usecs(preempt_thresh), t0); + else + printk("(%16s-%-5d|#%d): new %lu us maximum-latency " + "critical section.\n => started at timestamp %lu: ", + current->comm, current->pid, + raw_smp_processor_id(), + latency, t0); + + print_symbol("<%s>\n", tr->critical_start); + printk(" => ended at timestamp %lu: ", t1); + print_symbol("<%s>\n", tr->critical_end); + dump_stack(); + t1 = cycles_to_usecs(get_monotonic_cycles()); + printk(" => dump-end timestamp %lu\n\n", t1); +#endif + + max_sequence++; + + up(&max_mutex); + +out: + tr->critical_sequence = max_sequence; + tr->preempt_timestamp = get_monotonic_cycles(); + tr->early_warning = 0; + reset_trace_idx(cpu, tr); + _trace_cmdline(cpu, tr); + ____trace(cpu, TRACE_FN, tr, CALLER_ADDR0, parent_eip, 0, 0, 0, flags); +} + +void notrace touch_critical_timing(void) +{ + int cpu = raw_smp_processor_id(); + struct cpu_trace *tr = cpu_traces + cpu; + + if (!tr->critical_start || atomic_read(&tr->disabled) || + trace_user_triggered || wakeup_timing) + return; + + if (preempt_count() > 0 && tr->critical_start) { + atomic_inc(&tr->disabled); + check_critical_timing(cpu, tr, CALLER_ADDR0); + tr->critical_start = CALLER_ADDR0; + tr->critical_sequence = max_sequence; + atomic_dec(&tr->disabled); + } +} +EXPORT_SYMBOL(touch_critical_timing); + +void notrace stop_critical_timing(void) +{ + struct cpu_trace *tr = cpu_traces + raw_smp_processor_id(); + + tr->critical_start = 0; +} +EXPORT_SYMBOL(stop_critical_timing); + +static inline void notrace +__start_critical_timing(unsigned long eip, unsigned long parent_eip, + int latency_type) +{ + int cpu = raw_smp_processor_id(); + struct cpu_trace *tr = cpu_traces + cpu; + unsigned long flags; + + if (tr->critical_start || atomic_read(&tr->disabled) || + trace_user_triggered || wakeup_timing) + return; + + atomic_inc(&tr->disabled); + + 
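/* Open a new timing window: tag it with the current max_sequence, timestamp the start and record the starting IP of the critical section: */ +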
tr->critical_sequence = max_sequence; + tr->preempt_timestamp = get_monotonic_cycles(); + tr->critical_start = eip; + reset_trace_idx(cpu, tr); + tr->latency_type = latency_type; + _trace_cmdline(cpu, tr); + + local_save_flags(flags); + ____trace(cpu, TRACE_FN, tr, eip, parent_eip, 0, 0, 0, flags); + + atomic_dec(&tr->disabled); +} + +static inline void notrace +__stop_critical_timing(unsigned long eip, unsigned long parent_eip) +{ + int cpu = raw_smp_processor_id(); + struct cpu_trace *tr = cpu_traces + cpu; + unsigned long flags; + + if (!tr->critical_start || atomic_read(&tr->disabled) || + trace_user_triggered || wakeup_timing) + return; + + atomic_inc(&tr->disabled); + local_save_flags(flags); + ____trace(cpu, TRACE_FN, tr, eip, parent_eip, 0, 0, 0, flags); + check_critical_timing(cpu, tr, eip); + tr->critical_start = 0; + atomic_dec(&tr->disabled); +} + +#endif + +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + +#ifdef CONFIG_LOCKDEP + +void notrace time_hardirqs_on(unsigned long a0, unsigned long a1) +{ + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_off_preempt_count() && irqs_disabled_flags(flags)) + __stop_critical_timing(a0, a1); +} + +void notrace time_hardirqs_off(unsigned long a0, unsigned long a1) +{ + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_off_preempt_count() && irqs_disabled_flags(flags)) + __start_critical_timing(a0, a1, INTERRUPT_LATENCY); +} + +#else /* !CONFIG_LOCKDEP */ + +/* + * Dummy: + */ + +void early_boot_irqs_off(void) +{ +} + +void early_boot_irqs_on(void) +{ +} + +void trace_softirqs_on(unsigned long ip) +{ +} + +void trace_softirqs_off(unsigned long ip) +{ +} + +inline void print_irqtrace_events(struct task_struct *curr) +{ +} + +/* + * We are only interested in hardirq on/off events: + */ +void notrace trace_hardirqs_on(void) +{ + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_off_preempt_count() && irqs_disabled_flags(flags)) + __stop_critical_timing(CALLER_ADDR0, 0 /* CALLER_ADDR1 */); +} + +EXPORT_SYMBOL(trace_hardirqs_on); + +void notrace trace_hardirqs_off(void) +{ + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_off_preempt_count() && irqs_disabled_flags(flags)) + __start_critical_timing(CALLER_ADDR0, 0 /* CALLER_ADDR1 */, + INTERRUPT_LATENCY); +} + +EXPORT_SYMBOL(trace_hardirqs_off); + +#endif /* !CONFIG_LOCKDEP */ + +#endif /* CONFIG_CRITICAL_IRQSOFF_TIMING */ + +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_CRITICAL_TIMING) + +static inline unsigned long get_parent_eip(void) +{ + unsigned long parent_eip = CALLER_ADDR1; + + if (in_lock_functions(parent_eip)) { + parent_eip = CALLER_ADDR2; + if (in_lock_functions(parent_eip)) + parent_eip = CALLER_ADDR3; + } + + return parent_eip; +} + +void notrace add_preempt_count(unsigned int val) +{ + unsigned long eip = CALLER_ADDR0; + unsigned long parent_eip = get_parent_eip(); + +#ifdef CONFIG_DEBUG_PREEMPT + /* + * Underflow? + */ + if (DEBUG_WARN_ON(((int)preempt_count() < 0))) + return; + /* + * Spinlock count overflowing soon? 
+ */ + if (DEBUG_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10)) + return; +#endif + + preempt_count() += val; +#ifdef CONFIG_PREEMPT_TRACE + if (val <= 10) { + unsigned int idx = preempt_count() & PREEMPT_MASK; + if (idx < MAX_PREEMPT_TRACE) { + current->preempt_trace_eip[idx] = eip; + current->preempt_trace_parent_eip[idx] = parent_eip; + } + } +#endif +#ifdef CONFIG_CRITICAL_PREEMPT_TIMING + { +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_disabled_flags(flags)) +#endif + if (preempt_count() == val) + __start_critical_timing(eip, parent_eip, + PREEMPT_LATENCY); + } +#endif + (void)eip, (void)parent_eip; +} +EXPORT_SYMBOL(add_preempt_count); + +void notrace sub_preempt_count(unsigned int val) +{ +#ifdef CONFIG_DEBUG_PREEMPT + /* + * Underflow? + */ + if (DEBUG_WARN_ON(unlikely(val > preempt_count()))) + return; + /* + * Is the spinlock portion underflowing? + */ + if (DEBUG_WARN_ON((val < PREEMPT_MASK) && + !(preempt_count() & PREEMPT_MASK))) + return; +#endif + +#ifdef CONFIG_CRITICAL_PREEMPT_TIMING + { +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_disabled_flags(flags)) +#endif + if (preempt_count() == val) + __stop_critical_timing(CALLER_ADDR0, + CALLER_ADDR1); + } +#endif + preempt_count() -= val; +} + +EXPORT_SYMBOL(sub_preempt_count); + +void notrace mask_preempt_count(unsigned int mask) +{ + unsigned long eip = CALLER_ADDR0; + unsigned long parent_eip = get_parent_eip(); + + preempt_count() |= mask; + +#ifdef CONFIG_CRITICAL_PREEMPT_TIMING + { +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_disabled_flags(flags)) +#endif + if (preempt_count() == mask) + __start_critical_timing(eip, parent_eip, + PREEMPT_LATENCY); + } +#endif + (void) eip, (void) parent_eip; +} +EXPORT_SYMBOL(mask_preempt_count); + +void notrace unmask_preempt_count(unsigned int mask) +{ +#ifdef CONFIG_CRITICAL_PREEMPT_TIMING + { +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_disabled_flags(flags)) +#endif + if (preempt_count() == mask) + __stop_critical_timing(CALLER_ADDR0, + CALLER_ADDR1); + } +#endif + preempt_count() &= ~mask; +} +EXPORT_SYMBOL(unmask_preempt_count); + + +#endif + +/* + * Wakeup latency timing/tracing. We get upcalls from the scheduler + * when a task is being woken up and we time/trace it until it gets + * to a CPU - or an even-higher-prio task supercedes it. 
(in that + * case we throw away the currently traced task - we dont try to + * handle nesting, that simplifies things significantly) + */ +#ifdef CONFIG_WAKEUP_TIMING + +static void notrace +check_wakeup_timing(struct cpu_trace *tr, unsigned long parent_eip, + unsigned long *flags) +{ + int cpu = raw_smp_processor_id(); + unsigned long latency, t0, t1; + cycle_t T0, T1, delta; + + if (trace_user_triggered) + return; + + atomic_inc(&tr->disabled); + if (atomic_read(&tr->disabled) != 1) + goto out; + + T0 = tr->preempt_timestamp; + T1 = get_monotonic_cycles(); + /* + * Any wraparound or time warp and we are out: + */ + if (T0 > T1) + goto out; + delta = T1-T0; + + if (!report_latency(delta)) + goto out; + + ____trace(smp_processor_id(), TRACE_FN, tr, CALLER_ADDR0, parent_eip, + 0, 0, 0, *flags); + + latency = cycles_to_usecs(delta); + latency_hist(tr->latency_type, cpu, latency); + + if (latency_hist_flag) { + if (preempt_max_latency >= delta) + goto out; + } + + if (tr->critical_sequence != max_sequence || down_trylock(&max_mutex)) + goto out; + +#ifndef CONFIG_WAKEUP_LATENCY_HIST + if (!preempt_thresh && preempt_max_latency > delta) { + printk("bug2: updating %016lx > %016Lx?\n", + preempt_max_latency, delta); + printk(" [%016Lx %016Lx]\n", T0, T1); + } +#endif + + preempt_max_latency = delta; + t0 = cycles_to_usecs(T0); + t1 = cycles_to_usecs(T1); + tr->critical_end = parent_eip; + + update_max_tr(tr); + + atomic_dec(&tr->disabled); + __raw_spin_unlock(&sch.trace_lock); + local_irq_restore(*flags); + +#ifndef CONFIG_WAKEUP_LATENCY_HIST + if (preempt_thresh) + printk("(%16s-%-5d|#%d): %lu us wakeup latency " + "violates %lu us threshold.\n", + current->comm, current->pid, + raw_smp_processor_id(), latency, + cycles_to_usecs(preempt_thresh)); + else + printk("(%16s-%-5d|#%d): new %lu us maximum-latency " + "wakeup.\n", current->comm, current->pid, + raw_smp_processor_id(), latency); +#endif + + max_sequence++; + + up(&max_mutex); + + return; + +out: + atomic_dec(&tr->disabled); + __raw_spin_unlock(&sch.trace_lock); + local_irq_restore(*flags); +} + +/* + * Start wakeup latency tracing - called with the runqueue held + * and interrupts disabled: + */ +void __trace_start_sched_wakeup(struct task_struct *p) +{ + struct cpu_trace *tr; + int cpu; + + if (trace_user_triggered || !wakeup_timing) { + trace_special_pid(p->pid, p->prio, -1); + return; + } + + __raw_spin_lock(&sch.trace_lock); + if (sch.task && (sch.task->prio <= p->prio)) + goto out_unlock; + + /* + * New highest-prio task just woke up - start tracing: + */ + sch.task = p; + cpu = task_cpu(p); + sch.cpu = cpu; + /* + * We keep using this CPU's trace buffer even if the task + * gets migrated to another CPU. Tracing only happens on + * the CPU that 'owns' the highest-prio task so it's + * fundamentally single-threaded. 
+ */ + sch.tr = tr = cpu_traces + cpu; + reset_trace_idx(cpu, tr); + +// if (!atomic_read(&tr->disabled)) { + atomic_inc(&tr->disabled); + tr->critical_sequence = max_sequence; + tr->preempt_timestamp = get_monotonic_cycles(); + tr->latency_type = WAKEUP_LATENCY; + tr->critical_start = CALLER_ADDR0; + _trace_cmdline(raw_smp_processor_id(), tr); + atomic_dec(&tr->disabled); +// } + + mcount(); + trace_special_pid(p->pid, p->prio, cpu); + trace_special_sym(); +out_unlock: + __raw_spin_unlock(&sch.trace_lock); +} + +void trace_stop_sched_switched(struct task_struct *p) +{ + struct cpu_trace *tr; + unsigned long flags; + + if (trace_user_triggered || !wakeup_timing) + return; + + local_irq_save(flags); + __raw_spin_lock(&sch.trace_lock); + if (p == sch.task) { + trace_special_pid(p->pid, p->prio, task_cpu(p)); + + sch.task = NULL; + tr = sch.tr; + sch.tr = NULL; + WARN_ON(!tr); + /* auto-unlocks the spinlock: */ + check_wakeup_timing(tr, CALLER_ADDR0, &flags); + } else { + if (sch.task) + trace_special_pid(sch.task->pid, sch.task->prio, + p->prio); + if (sch.task && (sch.task->prio >= p->prio)) + sch.task = NULL; + __raw_spin_unlock(&sch.trace_lock); + } + local_irq_restore(flags); +} + +void trace_change_sched_cpu(struct task_struct *p, int new_cpu) +{ + unsigned long flags; + + if (!wakeup_timing) + return; + + trace_special_pid(p->pid, task_cpu(p), new_cpu); + trace_special_sym(); + local_irq_save(flags); + __raw_spin_lock(&sch.trace_lock); + if (p == sch.task && task_cpu(p) != new_cpu) { + sch.cpu = new_cpu; + trace_special(task_cpu(p), new_cpu, 0); + } + __raw_spin_unlock(&sch.trace_lock); + local_irq_restore(flags); +} + +#endif + +#ifdef CONFIG_EVENT_TRACE + +long user_trace_start(void) +{ + struct cpu_trace *tr; + unsigned long flags; + int cpu; + + if (!trace_user_triggered || trace_print_on_crash || print_functions) + return -EINVAL; + + /* + * If the user has not yet reset the max latency after + * bootup then we assume that this was the intention + * (we wont get any tracing done otherwise): + */ + if (preempt_max_latency == (cycle_t)ULONG_MAX) + preempt_max_latency = 0; + + /* + * user_trace_start() might be called from hardirq + * context, if trace_user_triggered_irq is set, so + * be careful about locking: + */ + if (preempt_count() || irqs_disabled()) { + if (down_trylock(&max_mutex)) + return -EAGAIN; + } else + down(&max_mutex); + + local_irq_save(flags); + cpu = smp_processor_id(); + tr = cpu_traces + cpu; + +#ifdef CONFIG_WAKEUP_TIMING + if (wakeup_timing) { + __raw_spin_lock(&sch.trace_lock); + sch.task = current; + sch.cpu = cpu; + sch.tr = tr; + __raw_spin_unlock(&sch.trace_lock); + } +#endif + reset_trace_idx(cpu, tr); + + tr->critical_sequence = max_sequence; + tr->preempt_timestamp = get_monotonic_cycles(); + tr->critical_start = CALLER_ADDR0; + _trace_cmdline(cpu, tr); + mcount(); + + WARN_ON(!irqs_disabled()); + local_irq_restore(flags); + + up(&max_mutex); + + return 0; +} + +EXPORT_SYMBOL_GPL(user_trace_start); + +long user_trace_stop(void) +{ + unsigned long latency = 0, flags; + struct cpu_trace *tr; + cycle_t delta; + + if (!trace_user_triggered || trace_print_on_crash || print_functions) + return -EINVAL; + + local_irq_save(flags); + mcount(); + +#ifdef CONFIG_WAKEUP_TIMING + if (wakeup_timing) { + struct task_struct *t; + + __raw_spin_lock(&sch.trace_lock); + t = sch.task; + if (current != t) { + __raw_spin_unlock(&sch.trace_lock); + local_irq_restore(flags); + printk("wrong stop: curr: %s/%d[%d] => %p\n", + current->comm, current->pid, + 
task_thread_info(current)->cpu, t); + if (t) + printk("wrong stop: curr: %s/%d[%d]\n", + t->comm, t->pid, + task_thread_info(t)->cpu); + return -EINVAL; + } + sch.task = NULL; + tr = sch.tr; + sch.tr = NULL; + __raw_spin_unlock(&sch.trace_lock); + } else +#endif + tr = cpu_traces + smp_processor_id(); + + atomic_inc(&tr->disabled); + if (tr->preempt_timestamp) { + cycle_t T0, T1; + unsigned long long tmp0; + + T0 = tr->preempt_timestamp; + T1 = get_monotonic_cycles(); + tmp0 = preempt_max_latency; + if (T1 < T0) + T0 = T1; + delta = T1 - T0; + if (!report_latency(delta)) + goto out; + if (tr->critical_sequence != max_sequence || + down_trylock(&max_mutex)) + goto out; + + WARN_ON(!preempt_thresh && preempt_max_latency > delta); + + preempt_max_latency = delta; + update_max_tr(tr); + + latency = cycles_to_usecs(delta); + + max_sequence++; + up(&max_mutex); +out: + tr->preempt_timestamp = 0; + } + atomic_dec(&tr->disabled); + local_irq_restore(flags); + + if (latency) { + if (preempt_thresh) + printk("(%16s-%-5d|#%d): %lu us user-latency " + "violates %lu us threshold.\n", + current->comm, current->pid, + raw_smp_processor_id(), latency, + cycles_to_usecs(preempt_thresh)); + else + printk("(%16s-%-5d|#%d): new %lu us user-latency.\n", + current->comm, current->pid, + raw_smp_processor_id(), latency); + } + + return 0; +} + +EXPORT_SYMBOL(user_trace_stop); + +static int trace_print_cpu = -1; + +void notrace stop_trace(void) +{ + if (trace_print_on_crash && trace_print_cpu == -1) { + trace_enabled = -1; + trace_print_cpu = raw_smp_processor_id(); + } +} + +EXPORT_SYMBOL(stop_trace); + +static void print_entry(struct trace_entry *entry, struct trace_entry *entry0) +{ + unsigned long abs_usecs; + int hardirq, softirq; + + abs_usecs = cycles_to_us(entry->timestamp - entry0->timestamp); + + printk("%-5d ", entry->pid); + + printk("%d%c%c", + entry->cpu, + (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : + (entry->flags & TRACE_FLAG_IRQS_HARD_OFF) ? 'D' : '.', + (entry->flags & TRACE_FLAG_NEED_RESCHED_DELAYED) ? 'n' : + ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); + + hardirq = entry->flags & TRACE_FLAG_HARDIRQ; + softirq = entry->flags & TRACE_FLAG_SOFTIRQ; + if (hardirq && softirq) + printk("H"); + else { + if (hardirq) + printk("h"); + else { + if (softirq) + printk("s"); + else + printk("."); + } + } + + if (entry->preempt_count) + printk(":%x ", entry->preempt_count); + else + printk(":. "); + + printk("%ld.%03ldms: ", abs_usecs/1000, abs_usecs % 1000); + + switch (entry->type) { + case TRACE_FN: + printk_name(entry->u.fn.eip); + printk(" <= ("); + printk_name(entry->u.fn.parent_eip); + printk(")\n"); + break; + case TRACE_SPECIAL: + printk(" special: %lx %lx %lx\n", + entry->u.special.v1, entry->u.special.v2, + entry->u.special.v3); + break; + case TRACE_SPECIAL_U64: + printk(" spec64: %lx%08lx %lx\n", + entry->u.special.v1, entry->u.special.v2, + entry->u.special.v3); + break; + } +} + +/* + * Print the current trace at crash time. + * + * We print it backwards, so that the newest (most interesting) entries + * are printed first. 
+ */ +void print_last_trace(void) +{ + unsigned int idx0, idx, i, cpu; + struct cpu_trace *tr; + struct trace_entry *entry0, *entry; + + preempt_disable(); + cpu = smp_processor_id(); + if (trace_enabled != -1 || trace_print_cpu != cpu || + !trace_print_on_crash) { + if (trace_print_on_crash) + printk("skipping trace printing on CPU#%d != %d\n", + cpu, trace_print_cpu); + preempt_enable(); + return; + } + + trace_print_on_crash = 0; + + tr = cpu_traces + cpu; + if (!tr->trace) + goto out; + + printk("Last %ld trace entries:\n", MAX_TRACE); + idx0 = tr->trace_idx; + printk("curr idx: %d\n", idx0); + if (idx0 >= MAX_TRACE) + idx0 = 0; + idx = idx0; + entry0 = tr->trace + idx0; + + for (i = 0; i < MAX_TRACE; i++) { + if (idx == 0) + idx = MAX_TRACE-1; + else + idx--; + entry = tr->trace + idx; + switch (entry->type) { + case TRACE_FN: + case TRACE_SPECIAL: + case TRACE_SPECIAL_U64: + print_entry(entry, entry0); + break; + } + } + printk("printed %ld entries\n", MAX_TRACE); +out: + preempt_enable(); +} + +#ifdef CONFIG_SMP +/* + * On SMP, try to 'peek' on other CPU's traces and record them + * in this CPU's trace. This way we get a rough idea about what's + * going on there, without the overhead of global tracing. + * + * (no need to make this PER_CPU, we bounce it around anyway.) + */ +unsigned long nmi_eips[NR_CPUS]; +unsigned long nmi_flags[NR_CPUS]; + +void notrace nmi_trace(unsigned long eip, unsigned long parent_eip, + unsigned long flags) +{ + int cpu, this_cpu = smp_processor_id(); + + __trace(eip, parent_eip); + + nmi_eips[this_cpu] = parent_eip; + nmi_flags[this_cpu] = flags; + for (cpu = 0; cpu < NR_CPUS; cpu++) + if (cpu_online(cpu) && cpu != this_cpu) { + __trace(eip, nmi_eips[cpu]); + __trace(eip, nmi_flags[cpu]); + } +} +#else +/* + * On UP, NMI tracing is quite simple: + */ +void notrace nmi_trace(unsigned long eip, unsigned long parent_eip, + unsigned long flags) +{ + __trace(eip, parent_eip); +} +#endif + +#endif + +#ifdef CONFIG_PREEMPT_TRACE + +static void print_preempt_trace(struct task_struct *task) +{ + unsigned int count = task_thread_info(task)->preempt_count; + unsigned int i, lim = count & PREEMPT_MASK; + if (lim >= MAX_PREEMPT_TRACE) + lim = MAX_PREEMPT_TRACE-1; + printk("---------------------------\n"); + printk("| preempt count: %08x ]\n", count); + printk("| %d-level deep critical section nesting:\n", lim); + printk("----------------------------------------\n"); + for (i = 1; i <= lim; i++) { + printk(".. [<%08lx>] .... ", task->preempt_trace_eip[i]); + print_symbol("%s\n", task->preempt_trace_eip[i]); + printk(".....[<%08lx>] .. 
( <= ", + task->preempt_trace_parent_eip[i]); + print_symbol("%s)\n", task->preempt_trace_parent_eip[i]); + } + printk("\n"); +} + +#endif + +#if defined(CONFIG_PREEMPT_TRACE) || defined(CONFIG_EVENT_TRACE) +void print_traces(struct task_struct *task) +{ + if (!task) + task = current; + +#ifdef CONFIG_PREEMPT_TRACE + print_preempt_trace(task); +#endif +#ifdef CONFIG_EVENT_TRACE + print_last_trace(); +#endif +} +#endif + +#ifdef CONFIG_EVENT_TRACE +/* + * Allocate all the per-CPU trace buffers and the + * save-maximum/save-output staging buffers: + */ +void __init init_tracer(void) +{ + unsigned long size, total_size = 0; + struct trace_entry *array; + struct cpu_trace *tr; + int cpu; + + printk("num_possible_cpus(): %d\n", num_possible_cpus()); + + size = sizeof(struct trace_entry)*MAX_TRACE; + + for_each_possible_cpu(cpu) { + tr = cpu_traces + cpu; + array = alloc_bootmem(size); + if (!array) { + printk(KERN_ERR + "CPU#%d: failed to allocate %ld bytes trace buffer!\n", + cpu, size); + } else { + printk(KERN_INFO + "CPU#%d: allocated %ld bytes trace buffer.\n", + cpu, size); + total_size += size; + } + tr->cpu = cpu; + tr->trace = array; + + array = alloc_bootmem(size); + if (!array) { + printk(KERN_ERR + "CPU#%d: failed to allocate %ld bytes max-trace buffer!\n", + cpu, size); + } else { + printk(KERN_INFO + "CPU#%d: allocated %ld bytes max-trace buffer.\n", + cpu, size); + total_size += size; + } + max_tr.traces[cpu].trace = array; + } + + /* + * The output trace buffer is a special one that only has + * trace entries for the first cpu-trace structure: + */ + size = sizeof(struct trace_entry)*MAX_TRACE*num_possible_cpus(); + array = alloc_bootmem(size); + if (!array) { + printk(KERN_ERR + "failed to allocate %ld bytes out-trace buffer!\n", + size); + } else { + printk(KERN_INFO "allocated %ld bytes out-trace buffer.\n", + size); + total_size += size; + } + out_tr.traces[0].trace = array; + printk(KERN_INFO + "tracer: a total of %ld bytes allocated.\n", + total_size); +} +#endif Index: linux/kernel/lockdep.c =================================================================== --- linux.orig/kernel/lockdep.c +++ linux/kernel/lockdep.c @@ -1036,7 +1036,7 @@ find_usage_forwards(struct lock_class *s * Return 1 otherwise and keep unchanged. * Return 0 on error. 
*/ -static noinline int +static noinline notrace int find_usage_backwards(struct lock_class *source, unsigned int depth) { struct lock_list *entry; @@ -1586,7 +1586,7 @@ static inline int validate_chain(struct * We are building curr_chain_key incrementally, so double-check * it from scratch, to make sure that it's done correctly: */ -static void check_chain_key(struct task_struct *curr) +static void notrace check_chain_key(struct task_struct *curr) { #ifdef CONFIG_DEBUG_LOCKDEP struct held_lock *hlock, *prev_hlock = NULL; @@ -2009,7 +2009,7 @@ void early_boot_irqs_on(void) /* * Hardirqs will be enabled: */ -void trace_hardirqs_on(void) +void notrace trace_hardirqs_on(void) { struct task_struct *curr = current; unsigned long ip; @@ -2050,6 +2050,9 @@ void trace_hardirqs_on(void) curr->hardirq_enable_ip = ip; curr->hardirq_enable_event = ++curr->irq_events; debug_atomic_inc(&hardirqs_on_events); +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + time_hardirqs_on(CALLER_ADDR0, 0 /* CALLER_ADDR1 */); +#endif } EXPORT_SYMBOL(trace_hardirqs_on); @@ -2057,7 +2060,7 @@ EXPORT_SYMBOL(trace_hardirqs_on); /* * Hardirqs were disabled: */ -void trace_hardirqs_off(void) +void notrace trace_hardirqs_off(void) { struct task_struct *curr = current; @@ -2075,6 +2078,9 @@ void trace_hardirqs_off(void) curr->hardirq_disable_ip = _RET_IP_; curr->hardirq_disable_event = ++curr->irq_events; debug_atomic_inc(&hardirqs_off_events); +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + time_hardirqs_off(CALLER_ADDR0, 0 /* CALLER_ADDR1 */); +#endif } else debug_atomic_inc(&redundant_hardirqs_off); } @@ -2241,8 +2247,8 @@ static inline int separate_irq_context(s /* * Mark a lock with a usage bit, and validate the state transition: */ -static int mark_lock(struct task_struct *curr, struct held_lock *this, - enum lock_usage_bit new_bit) +static int notrace mark_lock(struct task_struct *curr, struct held_lock *this, + enum lock_usage_bit new_bit) { unsigned int new_mask = 1 << new_bit, ret = 1; @@ -2301,6 +2307,7 @@ static int mark_lock(struct task_struct * We must printk outside of the graph_lock: */ if (ret == 2) { + user_trace_stop(); printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); print_lock(this); print_irqtrace_events(curr); @@ -2648,7 +2655,7 @@ __lock_release(struct lockdep_map *lock, /* * Check whether we follow the irq-flags state precisely: */ -static void check_flags(unsigned long flags) +static notrace void check_flags(unsigned long flags) { #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS) if (!debug_locks) @@ -2680,8 +2687,9 @@ static void check_flags(unsigned long fl * We are not always called with irqs disabled - do that here, * and also avoid lockdep recursion: */ -void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, unsigned long ip) +void notrace +lock_acquire(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, unsigned long ip) { unsigned long flags; @@ -2692,9 +2700,9 @@ void lock_acquire(struct lockdep_map *lo return; raw_local_irq_save(flags); + current->lockdep_recursion = 1; check_flags(flags); - current->lockdep_recursion = 1; __lock_acquire(lock, subclass, trylock, read, check, irqs_disabled_flags(flags), ip); current->lockdep_recursion = 0; @@ -2703,7 +2711,8 @@ void lock_acquire(struct lockdep_map *lo EXPORT_SYMBOL_GPL(lock_acquire); -void lock_release(struct lockdep_map *lock, int nested, unsigned long ip) +void notrace +lock_release(struct lockdep_map *lock, int nested, unsigned long ip) { unsigned long 
flags; @@ -2714,8 +2723,8 @@ void lock_release(struct lockdep_map *lo return; raw_local_irq_save(flags); - check_flags(flags); current->lockdep_recursion = 1; + check_flags(flags); __lock_release(lock, nested, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); Index: linux/kernel/panic.c =================================================================== --- linux.orig/kernel/panic.c +++ linux/kernel/panic.c @@ -66,6 +66,8 @@ NORET_TYPE void panic(const char * fmt, unsigned long caller = (unsigned long) __builtin_return_address(0); #endif + stop_trace(); + /* * It's possible to come here directly from a panic-assertion and not * have preempt disabled. Some functions called from here want Index: linux/kernel/printk.c =================================================================== --- linux.orig/kernel/printk.c +++ linux/kernel/printk.c @@ -324,12 +324,14 @@ static void __call_console_drivers(unsig { struct console *con; + touch_critical_timing(); for (con = console_drivers; con; con = con->next) { if ((con->flags & CON_ENABLED) && con->write && (cpu_online(smp_processor_id()) || (con->flags & CON_ANYTIME))) con->write(con, &LOG_BUF(start), end - start); } + touch_critical_timing(); } static int __read_mostly ignore_loglevel; Index: linux/kernel/sched.c =================================================================== --- linux.orig/kernel/sched.c +++ linux/kernel/sched.c @@ -83,6 +83,10 @@ unsigned long long __attribute__((weak)) #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) +#define __PRIO(prio) \ + ((prio) <= 99 ? 199 - (prio) : (prio) - 120) + +#define PRIO(p) __PRIO((p)->prio) /* * 'User priority' is the nice value converted to something we * can work with better when scaling various scheduler parameters, @@ -306,6 +310,8 @@ static DEFINE_MUTEX(sched_hotcpu_mutex); static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) { rq->curr->sched_class->check_preempt_curr(rq, p); + if (p != rq->curr && p->prio < rq->curr->prio) + __trace_start_sched_wakeup(p); } static inline int cpu_of(struct rq *rq) @@ -969,6 +975,7 @@ unsigned long weighted_cpuload(const int static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) { #ifdef CONFIG_SMP + trace_change_sched_cpu(p, cpu); task_thread_info(p)->cpu = cpu; set_task_cfs_rq(p); #endif @@ -1539,14 +1546,19 @@ out: int fastcall wake_up_process(struct task_struct *p) { - return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | + int ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); + mcount(); + return ret; } EXPORT_SYMBOL(wake_up_process); int fastcall wake_up_state(struct task_struct *p, unsigned int state) { - return try_to_wake_up(p, state, 0); + int ret = try_to_wake_up(p, state, 0); + + mcount(); + return ret; } /* @@ -1791,6 +1803,7 @@ static inline void finish_task_switch(st finish_arch_switch(prev); finish_lock_switch(rq, prev); fire_sched_in_preempt_notifiers(current); + trace_stop_sched_switched(current); if (mm) mmdrop(mm); if (unlikely(prev_state == TASK_DEAD)) { @@ -1862,10 +1875,13 @@ context_switch(struct rq *rq, struct tas spin_release(&rq->lock.dep_map, 1, _THIS_IP_); #endif + trace_cmdline(); + /* Here we just switch the register state and the stack. 
*/ switch_to(prev, next, prev); barrier(); + trace_special_pid(prev->pid, PRIO(prev), PRIO(current)); /* * this_rq must be evaluated again because prev may have moved * CPUs since it called schedule(), thus the 'rq' on its stack @@ -3309,41 +3325,39 @@ void scheduler_tick(void) #endif } -#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) +#if defined(CONFIG_EVENT_TRACE) && defined(CONFIG_DEBUG_RT_MUTEXES) -void fastcall add_preempt_count(int val) +static void trace_array(struct prio_array *array) { - /* - * Underflow? - */ - if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) - return; - preempt_count() += val; - /* - * Spinlock count overflowing soon? - */ - DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= - PREEMPT_MASK - 10); + int i; + struct task_struct *p; + struct list_head *head, *tmp; + + for (i = 0; i < MAX_RT_PRIO; i++) { + head = array->queue + i; + if (list_empty(head)) { + WARN_ON(test_bit(i, array->bitmap)); + continue; + } + WARN_ON(!test_bit(i, array->bitmap)); + list_for_each(tmp, head) { + p = list_entry(tmp, struct task_struct, run_list); + trace_special_pid(p->pid, p->prio, PRIO(p)); + } + } } -EXPORT_SYMBOL(add_preempt_count); -void fastcall sub_preempt_count(int val) +static inline void trace_all_runnable_tasks(struct rq *rq) { - /* - * Underflow? - */ - if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) - return; - /* - * Is the spinlock portion underflowing? - */ - if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && - !(preempt_count() & PREEMPT_MASK))) - return; + if (trace_enabled) + trace_array(&rq->active); +} - preempt_count() -= val; +#else + +static inline void trace_all_runnable_tasks(struct rq *rq) +{ } -EXPORT_SYMBOL(sub_preempt_count); #endif @@ -3454,6 +3468,8 @@ need_resched_nonpreemptible: prev->sched_class->put_prev_task(rq, prev, now); next = pick_next_task(rq, prev, now); + trace_all_runnable_tasks(rq); + sched_info_switch(prev, next); if (likely(prev != next)) { @@ -3462,8 +3478,10 @@ need_resched_nonpreemptible: ++*switch_count; context_switch(rq, prev, next); /* unlocks the rq */ - } else + } else { spin_unlock_irq(&rq->lock); + trace_stop_sched_switched(next); + } if (unlikely(reacquire_kernel_lock(current) < 0)) { cpu = smp_processor_id(); @@ -3928,6 +3946,7 @@ void rt_mutex_setprio(struct task_struct check_preempt_curr(rq, p); } } + task_rq_unlock(rq, &flags); } @@ -6559,6 +6578,7 @@ void __might_sleep(char *file, int line) if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; prev_jiffy = jiffies; + stop_trace(); printk(KERN_ERR "BUG: sleeping function called from invalid" " context at %s:%d\n", file, line); printk("in_atomic():%d, irqs_disabled():%d\n", Index: linux/kernel/sysctl.c =================================================================== --- linux.orig/kernel/sysctl.c +++ linux/kernel/sysctl.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -321,6 +323,132 @@ static ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, +#ifdef CONFIG_WAKEUP_TIMING + { + .ctl_name = CTL_UNNUMBERED, + .procname = "wakeup_timing", + .data = &wakeup_timing, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif +#if defined(CONFIG_WAKEUP_TIMING) || defined(CONFIG_EVENT_TRACE) + { + .ctl_name = CTL_UNNUMBERED, + .procname = "preempt_max_latency", + .data = &preempt_max_latency, + .maxlen = sizeof(preempt_max_latency), + .mode = 0644, + .proc_handler = 
&proc_doulongvec_minmax, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "preempt_thresh", + .data = &preempt_thresh, + .maxlen = sizeof(preempt_thresh), + .mode = 0644, + .proc_handler = &proc_doulongvec_minmax, + }, +#endif +#ifdef CONFIG_EVENT_TRACE + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_enabled", + .data = &trace_enabled, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "syscall_tracing", + .data = &syscall_tracing, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "stackframe_tracing", + .data = &stackframe_tracing, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "mcount_enabled", + .data = &mcount_enabled, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_user_triggered", + .data = &trace_user_triggered, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_user_trigger_irq", + .data = &trace_user_trigger_irq, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_freerunning", + .data = &trace_freerunning, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_print_on_crash", + .data = &trace_print_on_crash, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_verbose", + .data = &trace_verbose, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_all_cpus", + .data = &trace_all_cpus, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_use_raw_cycles", + .data = &trace_use_raw_cycles, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "trace_all_runnable", + .data = &trace_all_runnable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif { .ctl_name = KERN_CORE_USES_PID, .procname = "core_uses_pid", Index: linux/kernel/time/timekeeping.c =================================================================== --- linux.orig/kernel/time/timekeeping.c +++ linux/kernel/time/timekeeping.c @@ -114,6 +114,33 @@ static inline void __get_realtime_clock_ timespec_add_ns(ts, nsecs); } +cycle_t notrace get_monotonic_cycles(void) +{ + cycle_t cycle_now, cycle_delta; + + /* read clocksource: */ + cycle_now = clocksource_read(clock); + + /* calculate the delta since the last update_wall_time: */ + cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; + + return clock->cycle_last + cycle_delta; +} + +unsigned long notrace cycles_to_usecs(cycle_t cycles) +{ + u64 ret = cyc2ns(clock, cycles); + + do_div(ret, 1000); + + return ret; +} + +cycle_t notrace usecs_to_cycles(unsigned long usecs) +{ + return ns2cyc(clock, (u64)usecs * 1000); +} + /** * getnstimeofday - Returns the time of day in a timespec * @ts: pointer to the timespec to be set Index: linux/lib/Kconfig.debug =================================================================== --- linux.orig/lib/Kconfig.debug 
+++ linux/lib/Kconfig.debug @@ -332,6 +332,192 @@ config STACKTRACE depends on DEBUG_KERNEL depends on STACKTRACE_SUPPORT +config PREEMPT_TRACE + bool + default y + depends on DEBUG_PREEMPT + +config EVENT_TRACE + bool "Kernel event tracing" + default n + depends on GENERIC_TIME + select FRAME_POINTER + select STACKTRACE + help + This option enables a kernel tracing mechanism that will track + certain kernel events such as system call entry and return, + IRQ entry, context-switching, etc. + + Run the scripts/trace-it utility on a kernel with this option + enabled for sample output. + +config FUNCTION_TRACE + bool "Kernel function call tracing" + default n + depends on !REORDER + select EVENT_TRACE + help + This option enables a kernel tracing mechanism that will track + precise function-call granularity kernel execution. Sample + output: + + pcscd-1772 0D..2 6867us : deactivate_task (-2 1) + pcscd-1772 0D..2 6867us : dequeue_task (deactivate_task) + -0 0D..2 6870us : __switch_to (__schedule) + -0 0D..2 6871us : __schedule (-2 20) + -0 0D..2 6871us : __lock_acquire (lock_acquire) + -0 0D..2 6872us : __spin_unlock_irq (__schedule) + + Run the scripts/trace-it sample utility on a kernel with this + option enabled to capture 1 second worth of events. + + (Note that kernel size and overhead increases noticeably + with this option enabled.) + +config WAKEUP_TIMING + bool "Wakeup latency timing" + depends on GENERIC_TIME + help + This option measures the time spent from a highprio thread being + woken up to it getting scheduled on a CPU, with microsecond + accuracy. + + The default measurement method is a maximum search, which is + disabled by default and can be runtime (re-)started via: + + echo 0 > /proc/sys/kernel/preempt_max_latency + +config LATENCY_TRACE + bool "Latency tracing" + default n + depends on LATENCY_TIMING && !REORDER && GENERIC_TIME + select FRAME_POINTER + select FUNCTION_TRACE + help + When this option is enabled then the last maximum latency timing + event's full trace can be found in /proc/latency_trace, in a + human-readable (or rather as some would say, in a + kernel-developer-readable) form. + + (Note that kernel size and overhead increases noticeably + with this option enabled.) + +config CRITICAL_PREEMPT_TIMING + bool "Non-preemptible critical section latency timing" + default n + depends on PREEMPT + depends on GENERIC_TIME + help + This option measures the time spent in preempt-off critical + sections, with microsecond accuracy. + + The default measurement method is a maximum search, which is + disabled by default and can be runtime (re-)started via: + + echo 0 > /proc/sys/kernel/preempt_max_latency + + (Note that kernel size and overhead increases with this option + enabled. This option and the irqs-off timing option can be + used together or separately.) + +config CRITICAL_IRQSOFF_TIMING + bool "Interrupts-off critical section latency timing" + default n + depends on GENERIC_TIME + select TRACE_IRQFLAGS + help + This option measures the time spent in irqs-off critical + sections, with microsecond accuracy. + + The default measurement method is a maximum search, which is + disabled by default and can be runtime (re-)started via: + + echo 0 > /proc/sys/kernel/preempt_max_latency + + (Note that kernel size and overhead increases with this option + enabled. This option and the preempt-off timing option can be + used together or separately.) 
+ +config WAKEUP_LATENCY_HIST + bool "wakeup latency histogram" + default n + depends on WAKEUP_TIMING + help + This option logs all the wakeup latency timing to a big histogram + bucket, in the meanwhile, it also dummies up printk produced by + wakeup latency timing. + + The wakeup latency timing histogram can be viewed via: + + cat /proc/latency_hist/wakeup_latency/CPU* + + (Note: * presents CPU ID.) + +config PREEMPT_OFF_HIST + bool "non-preemptible critical section latency histogram" + default n + depends on CRITICAL_PREEMPT_TIMING + help + This option logs all the non-preemptible critical section latency + timing to a big histogram bucket, in the meanwhile, it also + dummies up printk produced by non-preemptible critical section + latency timing. + + The non-preemptible critical section latency timing histogram can + be viewed via: + + cat /proc/latency_hist/preempt_off_latency/CPU* + + (Note: * presents CPU ID.) + +config INTERRUPT_OFF_HIST + bool "interrupts-off critical section latency histogram" + default n + depends on CRITICAL_IRQSOFF_TIMING + help + This option logs all the interrupts-off critical section latency + timing to a big histogram bucket, in the meanwhile, it also + dummies up printk produced by interrupts-off critical section + latency timing. + + The interrupts-off critical section latency timing histogram can + be viewed via: + + cat /proc/latency_hist/interrupt_off_latency/CPU* + + (Note: * presents CPU ID.) + +config CRITICAL_TIMING + bool + default y + depends on CRITICAL_PREEMPT_TIMING || CRITICAL_IRQSOFF_TIMING + +config DEBUG_TRACE_IRQFLAGS + bool + default y + depends on CRITICAL_IRQSOFF_TIMING + +config LATENCY_TIMING + bool + default y + depends on WAKEUP_TIMING || CRITICAL_TIMING + select SYSCTL + +config CRITICAL_LATENCY_HIST + bool + default y + depends on PREEMPT_OFF_HIST || INTERRUPT_OFF_HIST + +config LATENCY_HIST + bool + default y + depends on WAKEUP_LATENCY_HIST || CRITICAL_LATENCY_HIST + +config MCOUNT + bool + depends on FUNCTION_TRACE + default y + config DEBUG_KOBJECT bool "kobject debugging" depends on DEBUG_KERNEL Index: linux/lib/debug_locks.c =================================================================== --- linux.orig/lib/debug_locks.c +++ linux/lib/debug_locks.c @@ -10,6 +10,7 @@ */ #include #include +#include #include #include #include @@ -36,7 +37,14 @@ int debug_locks_silent; int debug_locks_off(void) { if (xchg(&debug_locks, 0)) { +#ifdef CONFIG_DEBUG_RT_MUTEXES + if (spin_is_locked(¤t->pi_lock)) + spin_unlock(¤t->pi_lock); +#endif if (!debug_locks_silent) { + stop_trace(); + user_trace_stop(); + printk("stopped custom tracer.\n"); console_verbose(); return 1; } Index: linux/scripts/Makefile =================================================================== --- linux.orig/scripts/Makefile +++ linux/scripts/Makefile @@ -7,6 +7,7 @@ # conmakehash: Create chartable # conmakehash: Create arrays for initializing the kernel console tables +hostprogs-$(CONFIG_EVENT_TRACE) += trace-it hostprogs-$(CONFIG_KALLSYMS) += kallsyms hostprogs-$(CONFIG_LOGO) += pnmtologo hostprogs-$(CONFIG_VT) += conmakehash Index: linux/scripts/trace-it.c =================================================================== --- /dev/null +++ linux/scripts/trace-it.c @@ -0,0 +1,79 @@ + +/* + * Copyright (C) 2005, Ingo Molnar + * + * user-triggered tracing. 
+ * + * The -rt kernel has a built-in kernel tracer, which will trace + * all kernel function calls (and a couple of special events as well), + * by using a build-time gcc feature that instruments all kernel + * functions. + * + * The tracer is highly automated for a number of latency tracing purposes, + * but it can also be switched into 'user-triggered' mode, which is a + * half-automatic tracing mode where userspace apps start and stop the + * tracer. This file shows a dumb example how to turn user-triggered + * tracing on, and how to start/stop tracing. Note that if you do + * multiple start/stop sequences, the kernel will do a maximum search + * over their latencies, and will keep the trace of the largest latency + * in /proc/latency_trace. The maximums are also reported to the kernel + * log. (but can also be read from /proc/sys/kernel/preempt_max_latency) + * + * For the tracer to be activated, turn on CONFIG_EVENT_TRACING + * in the .config, rebuild the kernel and boot into it. The trace will + * get _alot_ more verbose if you also turn on CONFIG_FUNCTION_TRACING, + * every kernel function call will be put into the trace. Note that + * CONFIG_FUNCTION_TRACING has significant runtime overhead, so you dont + * want to use it for performance testing :) + */ + +#include +#include +#include +#include +#include +#include +#include + +int main (int argc, char **argv) +{ + int ret; + + if (getuid() != 0) { + fprintf(stderr, "needs to run as root.\n"); + exit(1); + } + ret = system("cat /proc/sys/kernel/mcount_enabled >/dev/null 2>/dev/null"); + if (ret) { + fprintf(stderr, "CONFIG_LATENCY_TRACING not enabled?\n"); + exit(1); + } + system("echo 1 > /proc/sys/kernel/trace_user_triggered"); + system("[ -e /proc/sys/kernel/wakeup_timing ] && echo 0 > /proc/sys/kernel/wakeup_timing"); + system("echo 1 > /proc/sys/kernel/trace_enabled"); + system("echo 1 > /proc/sys/kernel/mcount_enabled"); + system("echo 0 > /proc/sys/kernel/trace_freerunning"); + system("echo 0 > /proc/sys/kernel/trace_print_on_crash"); + system("echo 0 > /proc/sys/kernel/trace_verbose"); + system("echo 0 > /proc/sys/kernel/preempt_thresh 2>/dev/null"); + system("echo 0 > /proc/sys/kernel/preempt_max_latency 2>/dev/null"); + + // start tracing + if (prctl(0, 1)) { + fprintf(stderr, "trace-it: couldnt start tracing!\n"); + return 1; + } + usleep(10000000); + if (prctl(0, 0)) { + fprintf(stderr, "trace-it: couldnt stop tracing!\n"); + return 1; + } + + system("echo 0 > /proc/sys/kernel/trace_user_triggered"); + system("echo 0 > /proc/sys/kernel/trace_enabled"); + system("cat /proc/latency_trace"); + + return 0; +} + + patches/clockevents-allow-build-without-runtime-use.patch0000664000077200007720000000535210655544570023267 0ustar mingomingoSubject: clockevents: Allow build w/o run-tine usage for migration purposes Migration aid to allow preparatory patches which introduce not yet used parts of clock events code. 
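For illustration only (not part of the patch): the build-vs-use split below relies on the usual stub-macro pattern -- when only the _BUILD option is set, clockevents_notify() compiles away to a no-op, so callers never need their own #ifdefs. A minimal standalone sketch of that pattern, reusing the function name purely for readability (build with or without -DCONFIG_GENERIC_CLOCKEVENTS):

#include <stdio.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
static void clockevents_notify(unsigned long reason, void *arg)
{
	printf("notify: reason=%lu arg=%p\n", reason, arg);
}
#else
/* runtime use disabled: call sites compile to nothing */
# define clockevents_notify(reason, arg) do { } while (0)
#endif

int main(void)
{
	clockevents_notify(1UL, NULL);
	return 0;
}
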
Signed-off-by: Thomas Gleixner --- include/linux/clockchips.h | 8 ++++++-- kernel/time/Kconfig | 5 +++++ kernel/time/Makefile | 2 +- kernel/time/clockevents.c | 3 ++- 4 files changed, 14 insertions(+), 4 deletions(-) Index: linux/include/linux/clockchips.h =================================================================== --- linux.orig/include/linux/clockchips.h +++ linux/include/linux/clockchips.h @@ -8,7 +8,7 @@ #ifndef _LINUX_CLOCKCHIPS_H #define _LINUX_CLOCKCHIPS_H -#ifdef CONFIG_GENERIC_CLOCKEVENTS +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD #include #include @@ -127,9 +127,13 @@ extern void clockevents_unregister_notif extern int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, ktime_t now); +#ifdef CONFIG_GENERIC_CLOCKEVENTS extern void clockevents_notify(unsigned long reason, void *arg); - #else +# define clockevents_notify(reason, arg) do { } while (0) +#endif + +#else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */ #define clockevents_notify(reason, arg) do { } while (0) Index: linux/kernel/time/Kconfig =================================================================== --- linux.orig/kernel/time/Kconfig +++ linux/kernel/time/Kconfig @@ -23,3 +23,8 @@ config HIGH_RES_TIMERS hardware is not capable then this option only increases the size of the kernel image. +config GENERIC_CLOCKEVENTS_BUILD + bool + default y + depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR + Index: linux/kernel/time/Makefile =================================================================== --- linux.orig/kernel/time/Makefile +++ linux/kernel/time/Makefile @@ -1,6 +1,6 @@ obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o -obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o +obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o Index: linux/kernel/time/clockevents.c =================================================================== --- linux.orig/kernel/time/clockevents.c +++ linux/kernel/time/clockevents.c @@ -204,6 +204,7 @@ void clockevents_exchange_device(struct local_irq_restore(flags); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS /** * clockevents_notify - notification about relevant events */ @@ -232,4 +233,4 @@ void clockevents_notify(unsigned long re spin_unlock(&clockevents_lock); } EXPORT_SYMBOL_GPL(clockevents_notify); - +#endif patches/move-native-irq.patch0000664000077200007720000000165610655544571015611 0ustar mingomingo--- kernel/irq/migration.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) Index: linux/kernel/irq/migration.c =================================================================== --- linux.orig/kernel/irq/migration.c +++ linux/kernel/irq/migration.c @@ -61,6 +61,7 @@ void move_masked_irq(int irq) void move_native_irq(int irq) { struct irq_desc *desc = irq_desc + irq; + int mask = 1; if (likely(!(desc->status & IRQ_MOVE_PENDING))) return; @@ -68,8 +69,17 @@ void move_native_irq(int irq) if (unlikely(desc->status & IRQ_DISABLED)) return; - desc->chip->mask(irq); + /* + * If the irq is already in progress, it should be masked. + * If we unmask it, we might cause an interrupt storm on RT. 
+ */ + if (unlikely(desc->status & IRQ_INPROGRESS)) + mask = 0; + + if (mask) + desc->chip->mask(irq); move_masked_irq(irq); - desc->chip->unmask(irq); + if (mask) + desc->chip->unmask(irq); } patches/arm-fix-atomic-cmpxchg.patch0000664000077200007720000000121010655544573017016 0ustar mingomingo--- include/asm-arm/atomic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/include/asm-arm/atomic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/atomic.h +++ linux-rt-rebase.q/include/asm-arm/atomic.h @@ -189,10 +189,10 @@ static inline unsigned long __cmpxchg(vo volatile unsigned long *p = ptr; if (size == 4) { - local_irq_save(flags); + raw_local_irq_save(flags); if ((prev = *p) == old) *p = new; - local_irq_restore(flags); + raw_local_irq_restore(flags); return(prev); } else return wrong_size_cmpxchg(ptr); patches/cputimer-thread-rt-fix.patch0000664000077200007720000000315010655544574017064 0ustar mingomingo--- kernel/posix-cpu-timers.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) Index: linux-rt-rebase.q/kernel/posix-cpu-timers.c =================================================================== --- linux-rt-rebase.q.orig/kernel/posix-cpu-timers.c +++ linux-rt-rebase.q/kernel/posix-cpu-timers.c @@ -1292,18 +1292,6 @@ void __run_posix_cpu_timers(struct task_ LIST_HEAD(firing); struct k_itimer *timer, *next; - -#define UNEXPIRED(clock) \ - (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \ - cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires)) - - if (UNEXPIRED(prof) && UNEXPIRED(virt) && - (tsk->it_sched_expires == 0 || - tsk->se.sum_exec_runtime < tsk->it_sched_expires)) - return; - -#undef UNEXPIRED - /* * Double-check with locks held. 
*/ @@ -1428,6 +1416,19 @@ void run_posix_cpu_timers(struct task_st BUG_ON(!irqs_disabled()); if(!per_cpu(posix_timer_task, cpu)) return; + + +#define UNEXPIRED(clock) \ + (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \ + cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires)) + + if (UNEXPIRED(prof) && UNEXPIRED(virt) && + (tsk->it_sched_expires == 0 || + tsk->sum_exec_runtime < tsk->it_sched_expires)) + return; + +#undef UNEXPIRED + /* get per-cpu references */ tasklist = per_cpu(posix_timer_tasklist, cpu); @@ -1446,7 +1447,7 @@ void run_posix_cpu_timers(struct task_st per_cpu(posix_timer_tasklist, cpu) = tsk; } /* XXX signal the thread somehow */ - wake_up_process(per_cpu(posix_timer_task,cpu)); + wake_up_process(per_cpu(posix_timer_task, cpu)); } patches/preempt-realtime-ppc-more-resched-fixups.patch0000664000077200007720000000544110655544574022510 0ustar mingomingo--- arch/powerpc/kernel/entry_64.S | 16 +++++++++++----- arch/powerpc/kernel/idle.c | 4 ++-- include/asm-powerpc/thread_info.h | 3 ++- 3 files changed, 15 insertions(+), 8 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/kernel/entry_64.S =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/entry_64.S +++ linux-rt-rebase.q/arch/powerpc/kernel/entry_64.S @@ -452,7 +452,8 @@ _GLOBAL(ret_from_except_lite) #ifdef CONFIG_PREEMPT clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ - li r0,_TIF_NEED_RESCHED /* bits to check */ + li r0,(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) + /* bits to check */ ld r3,_MSR(r1) ld r4,TI_FLAGS(r9) /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */ @@ -560,16 +561,21 @@ do_work: cmpdi r0,0 crandc eq,cr1*4+eq,eq bne restore + /* here we are preempting the current task */ 1: - /* preempt_schedule_irq() expects interrupts disabled. */ - bl .preempt_schedule_irq + li r0,1 + stb r0,PACASOFTIRQEN(r13) + stb r0,PACAHARDIRQEN(r13) + ori r10,r10,MSR_EE + mtmsrd r10,1 /* reenable interrupts */ + bl .preempt_schedule mfmsr r10 clrrdi r9,r1,THREAD_SHIFT rldicl r10,r10,48,1 /* disable interrupts again */ rotldi r10,r10,16 mtmsrd r10,1 ld r4,TI_FLAGS(r9) - andi. r0,r4,_TIF_NEED_RESCHED + andi. r0,r4,(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) bne 1b b restore @@ -584,7 +590,7 @@ user_work: ori r10,r10,MSR_EE mtmsrd r10,1 - andi. r0,r4,_TIF_NEED_RESCHED + andi. 
r0,r4,(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) beq 1f bl .schedule b .ret_from_except_lite Index: linux-rt-rebase.q/arch/powerpc/kernel/idle.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/idle.c +++ linux-rt-rebase.q/arch/powerpc/kernel/idle.c @@ -61,8 +61,8 @@ void cpu_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while (1) { tick_nohz_stop_sched_tick(); - - while (!need_resched() && !cpu_should_die()) { + while (!need_resched() && !need_resched_delayed() && + !cpu_should_die()) { ppc64_runlatch_off(); if (ppc_md.power_save) { Index: linux-rt-rebase.q/include/asm-powerpc/thread_info.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/thread_info.h +++ linux-rt-rebase.q/include/asm-powerpc/thread_info.h @@ -150,7 +150,8 @@ static inline struct thread_info *curren #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) #define _TIF_USER_WORK_MASK ( _TIF_SIGPENDING | \ - _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) + _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK | \ + _TIF_NEED_RESCHED_DELAYED) #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) /* Bits in local_flags */ patches/x86_64-prep-idle-loop-for-dynticks.patch0000664000077200007720000000230210655544570020750 0ustar mingomingoSubject: x86_64: prepare idle loop for dynamic ticks From: Chris Wright Add tick_nohz_{stop,restart}_sched_tick to idle loop in prepartion for turning on dynticks. These are just noops until NO_HZ is enabled. Signed-off-by: Chris Wright Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/process.c | 4 ++++ 1 file changed, 4 insertions(+) Index: linux/arch/x86_64/kernel/process.c =================================================================== --- linux.orig/arch/x86_64/kernel/process.c +++ linux/arch/x86_64/kernel/process.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -208,6 +209,8 @@ void cpu_idle (void) if (__get_cpu_var(cpu_idle_state)) __get_cpu_var(cpu_idle_state) = 0; + tick_nohz_stop_sched_tick(); + check_pgt_cache(); rmb(); idle = pm_idle; @@ -229,6 +232,7 @@ void cpu_idle (void) __exit_idle(); } + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); patches/preempt-realtime-net-softirq-fixups.patch0000664000077200007720000000274310655544575021631 0ustar mingomingoSubject: NOHZ: local_softirq_pending with tickless From: Mikulas Patocka quota += dev->weight; else dev->quota = dev->weight; - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + raise_softirq_irqoff(NET_RX_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(__netif_rx_schedule); @@ -2152,7 +2152,7 @@ out: softnet_break: __get_cpu_var(netdev_rx_stat).time_squeeze++; - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + raise_softirq_irqoff(NET_RX_SOFTIRQ); goto out; } patches/irda-fix.patch0000664000077200007720000000176610655544576014300 0ustar mingomingoThis was found around the 2.6.10 timeframe when testing with the -rt patch and I believe is still is an issue. irttp_dup() does a memcpy() of the tsap_cb structure causing the spinlock protecting various fields in the structure to be duped. This works OK in the non-RT case but in the RT case we end up with two mutexes pointing to the same wait_list and leading to an OOPS. Fix is to simply initialize the spinlock after the memcpy(). 
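The bug pattern, reduced to a self-contained sketch (struct and helper names are made up for illustration; in the patch below the actual fix is the single spin_lock_init() added to irttp_dup()):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct dup_example {			/* stand-in for struct tsap_cb */
	spinlock_t lock;
	int payload;
};

static struct dup_example *dup_example_clone(const struct dup_example *orig)
{
	struct dup_example *new = kmalloc(sizeof(*new), GFP_ATOMIC);

	if (!new)
		return NULL;
	/* memcpy() duplicates the embedded lock along with the data... */
	memcpy(new, orig, sizeof(*new));
	/*
	 * ...so the copy needs a freshly initialized lock. On -rt the
	 * spinlock is a sleeping lock with a wait list; sharing that
	 * state between two objects is what caused the OOPS.
	 */
	spin_lock_init(&new->lock);
	return new;
}
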
Signed-off-by: Deepak Saxena --- net/irda/irttp.c | 1 + 1 file changed, 1 insertion(+) Index: linux-rt-rebase.q/net/irda/irttp.c =================================================================== --- linux-rt-rebase.q.orig/net/irda/irttp.c +++ linux-rt-rebase.q/net/irda/irttp.c @@ -1453,6 +1453,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb } /* Dup */ memcpy(new, orig, sizeof(struct tsap_cb)); + spin_lock_init(&new->lock); /* We don't need the old instance any more */ spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); patches/trace-with-caller-addr.patch0000664000077200007720000000662410655544572017006 0ustar mingomingo--- arch/x86_64/lib/thunk.S | 18 ++++++++++++++++-- kernel/latency_trace.c | 22 ++++++++++++++++++++++ kernel/lockdep.c | 16 ++++++++++++---- 3 files changed, 50 insertions(+), 6 deletions(-) Index: linux/arch/x86_64/lib/thunk.S =================================================================== --- linux.orig/arch/x86_64/lib/thunk.S +++ linux/arch/x86_64/lib/thunk.S @@ -47,8 +47,22 @@ thunk __up_wakeup,__up #ifdef CONFIG_TRACE_IRQFLAGS - thunk trace_hardirqs_on_thunk,trace_hardirqs_on - thunk trace_hardirqs_off_thunk,trace_hardirqs_off + /* put return address in rdi (arg1) */ + .macro thunk_ra name,func + .globl \name +\name: + CFI_STARTPROC + SAVE_ARGS + /* SAVE_ARGS pushs 9 elements */ + /* the next element would be the rip */ + movq 9*8(%rsp), %rdi + call \func + jmp restore + CFI_ENDPROC + .endm + + thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller + thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller #endif /* SAVE_ARGS below is used only for the .cfi directives it contains. */ Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -1984,6 +1984,28 @@ void notrace trace_hardirqs_off(void) EXPORT_SYMBOL(trace_hardirqs_off); +/* used by x86_64 thunk.S */ +void notrace trace_hardirqs_on_caller(unsigned long caller_addr) +{ + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_off_preempt_count() && irqs_disabled_flags(flags)) + __stop_critical_timing(caller_addr, 0 /* CALLER_ADDR1 */); +} + +void notrace trace_hardirqs_off_caller(unsigned long caller_addr) +{ + unsigned long flags; + + local_save_flags(flags); + + if (!irqs_off_preempt_count() && irqs_disabled_flags(flags)) + __start_critical_timing(caller_addr, 0 /* CALLER_ADDR1 */, + INTERRUPT_LATENCY); +} + #endif /* !CONFIG_LOCKDEP */ #endif /* CONFIG_CRITICAL_IRQSOFF_TIMING */ Index: linux/kernel/lockdep.c =================================================================== --- linux.orig/kernel/lockdep.c +++ linux/kernel/lockdep.c @@ -2009,7 +2009,7 @@ void early_boot_irqs_on(void) /* * Hardirqs will be enabled: */ -void notrace trace_hardirqs_on(void) +void notrace trace_hardirqs_on_caller(unsigned long a0) { struct task_struct *curr = current; unsigned long ip; @@ -2051,16 +2051,20 @@ void notrace trace_hardirqs_on(void) curr->hardirq_enable_event = ++curr->irq_events; debug_atomic_inc(&hardirqs_on_events); #ifdef CONFIG_CRITICAL_IRQSOFF_TIMING - time_hardirqs_on(CALLER_ADDR0, 0 /* CALLER_ADDR1 */); + time_hardirqs_on(a0, 0 /* CALLER_ADDR1 */); #endif } +void notrace trace_hardirqs_on(void) { + trace_hardirqs_on_caller(CALLER_ADDR0); +} + EXPORT_SYMBOL(trace_hardirqs_on); /* * Hardirqs were disabled: */ -void notrace trace_hardirqs_off(void) +void notrace trace_hardirqs_off_caller(unsigned long a0) { struct task_struct *curr = current; @@ -2079,12 +2083,16 
@@ void notrace trace_hardirqs_off(void) curr->hardirq_disable_event = ++curr->irq_events; debug_atomic_inc(&hardirqs_off_events); #ifdef CONFIG_CRITICAL_IRQSOFF_TIMING - time_hardirqs_off(CALLER_ADDR0, 0 /* CALLER_ADDR1 */); + time_hardirqs_off(a0, 0 /* CALLER_ADDR1 */); #endif } else debug_atomic_inc(&redundant_hardirqs_off); } +void notrace trace_hardirqs_off(void) { + trace_hardirqs_off_caller(CALLER_ADDR0); +} + EXPORT_SYMBOL(trace_hardirqs_off); /* patches/serial-slow-machines.patch0000664000077200007720000000323510655544574016612 0ustar mingomingo--- drivers/char/tty_io.c | 4 ++++ drivers/serial/8250.c | 11 ++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/drivers/char/tty_io.c =================================================================== --- linux-rt-rebase.q.orig/drivers/char/tty_io.c +++ linux-rt-rebase.q/drivers/char/tty_io.c @@ -3648,10 +3648,14 @@ void tty_flip_buffer_push(struct tty_str tty->buf.tail->commit = tty->buf.tail->used; spin_unlock_irqrestore(&tty->buf.lock, flags); +#ifndef CONFIG_PREEMPT_RT if (tty->low_latency) flush_to_ldisc(&tty->buf.work.work); else schedule_delayed_work(&tty->buf.work, 1); +#else + flush_to_ldisc(&tty->buf.work.work); +#endif } EXPORT_SYMBOL(tty_flip_buffer_push); Index: linux-rt-rebase.q/drivers/serial/8250.c =================================================================== --- linux-rt-rebase.q.orig/drivers/serial/8250.c +++ linux-rt-rebase.q/drivers/serial/8250.c @@ -1451,7 +1451,10 @@ static irqreturn_t serial8250_interrupt( { struct irq_info *i = dev_id; struct list_head *l, *end = NULL; - int pass_counter = 0, handled = 0; +#ifndef CONFIG_PREEMPT_RT + int pass_counter = 0; +#endif + int handled = 0; DEBUG_INTR("serial8250_interrupt(%d)...", irq); @@ -1489,12 +1492,18 @@ static irqreturn_t serial8250_interrupt( l = l->next; + /* + * On preempt-rt we can be preempted and run in our + * own thread. + */ +#ifndef CONFIG_PREEMPT_RT if (l == i->head && pass_counter++ > PASS_LIMIT) { /* If we hit this, we're dead. */ printk(KERN_ERR "serial8250: too much work for " "irq%d\n", irq); break; } +#endif } while (l != end); spin_unlock(&i->lock); patches/preempt-rt-cs5530-lock-ide-fix.patch0000664000077200007720000000163210655544574020053 0ustar mingomingo drivers/ide/pci/cs5530.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) Index: linux-rt-rebase.q/drivers/ide/pci/cs5530.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/pci/cs5530.c +++ linux-rt-rebase.q/drivers/ide/pci/cs5530.c @@ -227,8 +227,8 @@ static unsigned int __devinit init_chips goto out; } - spin_lock_irqsave(&ide_lock, flags); - /* all CPUs (there should only be one CPU with this chipset) */ + /* Local CPU. ide_lock is acquired in do_ide_setup_pci_device. */ + local_irq_save(flags); /* * Enable BusMaster and MemoryWriteAndInvalidate for the cs5530: @@ -280,7 +280,7 @@ static unsigned int __devinit init_chips pci_write_config_byte(master_0, 0x42, 0x00); pci_write_config_byte(master_0, 0x43, 0xc1); - spin_unlock_irqrestore(&ide_lock, flags); + local_irq_restore(flags); out: pci_dev_put(master_0); patches/kmap-atomic-prepare.patch0000664000077200007720000001122110655544576016416 0ustar mingomingo With the separation of pagefault_{disable,enable}() from the preempt_count a previously overlooked dependancy became painfully clear. kmap_atomic() is per cpu and relies not only on disabling the pagefault handler, but really needs preemption disabled too. 
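The reason in a nutshell: the atomic kmap slot is derived from smp_processor_id(), so the index is only meaningful while the task cannot migrate. A one-function sketch of that dependency (helper name is hypothetical; the expression is the one used by the architectures patched below):

#include <linux/smp.h>
#include <asm/kmap_types.h>

/* only stable while preemption is disabled */
static unsigned int km_slot(enum km_type type)
{
	return type + KM_TYPE_NR * smp_processor_id();
}
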
make this explicit now - so that we can change pagefault_disable(). Signed-off-by: Peter Zijlstra --- arch/i386/mm/highmem.c | 4 +++- arch/mips/mm/highmem.c | 5 ++++- arch/sparc/mm/highmem.c | 4 +++- include/asm-frv/highmem.h | 2 ++ include/asm-ppc/highmem.h | 4 +++- 5 files changed, 15 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/arch/i386/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mm/highmem.c +++ linux-rt-rebase.q/arch/i386/mm/highmem.c @@ -51,7 +51,7 @@ void *__kmap_atomic_prot(struct page *pa enum fixed_addresses idx; unsigned long vaddr; - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); idx = type + KM_TYPE_NR*smp_processor_id(); @@ -94,6 +94,7 @@ void __kunmap_atomic(void *kvaddr, enum arch_flush_lazy_mmu_mode(); pagefault_enable(); + preempt_enable(); } /* This is the same as kmap_atomic() but can map memory that doesn't @@ -104,6 +105,7 @@ void *__kmap_atomic_pfn(unsigned long pf enum fixed_addresses idx; unsigned long vaddr; + preempt_disable(); pagefault_disable(); idx = type + KM_TYPE_NR*smp_processor_id(); Index: linux-rt-rebase.q/arch/mips/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/mm/highmem.c +++ linux-rt-rebase.q/arch/mips/mm/highmem.c @@ -38,7 +38,7 @@ void *__kmap_atomic(struct page *page, e enum fixed_addresses idx; unsigned long vaddr; - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); @@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr, enum if (vaddr < FIXADDR_START) { // FIXME pagefault_enable(); + preempt_enable(); return; } @@ -78,6 +79,7 @@ void __kunmap_atomic(void *kvaddr, enum #endif pagefault_enable(); + preempt_enable(); } /* @@ -89,6 +91,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum fixed_addresses idx; unsigned long vaddr; + preempt_disable(); pagefault_disable(); idx = type + KM_TYPE_NR*smp_processor_id(); Index: linux-rt-rebase.q/arch/sparc/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/arch/sparc/mm/highmem.c +++ linux-rt-rebase.q/arch/sparc/mm/highmem.c @@ -34,7 +34,7 @@ void *kmap_atomic(struct page *page, enu unsigned long idx; unsigned long vaddr; - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); @@ -71,6 +71,7 @@ void kunmap_atomic(void *kvaddr, enum km if (vaddr < FIXADDR_START) { // FIXME pagefault_enable(); + preempt_enable(); return; } @@ -97,6 +98,7 @@ void kunmap_atomic(void *kvaddr, enum km #endif pagefault_enable(); + preempt_enable(); } /* We may be fed a pagetable here by ptep_to_xxx and others. 
*/ Index: linux-rt-rebase.q/include/asm-frv/highmem.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-frv/highmem.h +++ linux-rt-rebase.q/include/asm-frv/highmem.h @@ -115,6 +115,7 @@ static inline void *kmap_atomic(struct p { unsigned long paddr; + preempt_disable(); pagefault_disable(); paddr = page_to_phys(page); @@ -171,6 +172,7 @@ static inline void kunmap_atomic(void *k BUG(); } pagefault_enable(); + preempt_enable(); } #endif /* !__ASSEMBLY__ */ Index: linux-rt-rebase.q/include/asm-ppc/highmem.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ppc/highmem.h +++ linux-rt-rebase.q/include/asm-ppc/highmem.h @@ -78,7 +78,7 @@ static inline void *kmap_atomic(struct p unsigned int idx; unsigned long vaddr; - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); @@ -102,6 +102,7 @@ static inline void kunmap_atomic(void *k if (vaddr < KMAP_FIX_BEGIN) { // FIXME pagefault_enable(); + preempt_enable(); return; } @@ -115,6 +116,7 @@ static inline void kunmap_atomic(void *k flush_tlb_page(NULL, vaddr); #endif pagefault_enable(); + preempt_enable(); } static inline struct page *kmap_atomic_to_page(void *ptr) patches/arm-latency-tracer-support.patch0000664000077200007720000000503010655544572017761 0ustar mingomingoadd latency tracer support for EP93xx boards Add latency tracer support for the EP93xx platform. This is done by: - adding the correct Kconfig options - add (an empty) save_stack_trace implementation. -> Someone needs to implement save_stack_trace for arm :) Maybe we can use the implementation from rmk? - implementing mach_read_cycles (read out EP93XX_TIMER4_VALUE_LOW) - implementing mach_cycles_to_usecs (just the same way as for the PXA platform) - implementing mach_usecs_to_cycles (just the same way as for the PXA platform) Signed-off-by: Jan Altenberg --- arch/arm/Kconfig | 4 ++++ arch/arm/lib/Makefile | 1 + arch/arm/lib/stacktrace.c | 7 +++++++ include/asm-arm/arch-ep93xx/timex.h | 6 ++++++ 4 files changed, 18 insertions(+) Index: linux/arch/arm/Kconfig =================================================================== --- linux.orig/arch/arm/Kconfig +++ linux/arch/arm/Kconfig @@ -33,6 +33,10 @@ config GENERIC_CLOCKEVENTS bool default n +config STACKTRACE_SUPPORT + bool + default y + config MMU bool default y Index: linux/arch/arm/lib/Makefile =================================================================== --- linux.orig/arch/arm/lib/Makefile +++ linux/arch/arm/lib/Makefile @@ -41,6 +41,7 @@ lib-$(CONFIG_ARCH_RPC) += ecard.o io-ac lib-$(CONFIG_ARCH_CLPS7500) += io-acorn.o lib-$(CONFIG_ARCH_L7200) += io-acorn.o lib-$(CONFIG_ARCH_SHARK) += io-shark.o +lib-$(CONFIG_STACKTRACE) += stacktrace.o $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S $(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S Index: linux/arch/arm/lib/stacktrace.c =================================================================== --- /dev/null +++ linux/arch/arm/lib/stacktrace.c @@ -0,0 +1,7 @@ +#include +#include + +void save_stack_trace(struct stack_trace *trace) +{ +} + Index: linux/include/asm-arm/arch-ep93xx/timex.h =================================================================== --- linux.orig/include/asm-arm/arch-ep93xx/timex.h +++ linux/include/asm-arm/arch-ep93xx/timex.h @@ -1,5 +1,11 @@ /* * linux/include/asm-arm/arch-ep93xx/timex.h */ +#include +#include 
#define CLOCK_TICK_RATE 983040 + +#define mach_read_cycles() __raw_readl(EP93XX_TIMER4_VALUE_LOW) +#define mach_cycles_to_usecs(d) (((d) * ((1000000LL << 32) / CLOCK_TICK_RATE)) >> 32) +#define mach_usecs_to_cycles(d) (((d) * (((long long)CLOCK_TICK_RATE << 32) / 1000000)) >> 32) patches/vortex-fix.patch0000664000077200007720000000514610655544574014702 0ustar mingomingo Argh, cut and paste wasn't enough... Use this patch instead. It needs an irq disable. But, believe it or not, on SMP this is actually better. If the irq is shared (as it is in Mark's case), we don't stop the irq of other devices from being handled on another CPU (unfortunately for Mark, he pinned all interrupts to one CPU). Andrew, should this be changed in mainline too? -- Steve Signed-off-by: Steven Rostedt drivers/net/3c59x.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) Index: linux-rt-rebase.q/drivers/net/3c59x.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/3c59x.c +++ linux-rt-rebase.q/drivers/net/3c59x.c @@ -792,9 +792,9 @@ static void poll_vortex(struct net_devic { struct vortex_private *vp = netdev_priv(dev); unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #endif @@ -1728,6 +1728,7 @@ vortex_timer(unsigned long data) int next_tick = 60*HZ; int ok = 0; int media_status, old_window; + unsigned long flags; if (vortex_debug > 2) { printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n", @@ -1735,7 +1736,7 @@ vortex_timer(unsigned long data) printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); } - disable_irq_lockdep(dev->irq); + spin_lock_irqsave(&vp->lock, flags); old_window = ioread16(ioaddr + EL3_CMD) >> 13; EL3WINDOW(4); media_status = ioread16(ioaddr + Wn4_Media); @@ -1758,9 +1759,7 @@ vortex_timer(unsigned long data) case XCVR_MII: case XCVR_NWAY: { ok = 1; - spin_lock_bh(&vp->lock); vortex_check_media(dev, 0); - spin_unlock_bh(&vp->lock); } break; default: /* Other media types handled by Tx timeouts. 
*/ @@ -1816,7 +1815,7 @@ leave_media_alone: dev->name, media_tbl[dev->if_port].name); EL3WINDOW(old_window); - enable_irq_lockdep(dev->irq); + spin_unlock_irqrestore(&vp->lock, flags); mod_timer(&vp->timer, RUN_AT(next_tick)); if (vp->deferred) iowrite16(FakeIntr, ioaddr + EL3_CMD); @@ -1849,13 +1848,17 @@ static void vortex_tx_timeout(struct net /* * Block interrupts because vortex_interrupt does a bare spin_lock() */ +#ifndef CONFIG_PREEMPT_RT unsigned long flags; local_irq_save(flags); +#endif if (vp->full_bus_master_tx) boomerang_interrupt(dev->irq, dev); else vortex_interrupt(dev->irq, dev); +#ifndef CONFIG_PREEMPT_RT local_irq_restore(flags); +#endif } } patches/preempt-realtime-i386.patch0000664000077200007720000007412310655544574016533 0ustar mingomingo--- arch/i386/Kconfig.debug | 2 + arch/i386/kernel/apic.c | 2 - arch/i386/kernel/cpu/mtrr/generic.c | 2 - arch/i386/kernel/head.S | 1 arch/i386/kernel/i8253.c | 2 - arch/i386/kernel/i8259.c | 2 - arch/i386/kernel/io_apic.c | 4 +-- arch/i386/kernel/irq.c | 4 ++- arch/i386/kernel/microcode.c | 2 - arch/i386/kernel/nmi.c | 5 +++ arch/i386/kernel/process.c | 14 ++++++++-- arch/i386/kernel/signal.c | 14 ++++++++++ arch/i386/kernel/smp.c | 22 ++++++++++++---- arch/i386/kernel/time.c | 2 - arch/i386/kernel/traps.c | 29 +++++++++++++++++---- arch/i386/kernel/vm86.c | 1 arch/i386/mm/fault.c | 7 +++-- arch/i386/mm/highmem.c | 37 +++++++++++++++++++++------- arch/i386/mm/pgtable.c | 2 - arch/i386/oprofile/Kconfig | 3 ++ arch/i386/pci/common.c | 2 - arch/i386/pci/direct.c | 29 ++++++++++++++------- arch/i386/pci/pci.h | 2 - include/asm-i386/acpi.h | 4 +-- include/asm-i386/dma.h | 2 - include/asm-i386/highmem.h | 27 ++++++++++++++++++++ include/asm-i386/i8253.h | 2 - include/asm-i386/i8259.h | 2 - include/asm-i386/mach-default/irq_vectors.h | 2 - include/asm-i386/mc146818rtc.h | 2 - include/asm-i386/pgtable.h | 2 - include/asm-i386/tlbflush.h | 26 +++++++++++++++++++ include/asm-i386/xor.h | 21 +++++++++++++-- 33 files changed, 222 insertions(+), 58 deletions(-) Index: linux-rt-rebase.q/arch/i386/Kconfig.debug =================================================================== --- linux-rt-rebase.q.orig/arch/i386/Kconfig.debug +++ linux-rt-rebase.q/arch/i386/Kconfig.debug @@ -49,6 +49,7 @@ config DEBUG_PAGEALLOC config DEBUG_RODATA bool "Write protect kernel read-only data structures" depends on DEBUG_KERNEL + default y help Mark the kernel read-only data as write-protected in the pagetables, in order to catch accidental (and incorrect) writes to such const @@ -59,6 +60,7 @@ config DEBUG_RODATA config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" depends on DEBUG_KERNEL + default y help If you say Y here the kernel will use a 4Kb stacksize for the kernel stack attached to each process/thread. This facilitates Index: linux-rt-rebase.q/arch/i386/kernel/apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/apic.c +++ linux-rt-rebase.q/arch/i386/kernel/apic.c @@ -579,7 +579,7 @@ static void local_apic_timer_interrupt(v * interrupt as well. Thus we cannot inline the local irq ... 
] */ -void fastcall smp_apic_timer_interrupt(struct pt_regs *regs) +void fastcall notrace smp_apic_timer_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); Index: linux-rt-rebase.q/arch/i386/kernel/cpu/mtrr/generic.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/cpu/mtrr/generic.c +++ linux-rt-rebase.q/arch/i386/kernel/cpu/mtrr/generic.c @@ -330,7 +330,7 @@ static unsigned long set_mtrr_state(void static unsigned long cr4 = 0; -static DEFINE_SPINLOCK(set_atomicity_lock); +static DEFINE_RAW_SPINLOCK(set_atomicity_lock); /* * Since we are disabling the cache don't allow any interrupts - they Index: linux-rt-rebase.q/arch/i386/kernel/head.S =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/head.S +++ linux-rt-rebase.q/arch/i386/kernel/head.S @@ -492,6 +492,7 @@ ignore_int: call printk #endif addl $(5*4),%esp + call dump_stack popl %ds popl %es popl %edx Index: linux-rt-rebase.q/arch/i386/kernel/i8253.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/i8253.c +++ linux-rt-rebase.q/arch/i386/kernel/i8253.c @@ -14,7 +14,7 @@ #include #include -DEFINE_SPINLOCK(i8253_lock); +DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); /* Index: linux-rt-rebase.q/arch/i386/kernel/i8259.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/i8259.c +++ linux-rt-rebase.q/arch/i386/kernel/i8259.c @@ -34,7 +34,7 @@ */ static int i8259A_auto_eoi; -DEFINE_SPINLOCK(i8259A_lock); +DEFINE_RAW_SPINLOCK(i8259A_lock); static void mask_and_ack_8259A(unsigned int); static struct irq_chip i8259A_chip = { Index: linux-rt-rebase.q/arch/i386/kernel/io_apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/io_apic.c +++ linux-rt-rebase.q/arch/i386/kernel/io_apic.c @@ -56,8 +56,8 @@ atomic_t irq_mis_count; /* Where if anywhere is the i8259 connect in external int mode */ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; -static DEFINE_SPINLOCK(ioapic_lock); -static DEFINE_SPINLOCK(vector_lock); +static DEFINE_RAW_SPINLOCK(ioapic_lock); +static DEFINE_RAW_SPINLOCK(vector_lock); int timer_over_8254 __initdata = 1; Index: linux-rt-rebase.q/arch/i386/kernel/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/irq.c +++ linux-rt-rebase.q/arch/i386/kernel/irq.c @@ -79,6 +79,8 @@ fastcall notrace unsigned int do_IRQ(str u32 *isp; #endif + irq_show_regs_callback(smp_processor_id(), regs); + if (unlikely((unsigned)irq >= NR_IRQS)) { printk(KERN_EMERG "%s: cannot handle IRQ %d\n", __FUNCTION__, irq); @@ -100,7 +102,7 @@ fastcall notrace unsigned int do_IRQ(str __asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (THREAD_SIZE - 1)); if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) { - printk("do_IRQ: stack overflow: %ld\n", + printk("BUG: do_IRQ: stack overflow: %ld\n", esp - sizeof(struct thread_info)); dump_stack(); } Index: linux-rt-rebase.q/arch/i386/kernel/microcode.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/microcode.c +++ linux-rt-rebase.q/arch/i386/kernel/microcode.c @@ -117,7 +117,7 @@ MODULE_LICENSE("GPL"); #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE) /* serialize access to the physical 
write to MSR 0x79 */ -static DEFINE_SPINLOCK(microcode_update_lock); +static DEFINE_RAW_SPINLOCK(microcode_update_lock); /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ static DEFINE_MUTEX(microcode_mutex); Index: linux-rt-rebase.q/arch/i386/kernel/nmi.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/nmi.c +++ linux-rt-rebase.q/arch/i386/kernel/nmi.c @@ -62,7 +62,12 @@ static int endflag __initdata = 0; */ static __init void nmi_cpu_busy(void *data) { + /* + * avoid a warning, on PREEMPT_RT this wont run in hardirq context: + */ +#ifndef CONFIG_PREEMPT_RT local_irq_enable_in_hardirq(); +#endif /* Intentionally don't use cpu_relax here. This is to make sure that the performance counter really ticks, even if there is a simulator or similar that catches the Index: linux-rt-rebase.q/arch/i386/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/process.c +++ linux-rt-rebase.q/arch/i386/kernel/process.c @@ -382,15 +382,23 @@ void exit_thread(void) if (unlikely(test_thread_flag(TIF_IO_BITMAP))) { struct task_struct *tsk = current; struct thread_struct *t = &tsk->thread; - int cpu = get_cpu(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + void *io_bitmap_ptr = t->io_bitmap_ptr; + int cpu; + struct tss_struct *tss; - kfree(t->io_bitmap_ptr); + /* + * On PREEMPT_RT we must not call kfree() with + * preemption disabled, so we first zap the pointer: + */ t->io_bitmap_ptr = NULL; + kfree(io_bitmap_ptr); + clear_thread_flag(TIF_IO_BITMAP); /* * Careful, clear this in the TSS too: */ + cpu = get_cpu(); + tss = &per_cpu(init_tss, cpu); memset(tss->io_bitmap, 0xff, tss->io_bitmap_max); t->io_bitmap_max = 0; tss->io_bitmap_owner = NULL; Index: linux-rt-rebase.q/arch/i386/kernel/signal.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/signal.c +++ linux-rt-rebase.q/arch/i386/kernel/signal.c @@ -540,6 +540,13 @@ handle_signal(unsigned long sig, siginfo } } +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + local_irq_enable(); + preempt_check_resched(); +#endif /* * If TF is set due to a debugger (PT_DTRACE), clear the TF flag so * that register information in the sigcontext is correct. @@ -580,6 +587,13 @@ static void fastcall do_signal(struct pt struct k_sigaction ka; sigset_t *oldset; +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + local_irq_enable(); + preempt_check_resched(); +#endif /* * We want the common case to go fast, which * is why we may in certain cases get here from Index: linux-rt-rebase.q/arch/i386/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/smp.c +++ linux-rt-rebase.q/arch/i386/kernel/smp.c @@ -247,7 +247,7 @@ void send_IPI_mask_sequence(cpumask_t ma static cpumask_t flush_cpumask; static struct mm_struct * flush_mm; static unsigned long flush_va; -static DEFINE_SPINLOCK(tlbstate_lock); +static DEFINE_RAW_SPINLOCK(tlbstate_lock); /* * We cannot call mmdrop() because we are in interrupt context, @@ -477,10 +477,20 @@ static void native_smp_send_reschedule(i } /* + * this function sends a 'reschedule' IPI to all other CPUs. 
+ * This is used when RT tasks are starving and other CPUs + * might be able to run them: + */ +void smp_send_reschedule_allbutself(void) +{ + send_IPI_allbutself(RESCHEDULE_VECTOR); +} + +/* * Structure and data for smp_call_function(). This is designed to minimise * static memory requirements. It also looks cleaner. */ -static DEFINE_SPINLOCK(call_lock); +static DEFINE_RAW_SPINLOCK(call_lock); struct call_data_struct { void (*func) (void *info); @@ -635,14 +645,14 @@ static void native_smp_send_stop(void) } /* - * Reschedule call back. Nothing to do, - * all the work is done automatically when - * we return from the interrupt. + * Reschedule call back. Trigger a reschedule pass so that + * RT-overload balancing can pass tasks around. */ -fastcall void smp_reschedule_interrupt(struct pt_regs *regs) +fastcall notrace void smp_reschedule_interrupt(struct pt_regs *regs) { trace_special(regs->eip, 0, 0); ack_APIC_irq(); + set_tsk_need_resched(current); } fastcall void smp_call_function_interrupt(struct pt_regs *regs) Index: linux-rt-rebase.q/arch/i386/kernel/time.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/time.c +++ linux-rt-rebase.q/arch/i386/kernel/time.c @@ -124,7 +124,7 @@ static int set_rtc_mmss(unsigned long no int timer_ack; -unsigned long profile_pc(struct pt_regs *regs) +unsigned long notrace profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); Index: linux-rt-rebase.q/arch/i386/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/traps.c +++ linux-rt-rebase.q/arch/i386/kernel/traps.c @@ -280,6 +280,12 @@ void dump_stack(void) EXPORT_SYMBOL(dump_stack); +#if defined(CONFIG_DEBUG_STACKOVERFLOW) && defined(CONFIG_EVENT_TRACE) +extern unsigned long worst_stack_left; +#else +# define worst_stack_left -1L +#endif + void show_registers(struct pt_regs *regs) { int i; @@ -308,8 +314,12 @@ void show_registers(struct pt_regs *regs regs->eax, regs->ebx, regs->ecx, regs->edx); printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", regs->esi, regs->edi, regs->ebp, esp); - printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", - regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss); + + printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x " + " preempt:%08x\n", + regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, + ss, preempt_count()); + printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", TASK_COMM_LEN, current->comm, current->pid, current_thread_info(), current, task_thread_info(current)); @@ -369,11 +379,11 @@ int is_valid_bugaddr(unsigned long eip) void die(const char * str, struct pt_regs * regs, long err) { static struct { - spinlock_t lock; + raw_spinlock_t lock; u32 lock_owner; int lock_owner_depth; } die = { - .lock = __SPIN_LOCK_UNLOCKED(die.lock), + .lock = RAW_SPIN_LOCK_UNLOCKED(die.lock), .lock_owner = -1, .lock_owner_depth = 0 }; @@ -480,6 +490,11 @@ static void __kprobes do_trap(int trapnr if (!user_mode(regs)) goto kernel_trap; +#ifdef CONFIG_PREEMPT_RT + local_irq_enable(); + preempt_check_resched(); +#endif + trap_signal: { /* * We want error_code and trap_no set for userspace faults and @@ -736,10 +751,11 @@ void __kprobes die_nmi(struct pt_regs *r crash_kexec(regs); } + nmi_exit(); do_exit(SIGSEGV); } -static __kprobes void default_do_nmi(struct pt_regs * regs) +static notrace __kprobes void default_do_nmi(struct pt_regs * regs) 
{ unsigned char reason = 0; @@ -779,11 +795,12 @@ static __kprobes void default_do_nmi(str static int ignore_nmis; -fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code) +fastcall notrace __kprobes void do_nmi(struct pt_regs * regs, long error_code) { int cpu; nmi_enter(); + nmi_trace((unsigned long)do_nmi, regs->eip, regs->eflags); cpu = smp_processor_id(); Index: linux-rt-rebase.q/arch/i386/kernel/vm86.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/vm86.c +++ linux-rt-rebase.q/arch/i386/kernel/vm86.c @@ -137,6 +137,7 @@ struct pt_regs * fastcall save_v86_state local_irq_enable(); if (!current->thread.vm86_info) { + local_irq_disable(); printk("no vm86_info: BAD\n"); do_exit(SIGSEGV); } Index: linux-rt-rebase.q/arch/i386/mm/fault.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mm/fault.c +++ linux-rt-rebase.q/arch/i386/mm/fault.c @@ -297,8 +297,8 @@ int show_unhandled_signals = 1; * bit 3 == 1 means use of reserved bit detected * bit 4 == 1 means fault was an instruction fetch */ -fastcall void __kprobes do_page_fault(struct pt_regs *regs, - unsigned long error_code) +fastcall notrace void __kprobes do_page_fault(struct pt_regs *regs, + unsigned long error_code) { struct task_struct *tsk; struct mm_struct *mm; @@ -309,6 +309,7 @@ fastcall void __kprobes do_page_fault(st /* get the address */ address = read_cr2(); + trace_special(regs->eip, error_code, address); tsk = current; @@ -498,6 +499,8 @@ bad_area_nosemaphore: if (nr == 6) { stop_trace(); + user_trace_stop(); + zap_rt_locks(); do_invalid_op(regs, 0); return; } Index: linux-rt-rebase.q/arch/i386/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mm/highmem.c +++ linux-rt-rebase.q/arch/i386/mm/highmem.c @@ -18,6 +18,26 @@ void kunmap(struct page *page) kunmap_high(page); } +void kunmap_virt(void *ptr) +{ + struct page *page; + + if ((unsigned long)ptr < PKMAP_ADDR(0)) + return; + page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]); + kunmap(page); +} + +struct page *kmap_to_page(void *ptr) +{ + struct page *page; + + if ((unsigned long)ptr < PKMAP_ADDR(0)) + return virt_to_page(ptr); + page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]); + return page; +} + /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * no global lock is needed and because the kmap code must perform a global TLB @@ -26,7 +46,7 @@ void kunmap(struct page *page) * However when holding an atomic kmap is is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. 
*/ -void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) +void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) { enum fixed_addresses idx; unsigned long vaddr; @@ -47,12 +67,12 @@ void *kmap_atomic_prot(struct page *page return (void*) vaddr; } -void *kmap_atomic(struct page *page, enum km_type type) +void *__kmap_atomic(struct page *page, enum km_type type) { return kmap_atomic_prot(page, type, kmap_prot); } -void kunmap_atomic(void *kvaddr, enum km_type type) +void __kunmap_atomic(void *kvaddr, enum km_type type) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); @@ -79,7 +99,7 @@ void kunmap_atomic(void *kvaddr, enum km /* This is the same as kmap_atomic() but can map memory that doesn't * have a struct page associated with it. */ -void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) +void *__kmap_atomic_pfn(unsigned long pfn, enum km_type type) { enum fixed_addresses idx; unsigned long vaddr; @@ -94,7 +114,7 @@ void *kmap_atomic_pfn(unsigned long pfn, return (void*) vaddr; } -struct page *kmap_atomic_to_page(void *ptr) +struct page *__kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long)ptr; pte_t *pte; @@ -109,6 +129,7 @@ struct page *kmap_atomic_to_page(void *p EXPORT_SYMBOL(kmap); EXPORT_SYMBOL(kunmap); -EXPORT_SYMBOL(kmap_atomic); -EXPORT_SYMBOL(kunmap_atomic); -EXPORT_SYMBOL(kmap_atomic_to_page); +EXPORT_SYMBOL(kunmap_virt); +EXPORT_SYMBOL(__kmap_atomic); +EXPORT_SYMBOL(__kunmap_atomic); +EXPORT_SYMBOL(__kmap_atomic_to_page); Index: linux-rt-rebase.q/arch/i386/mm/pgtable.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mm/pgtable.c +++ linux-rt-rebase.q/arch/i386/mm/pgtable.c @@ -208,7 +208,7 @@ void pmd_ctor(void *pmd, struct kmem_cac * vmalloc faults work because attached pagetables are never freed. * -- wli */ -DEFINE_SPINLOCK(pgd_lock); +DEFINE_RAW_SPINLOCK(pgd_lock); struct page *pgd_list; static inline void pgd_list_add(pgd_t *pgd) Index: linux-rt-rebase.q/arch/i386/oprofile/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/i386/oprofile/Kconfig +++ linux-rt-rebase.q/arch/i386/oprofile/Kconfig @@ -15,3 +15,6 @@ config OPROFILE If unsure, say N. +config PROFILE_NMI + bool + default y Index: linux-rt-rebase.q/arch/i386/pci/common.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/pci/common.c +++ linux-rt-rebase.q/arch/i386/pci/common.c @@ -52,7 +52,7 @@ int pcibios_scanned; * This interrupt-safe spinlock protects all accesses to PCI * configuration space. 
*/ -DEFINE_SPINLOCK(pci_config_lock); +DEFINE_RAW_SPINLOCK(pci_config_lock); /* * Several buggy motherboards address only 16 devices and mirror Index: linux-rt-rebase.q/arch/i386/pci/direct.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/pci/direct.c +++ linux-rt-rebase.q/arch/i386/pci/direct.c @@ -220,16 +220,23 @@ static int __init pci_check_type1(void) unsigned int tmp; int works = 0; - local_irq_save(flags); + spin_lock_irqsave(&pci_config_lock, flags); outb(0x01, 0xCFB); tmp = inl(0xCF8); outl(0x80000000, 0xCF8); - if (inl(0xCF8) == 0x80000000 && pci_sanity_check(&pci_direct_conf1)) { - works = 1; + + if (inl(0xCF8) == 0x80000000) { + spin_unlock_irqrestore(&pci_config_lock, flags); + + if (pci_sanity_check(&pci_direct_conf1)) + works = 1; + + spin_lock_irqsave(&pci_config_lock, flags); } outl(tmp, 0xCF8); - local_irq_restore(flags); + + spin_unlock_irqrestore(&pci_config_lock, flags); return works; } @@ -239,17 +246,19 @@ static int __init pci_check_type2(void) unsigned long flags; int works = 0; - local_irq_save(flags); + spin_lock_irqsave(&pci_config_lock, flags); outb(0x00, 0xCFB); outb(0x00, 0xCF8); outb(0x00, 0xCFA); - if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00 && - pci_sanity_check(&pci_direct_conf2)) { - works = 1; - } - local_irq_restore(flags); + if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00) { + spin_unlock_irqrestore(&pci_config_lock, flags); + + if (pci_sanity_check(&pci_direct_conf2)) + works = 1; + } else + spin_unlock_irqrestore(&pci_config_lock, flags); return works; } Index: linux-rt-rebase.q/arch/i386/pci/pci.h =================================================================== --- linux-rt-rebase.q.orig/arch/i386/pci/pci.h +++ linux-rt-rebase.q/arch/i386/pci/pci.h @@ -78,7 +78,7 @@ struct irq_routing_table { extern unsigned int pcibios_irq_mask; extern int pcibios_scanned; -extern spinlock_t pci_config_lock; +extern raw_spinlock_t pci_config_lock; extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); Index: linux-rt-rebase.q/include/asm-i386/acpi.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/acpi.h +++ linux-rt-rebase.q/include/asm-i386/acpi.h @@ -52,8 +52,8 @@ #define ACPI_ASM_MACROS #define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable() -#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_DISABLE_IRQS() local_irq_disable_nort() +#define ACPI_ENABLE_IRQS() local_irq_enable_nort() #define ACPI_FLUSH_CPU_CACHE() wbinvd() int __acpi_acquire_global_lock(unsigned int *lock); Index: linux-rt-rebase.q/include/asm-i386/dma.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/dma.h +++ linux-rt-rebase.q/include/asm-i386/dma.h @@ -134,7 +134,7 @@ #define DMA_AUTOINIT 0x10 -extern spinlock_t dma_spin_lock; +extern spinlock_t dma_spin_lock; static __inline__ unsigned long claim_dma_lock(void) { Index: linux-rt-rebase.q/include/asm-i386/highmem.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/highmem.h +++ linux-rt-rebase.q/include/asm-i386/highmem.h @@ -67,6 +67,16 @@ extern void * FASTCALL(kmap_high(struct extern void FASTCALL(kunmap_high(struct page *page)); void *kmap(struct page *page); +extern void kunmap_virt(void *ptr); +extern struct page *kmap_to_page(void *ptr); +void kunmap(struct page *page); + +void *__kmap_atomic_prot(struct page 
*page, enum km_type type, pgprot_t prot); +void *__kmap_atomic(struct page *page, enum km_type type); +void __kunmap_atomic(void *kvaddr, enum km_type type); +void *__kmap_atomic_pfn(unsigned long pfn, enum km_type type); +struct page *__kmap_atomic_to_page(void *ptr); + void kunmap(struct page *page); void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); void *kmap_atomic(struct page *page, enum km_type type); @@ -80,6 +90,23 @@ struct page *kmap_atomic_to_page(void *p #define flush_cache_kmaps() do { } while (0) +/* + * on PREEMPT_RT kmap_atomic() is a wrapper that uses kmap(): + */ +#ifdef CONFIG_PREEMPT_RT +# define kmap_atomic_prot(page, type, prot) kmap(page) +# define kmap_atomic(page, type) kmap(page) +# define kmap_atomic_pfn(pfn, type) kmap(pfn_to_page(pfn)) +# define kunmap_atomic(kvaddr, type) kunmap_virt(kvaddr) +# define kmap_atomic_to_page(kvaddr) kmap_to_page(kvaddr) +#else +# define kmap_atomic_prot(page, type, prot) __kmap_atomic_prot(page, type, prot) +# define kmap_atomic(page, type) __kmap_atomic(page, type) +# define kmap_atomic_pfn(pfn, type) __kmap_atomic_pfn(pfn, type) +# define kunmap_atomic(kvaddr, type) __kunmap_atomic(kvaddr, type) +# define kmap_atomic_to_page(kvaddr) __kmap_atomic_to_page(kvaddr) +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_HIGHMEM_H */ Index: linux-rt-rebase.q/include/asm-i386/i8253.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/i8253.h +++ linux-rt-rebase.q/include/asm-i386/i8253.h @@ -6,7 +6,7 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -extern spinlock_t i8253_lock; +extern raw_spinlock_t i8253_lock; extern struct clock_event_device *global_clock_event; Index: linux-rt-rebase.q/include/asm-i386/i8259.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/i8259.h +++ linux-rt-rebase.q/include/asm-i386/i8259.h @@ -7,7 +7,7 @@ extern unsigned int cached_irq_mask; #define cached_master_mask (__byte(0, cached_irq_mask)) #define cached_slave_mask (__byte(1, cached_irq_mask)) -extern spinlock_t i8259A_lock; +extern raw_spinlock_t i8259A_lock; extern void init_8259A(int auto_eoi); extern void enable_8259A_irq(unsigned int irq); Index: linux-rt-rebase.q/include/asm-i386/mach-default/irq_vectors.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/mach-default/irq_vectors.h +++ linux-rt-rebase.q/include/asm-i386/mach-default/irq_vectors.h @@ -63,7 +63,7 @@ * levels. 
(0x80 is the syscall vector) */ #define FIRST_DEVICE_VECTOR 0x31 -#define FIRST_SYSTEM_VECTOR 0xef +#define FIRST_SYSTEM_VECTOR 0xee #define TIMER_IRQ 0 Index: linux-rt-rebase.q/include/asm-i386/mc146818rtc.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/mc146818rtc.h +++ linux-rt-rebase.q/include/asm-i386/mc146818rtc.h @@ -72,7 +72,7 @@ static inline unsigned char current_lock lock_cmos(reg) #define lock_cmos_suffix(reg) \ unlock_cmos(); \ - local_irq_restore(cmos_flags); \ + local_irq_restore(cmos_flags); \ } while (0) #else #define lock_cmos_prefix(reg) do {} while (0) Index: linux-rt-rebase.q/include/asm-i386/pgtable.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/pgtable.h +++ linux-rt-rebase.q/include/asm-i386/pgtable.h @@ -36,7 +36,7 @@ struct vm_area_struct; extern unsigned long empty_zero_page[1024]; extern pgd_t swapper_pg_dir[1024]; extern struct kmem_cache *pmd_cache; -extern spinlock_t pgd_lock; +extern raw_spinlock_t pgd_lock; extern struct page *pgd_list; void check_pgt_cache(void); Index: linux-rt-rebase.q/include/asm-i386/tlbflush.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/tlbflush.h +++ linux-rt-rebase.q/include/asm-i386/tlbflush.h @@ -4,6 +4,21 @@ #include #include +/* + * TLB-flush needs to be nonpreemptible on PREEMPT_RT due to the + * following complex race scenario: + * + * if the current task is lazy-TLB and does a TLB flush and + * gets preempted after the movl %%r3, %0 but before the + * movl %0, %%cr3 then its ->active_mm might change and it will + * install the wrong cr3 when it switches back. This is not a + * problem for the lazy-TLB task itself, but if the next task it + * switches to has an ->mm that is also the lazy-TLB task's + * new ->active_mm, then the scheduler will assume that cr3 is + * the new one, while we overwrote it with the old one. The result + * is the wrong cr3 in the new (non-lazy-TLB) task, which typically + * causes an infinite pagefault upon the next userspace access. 
+ */ #ifdef CONFIG_PARAVIRT #include #else @@ -16,11 +31,13 @@ do { \ unsigned int tmpreg; \ \ + preempt_disable(); \ __asm__ __volatile__( \ "movl %%cr3, %0; \n" \ "movl %0, %%cr3; # flush TLB \n" \ : "=r" (tmpreg) \ :: "memory"); \ + preempt_enable(); \ } while (0) /* @@ -31,6 +48,7 @@ do { \ unsigned int tmpreg, cr4, cr4_orig; \ \ + preempt_disable(); \ __asm__ __volatile__( \ "movl %%cr4, %2; # turn off PGE \n" \ "movl %2, %1; \n" \ @@ -42,6 +60,7 @@ : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \ : "i" (~X86_CR4_PGE) \ : "memory"); \ + preempt_enable(); \ } while (0) #define __native_flush_tlb_single(addr) \ @@ -98,6 +117,13 @@ static inline void flush_tlb_mm(struct mm_struct *mm) { + /* + * This is safe on PREEMPT_RT because if we preempt + * right after the check but before the __flush_tlb(), + * and if ->active_mm changes, then we might miss a + * TLB flush, but that TLB flush happened already when + * ->active_mm was changed: + */ if (mm == current->active_mm) __flush_tlb(); } Index: linux-rt-rebase.q/include/asm-i386/xor.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/xor.h +++ linux-rt-rebase.q/include/asm-i386/xor.h @@ -862,7 +862,21 @@ static struct xor_block_template xor_blo #include #undef XOR_TRY_TEMPLATES -#define XOR_TRY_TEMPLATES \ +/* + * MMX/SSE ops disable preemption for long periods of time, + * so on PREEMPT_RT use the register-based ops only: + */ +#ifdef CONFIG_PREEMPT_RT +# define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + } while (0) +# define XOR_SELECT_TEMPLATE(FASTEST) (FASTEST) +#else +# define XOR_TRY_TEMPLATES \ do { \ xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_8regs_p); \ @@ -875,9 +889,10 @@ static struct xor_block_template xor_blo xor_speed(&xor_block_p5_mmx); \ } \ } while (0) - /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ -#define XOR_SELECT_TEMPLATE(FASTEST) \ +# define XOR_SELECT_TEMPLATE(FASTEST) \ (cpu_has_xmm ? 
&xor_block_pIII_sse : FASTEST) +#endif + patches/latency-trace-fix.patch0000664000077200007720000000511210655544572016075 0ustar mingomingoFrom linux-rt-users-owner@vger.kernel.org Fri Jul 13 20:13:10 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from vger.kernel.org (vger.kernel.org [209.132.176.167]) by mail.tglx.de (Postfix) with ESMTP id 9AD1E65C3E9; Fri, 13 Jul 2007 20:13:10 +0200 (CEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1760492AbXGMSNJ (ORCPT + 1 other); Fri, 13 Jul 2007 14:13:09 -0400 Received: (majordomo@vger.kernel.org) by vger.kernel.org id S932549AbXGMSNJ (ORCPT ); Fri, 13 Jul 2007 14:13:09 -0400 Received: from deeprooted.net ([216.254.16.51]:38939 "EHLO paris.hilman.org" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1759850AbXGMSNG (ORCPT ); Fri, 13 Jul 2007 14:13:06 -0400 Received: by paris.hilman.org (Postfix, from userid 1000) id 98015E4C5C2; Fri, 13 Jul 2007 10:52:28 -0700 (PDT) Message-Id: <20070713175228.311226264@mvista.com> References: <20070713175214.336577416@mvista.com> User-Agent: quilt/0.45-1 Date: Fri, 13 Jul 2007 10:52:17 -0700 From: Kevin Hilman To: tglx@linutronix.de, mingo@elte.hu Cc: linux-rt-users@vger.kernel.org, linux-kernel@vger.kernel.org Subject: [PATCH -rt 3/6] Compile fix for PREEMPT_TIMING on and TRACE_IRQFLAGS off Content-Disposition: inline; filename=latency-trace-fix.patch Sender: linux-rt-users-owner@vger.kernel.org Precedence: bulk X-Mailing-List: linux-rt-users@vger.kernel.org X-Filter-To: .Kernel.rt-users X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Mime-Version: 1.0 Fix compile of latency_trace.c in the case where CRITICAL_PREEMPT_TIMING=y and TRACE_IRQFLAGS=n (because DEBUG_KERNEL is disabled) Signed-off-by: Kevin Hilman --- kernel/latency_trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -2154,7 +2154,7 @@ void notrace unmask_preempt_count(unsign } EXPORT_SYMBOL(unmask_preempt_count); -#ifdef CONFIG_CRITICAL_PREEMPT_TIMING +#if defined(CONFIG_CRITICAL_PREEMPT_TIMING) && defined(CONFIG_TRACE_IRQFLAGS) /* Some archs do their cpu_idle with preemption on. Don't measure it */ void notrace trace_preempt_enter_idle(void) patches/s_files-pipe-fix.patch0000664000077200007720000000206010655544576015724 0ustar mingomingoSubject: s_files: free_write_pipe() fix From: Ingo Molnar file_kill() has to look at the file's inode (for the barrier logic), hence make sure we free the inode before the file. 
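For readers untangling the +/- hunk below: this is the resulting free_write_pipe(), reassembled as plain C from the patch itself (the comments are annotations added here, not part of the patch). The point of the new ordering is that put_filp() -- and through it file_kill(), which inspects the file's inode for the barrier logic -- still runs while the dentry and mount references are held; only afterwards are they dropped.

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/dcache.h>
#include <linux/pipe_fs_i.h>

void free_write_pipe(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;

	/* release the pipe state attached to the inode */
	free_pipe_info(file->f_dentry->d_inode);

	/* detach the path from the file before freeing it */
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;

	/*
	 * Free the file first: file_kill() looks at the inode, so the
	 * dentry (and with it the inode) must still be alive here.
	 */
	put_filp(file);

	/* now it is safe to drop the dentry and mount references */
	dput(dentry);
	mntput(mnt);
}
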
Signed-off-by: Ingo Molnar --- fs/pipe.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) Index: linux-rt-rebase.q/fs/pipe.c =================================================================== --- linux-rt-rebase.q.orig/fs/pipe.c +++ linux-rt-rebase.q/fs/pipe.c @@ -1012,12 +1012,17 @@ struct file *create_write_pipe(void) return ERR_PTR(err); } -void free_write_pipe(struct file *f) +void free_write_pipe(struct file *file) { - free_pipe_info(f->f_dentry->d_inode); - dput(f->f_path.dentry); - mntput(f->f_path.mnt); - put_filp(f); + struct dentry *dentry = file->f_path.dentry; + struct vfsmount *mnt = file->f_path.mnt; + + free_pipe_info(file->f_dentry->d_inode); + file->f_path.dentry = NULL; + file->f_path.mnt = NULL; + put_filp(file); + dput(dentry); + mntput(mnt); } struct file *create_read_pipe(struct file *wrf) patches/preempt-realtime-profiling.patch0000664000077200007720000000216510655544575020031 0ustar mingomingo--- kernel/profile.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/kernel/profile.c =================================================================== --- linux-rt-rebase.q.orig/kernel/profile.c +++ linux-rt-rebase.q/kernel/profile.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -46,6 +47,7 @@ int prof_on __read_mostly; EXPORT_SYMBOL_GPL(prof_on); static cpumask_t prof_cpu_mask = CPU_MASK_ALL; +int prof_pid = -1; #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); static DEFINE_PER_CPU(int, cpu_profile_flip); @@ -411,7 +413,8 @@ void __profile_tick(int type, struct pt_ { if (type == CPU_PROFILING && timer_hook) timer_hook(regs); - if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) + if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask) && + (prof_pid == -1 || prof_pid == current->pid)) profile_hit(type, (void *)profile_pc(regs)); } patches/kstat-add-rt-stats.patch0000664000077200007720000001232210655544574016214 0ustar mingomingoFrom: tglx Subject: add rt stats to /proc/stat add RT stats to /proc/stat Signed-off-by: Ingo Molnar fs/proc/proc_misc.c | 29 +++++++++++++++++++++-------- include/linux/kernel_stat.h | 2 ++ kernel/sched.c | 6 +++++- 3 files changed, 28 insertions(+), 9 deletions(-) Index: linux-rt-rebase.q/fs/proc/proc_misc.c =================================================================== --- linux-rt-rebase.q.orig/fs/proc/proc_misc.c +++ linux-rt-rebase.q/fs/proc/proc_misc.c @@ -442,7 +442,8 @@ static int show_stat(struct seq_file *p, { int i; unsigned long jif; - cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; + cputime64_t user_rt, user, nice, system_rt, system, idle, + iowait, irq, softirq, steal; u64 sum = 0; struct timespec boottime; unsigned int *per_irq_sum; @@ -451,7 +452,7 @@ static int show_stat(struct seq_file *p, if (!per_irq_sum) return -ENOMEM; - user = nice = system = idle = iowait = + user_rt = user = nice = system_rt = system = idle = iowait = irq = softirq = steal = cputime64_zero; getboottime(&boottime); jif = boottime.tv_sec; @@ -467,6 +468,8 @@ static int show_stat(struct seq_file *p, irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); + user_rt = cputime64_add(user_rt, kstat_cpu(i).cpustat.user_rt); + system_rt = cputime64_add(system_rt, kstat_cpu(i).cpustat.system_rt); for (j = 0; j < NR_IRQS; j++) { unsigned int temp = kstat_cpu(i).irqs[j]; 
sum += temp; @@ -474,7 +477,10 @@ static int show_stat(struct seq_file *p, } } - seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n", + user = cputime64_add(user_rt, user); + system = cputime64_add(system_rt, system); + + seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", (unsigned long long)cputime64_to_clock_t(user), (unsigned long long)cputime64_to_clock_t(nice), (unsigned long long)cputime64_to_clock_t(system), @@ -482,19 +488,24 @@ static int show_stat(struct seq_file *p, (unsigned long long)cputime64_to_clock_t(iowait), (unsigned long long)cputime64_to_clock_t(irq), (unsigned long long)cputime64_to_clock_t(softirq), - (unsigned long long)cputime64_to_clock_t(steal)); + (unsigned long long)cputime64_to_clock_t(steal), + (unsigned long long)cputime64_to_clock_t(user_rt), + (unsigned long long)cputime64_to_clock_t(system_rt)); + for_each_online_cpu(i) { /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = kstat_cpu(i).cpustat.user; + user_rt = kstat_cpu(i).cpustat.user_rt; + system_rt = kstat_cpu(i).cpustat.system_rt; + user = cputime64_add(user_rt, kstat_cpu(i).cpustat.user); nice = kstat_cpu(i).cpustat.nice; - system = kstat_cpu(i).cpustat.system; + system = cputime64_add(system_rt, kstat_cpu(i).cpustat.system); idle = kstat_cpu(i).cpustat.idle; iowait = kstat_cpu(i).cpustat.iowait; irq = kstat_cpu(i).cpustat.irq; softirq = kstat_cpu(i).cpustat.softirq; steal = kstat_cpu(i).cpustat.steal; - seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n", + seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", i, (unsigned long long)cputime64_to_clock_t(user), (unsigned long long)cputime64_to_clock_t(nice), @@ -503,7 +514,9 @@ static int show_stat(struct seq_file *p, (unsigned long long)cputime64_to_clock_t(iowait), (unsigned long long)cputime64_to_clock_t(irq), (unsigned long long)cputime64_to_clock_t(softirq), - (unsigned long long)cputime64_to_clock_t(steal)); + (unsigned long long)cputime64_to_clock_t(steal), + (unsigned long long)cputime64_to_clock_t(user_rt), + (unsigned long long)cputime64_to_clock_t(system_rt)); } seq_printf(p, "intr %llu", (unsigned long long)sum); Index: linux-rt-rebase.q/include/linux/kernel_stat.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/kernel_stat.h +++ linux-rt-rebase.q/include/linux/kernel_stat.h @@ -23,6 +23,8 @@ struct cpu_usage_stat { cputime64_t idle; cputime64_t iowait; cputime64_t steal; + cputime64_t user_rt; + cputime64_t system_rt; }; struct kernel_stat { Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -3277,7 +3277,9 @@ void account_user_time(struct task_struc /* Add user time to cpustat. 
*/ tmp = cputime_to_cputime64(cputime); - if (TASK_NICE(p) > 0) + if (rt_task(p)) + cpustat->user_rt = cputime64_add(cpustat->user_rt, tmp); + else if (TASK_NICE(p) > 0) cpustat->nice = cputime64_add(cpustat->nice, tmp); else cpustat->user = cputime64_add(cpustat->user, tmp); @@ -3304,6 +3306,8 @@ void account_system_time(struct task_str cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count() || (p->flags & PF_SOFTIRQ)) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); + else if (rt_task(p)) + cpustat->system_rt = cputime64_add(cpustat->system_rt, tmp); else if (p != rq->idle) cpustat->system = cputime64_add(cpustat->system, tmp); else if (atomic_read(&rq->nr_iowait) > 0) patches/gtod-optimize.patch0000664000077200007720000000116210655544576015356 0ustar mingomingo--- kernel/timer.c | 7 +++++++ 1 file changed, 7 insertions(+) Index: linux-rt-rebase.q/kernel/timer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/timer.c +++ linux-rt-rebase.q/kernel/timer.c @@ -990,6 +990,13 @@ static inline void update_times(void) static unsigned long last_tick = INITIAL_JIFFIES; unsigned long ticks, flags; + /* + * Dont take the xtime_lock from every CPU in + * every tick - only when needed: + */ + if (jiffies == last_tick) + return; + write_seqlock_irqsave(&xtime_lock, flags); ticks = jiffies - last_tick; if (ticks) { patches/msi-suspend-resume-workaround.patch0000664000077200007720000000152110655544571020513 0ustar mingomingo--- drivers/base/power/resume.c | 1 + drivers/pci/msi.c | 4 ++++ 2 files changed, 5 insertions(+) Index: linux/drivers/base/power/resume.c =================================================================== --- linux.orig/drivers/base/power/resume.c +++ linux/drivers/base/power/resume.c @@ -9,6 +9,7 @@ */ #include +#include #include #include "../base.h" #include "power.h" Index: linux/drivers/pci/msi.c =================================================================== --- linux.orig/drivers/pci/msi.c +++ linux/drivers/pci/msi.c @@ -235,6 +235,10 @@ static void __pci_restore_msi_state(stru return; entry = get_irq_msi(dev->irq); + if (!entry) { + WARN_ON(1); + return; + } pos = entry->msi_attrib.pos; pci_intx(dev, 0); /* disable intx */ patches/fix-acpi-build-weirdness.patch0000664000077200007720000000110410655544571017350 0ustar mingomingo arch/i386/pci/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) Index: linux/arch/i386/pci/Makefile =================================================================== --- linux.orig/arch/i386/pci/Makefile +++ linux/arch/i386/pci/Makefile @@ -4,8 +4,9 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o mmconfig-shared.o obj-$(CONFIG_PCI_DIRECT) += direct.o +obj-$(CONFIG_ACPI) += acpi.o + pci-y := fixup.o -pci-$(CONFIG_ACPI) += acpi.o pci-y += legacy.o irq.o pci-$(CONFIG_X86_VISWS) := visws.o fixup.o patches/redo-regparm-option.patch0000664000077200007720000000611010655544571016446 0ustar mingomingo undo: commit a1a70c25bed75ed36ed48bbe18b9029428d2452d Author: Adrian Bunk Date: Thu Dec 7 02:14:12 2006 +0100 [PATCH] i386: always enable regparm needed for latency tracing. 
--- Documentation/stable_api_nonsense.txt | 3 +++ arch/i386/Kconfig | 7 +++++++ arch/i386/Makefile | 4 +++- include/asm-i386/module.h | 8 +++++++- 4 files changed, 20 insertions(+), 2 deletions(-) Index: linux/Documentation/stable_api_nonsense.txt =================================================================== --- linux.orig/Documentation/stable_api_nonsense.txt +++ linux/Documentation/stable_api_nonsense.txt @@ -62,6 +62,9 @@ consider the following facts about the L - different structures can contain different fields - Some functions may not be implemented at all, (i.e. some locks compile away to nothing for non-SMP builds.) + - Parameter passing of variables from function to function can be + done in different ways (the CONFIG_REGPARM option controls + this.) - Memory within the kernel can be aligned in different ways, depending on the build options. - Linux runs on a wide range of different processor architectures. Index: linux/arch/i386/Kconfig =================================================================== --- linux.orig/arch/i386/Kconfig +++ linux/arch/i386/Kconfig @@ -779,6 +779,13 @@ config BOOT_IOREMAP depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI)) default y +# +# function tracing might turn this off: +# +config REGPARM + bool + default y + config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" depends on PROC_FS Index: linux/arch/i386/Makefile =================================================================== --- linux.orig/arch/i386/Makefile +++ linux/arch/i386/Makefile @@ -31,7 +31,7 @@ LDFLAGS_vmlinux := --emit-relocs endif CHECKFLAGS += -D__i386__ -CFLAGS += -pipe -msoft-float -mregparm=3 -freg-struct-return +CFLAGS += -pipe -msoft-float -freg-struct-return # prevent gcc from keeping the stack 16 byte aligned CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) @@ -39,6 +39,8 @@ CFLAGS += $(call cc-option,-mpreferred-s # CPU-specific tuning. Anything which can be shared with UML should go here. include $(srctree)/arch/i386/Makefile.cpu +cflags-$(CONFIG_REGPARM) += -mregparm=3 + # temporary until string.h is fixed cflags-y += -ffreestanding Index: linux/include/asm-i386/module.h =================================================================== --- linux.orig/include/asm-i386/module.h +++ linux/include/asm-i386/module.h @@ -64,12 +64,18 @@ struct mod_arch_specific #error unknown processor family #endif +#ifdef CONFIG_REGPARM +#define MODULE_REGPARM "REGPARM " +#else +#define MODULE_REGPARM "" +#endif + #ifdef CONFIG_4KSTACKS #define MODULE_STACKSIZE "4KSTACKS " #else #define MODULE_STACKSIZE "" #endif -#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE #endif /* _ASM_I386_MODULE_H */ patches/x86_64-apic-change-setup-calling-convention.patch0000664000077200007720000000315410655544570022600 0ustar mingomingoSubject: x86_64: apic change setup_APIC_timer calling convention setup_APIC_timer takes the file global calibration result as an argument. Remove it. 
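A sketch of why the argument can simply go away: the clock events layer that this series later hooks the local APIC timer into invokes its mode-setting callback with a fixed prototype, so the calibration value cannot be passed down that call chain and has to be read from file scope anyway. The callback name and wiring below are illustrative assumptions, not code from this patch; they only show the constraint that a parameterless setup_APIC_timer() is being prepared for.

#include <linux/clockchips.h>

/*
 * Illustration only: the clockevents set_mode() hook has a fixed
 * signature, leaving no room for a 'clocks' argument, so the per-CPU
 * timer setup must read the file-scope calibration_result itself --
 * which is exactly what the parameterless setup_APIC_timer() allows.
 */
static void lapic_timer_setup_mode(enum clock_event_mode mode,
				   struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		setup_APIC_timer();	/* uses calibration_result internally */
		break;
	default:
		/* shutdown/oneshot handling omitted in this sketch */
		break;
	}
}
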
Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -784,7 +784,7 @@ static void __setup_APIC_LVTT(unsigned i apic_write(APIC_TMICT, clocks); } -static void setup_APIC_timer(unsigned int clocks) +static void setup_APIC_timer(void) { unsigned long flags; int irqen; @@ -793,7 +793,7 @@ static void setup_APIC_timer(unsigned in irqen = ! cpu_isset(smp_processor_id(), timer_interrupt_broadcast_ipi_mask); - __setup_APIC_LVTT(clocks, 0, irqen); + __setup_APIC_LVTT(calibration_result, 0, irqen); /* Turn off PIT interrupt if we use APIC timer as main timer. Only works with the PM timer right now TBD fix it for HPET too. */ @@ -880,7 +880,7 @@ void __init setup_boot_APIC_clock (void) /* * Now set up the timer for real. */ - setup_APIC_timer(calibration_result); + setup_APIC_timer(); local_irq_enable(); } @@ -888,7 +888,7 @@ void __init setup_boot_APIC_clock (void) void __cpuinit setup_secondary_APIC_clock(void) { local_irq_disable(); /* FIXME: Do we need this? --RR */ - setup_APIC_timer(calibration_result); + setup_APIC_timer(); local_irq_enable(); } patches/rt-mutex-trylock-export.patch0000664000077200007720000000734410655544573017361 0ustar mingomingoFrom linux-kernel-owner@vger.kernel.org Wed May 23 01:44:17 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=none autolearn=unavailable version=3.1.7-deb Received: from vger.kernel.org (vger.kernel.org [209.132.176.167]) by mail.tglx.de (Postfix) with ESMTP id 32C4A65C3E9 for ; Wed, 23 May 2007 01:44:17 +0200 (CEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1759353AbXEVXoG (ORCPT ); Tue, 22 May 2007 19:44:06 -0400 Received: (majordomo@vger.kernel.org) by vger.kernel.org id S1757791AbXEVXn4 (ORCPT ); Tue, 22 May 2007 19:43:56 -0400 Received: from rwcrmhc11.comcast.net ([204.127.192.81]:35206 "EHLO rwcrmhc11.comcast.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1757669AbXEVXn4 (ORCPT ); Tue, 22 May 2007 19:43:56 -0400 Received: from sx.thebigcorporation.com ([69.181.45.228]) by comcast.net (rwcrmhc11) with ESMTP id <20070522233624m1100rg2vge>; Tue, 22 May 2007 23:36:29 +0000 Received: from sx.thebigcorporation.com (localhost.localdomain [127.0.0.1]) by sx.thebigcorporation.com (8.13.8/8.13.8) with ESMTP id l4MNaKHv029409; Tue, 22 May 2007 16:36:20 -0700 Received: (from sven@localhost) by sx.thebigcorporation.com (8.13.8/8.13.8/Submit) id l4MNaJIn029408; Tue, 22 May 2007 16:36:19 -0700 X-Authentication-Warning: sx.thebigcorporation.com: sven set sender to sven@thebigcorporation.com using -f Subject: [PATCH] 2.6.21-rt6 From: Sven-Thorsten Dietrich To: LKML Cc: Ingo Molnar In-Reply-To: <1179874795.25500.40.camel@sx.thebigcorporation.com> References: <1179874795.25500.40.camel@sx.thebigcorporation.com> Content-Type: text/plain Organization: The Big Corporation Date: Tue, 22 May 2007 16:36:19 -0700 Message-Id: <1179876979.25500.54.camel@sx.thebigcorporation.com> Mime-Version: 1.0 X-Mailer: Evolution 2.8.3 (2.8.3-2.fc6) Sender: linux-kernel-owner@vger.kernel.org Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org X-Filter-To: .Kernel.LKML X-Evolution-Source: 
imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit On Tue, 2007-05-22 at 15:59 -0700, Sven-Thorsten Dietrich wrote: > Add > header and export for rt_write_trylock_irqsave. Disregard the last patch, flags parameter was missing in the header. --- include/linux/spinlock.h | 2 ++ kernel/rt.c | 1 + 2 files changed, 3 insertions(+) Index: linux-rt-rebase.q/include/linux/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/spinlock.h +++ linux-rt-rebase.q/include/linux/spinlock.h @@ -294,6 +294,8 @@ do { \ extern void __lockfunc rt_write_lock(rwlock_t *rwlock); extern void __lockfunc rt_read_lock(rwlock_t *rwlock); extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, + unsigned long *flags); extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); Index: linux-rt-rebase.q/kernel/rt.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rt.c +++ linux-rt-rebase.q/kernel/rt.c @@ -172,6 +172,7 @@ int __lockfunc rt_write_trylock_irqsave( *flags = 0; return rt_write_trylock(rwlock); } +EXPORT_SYMBOL(rt_write_trylock_irqsave); int __lockfunc rt_read_trylock(rwlock_t *rwlock) { patches/tasklet-fix-preemption-race.patch0000664000077200007720000001001010655544574020074 0ustar mingomingoFrom johnstul@us.ibm.com Wed Jun 6 04:17:34 2007 Return-Path: Received: from e3.ny.us.ibm.com (e3.ny.us.ibm.com [32.97.182.143]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (No client certificate requested) by mail.tglx.de (Postfix) with ESMTP id 1CCC065C065 for ; Wed, 6 Jun 2007 04:17:34 +0200 (CEST) Received: from d01relay04.pok.ibm.com (d01relay04.pok.ibm.com [9.56.227.236]) by e3.ny.us.ibm.com (8.13.8/8.13.8) with ESMTP id l561EvIT011411 for ; Tue, 5 Jun 2007 21:14:57 -0400 Received: from d01av04.pok.ibm.com (d01av04.pok.ibm.com [9.56.224.64]) by d01relay04.pok.ibm.com (8.13.8/8.13.8/NCO v8.3) with ESMTP id l562HUG6545736 for ; Tue, 5 Jun 2007 22:17:30 -0400 Received: from d01av04.pok.ibm.com (loopback [127.0.0.1]) by d01av04.pok.ibm.com (8.12.11.20060308/8.13.3) with ESMTP id l562HUu0027167 for ; Tue, 5 Jun 2007 22:17:30 -0400 Received: from [9.47.21.16] (cog.beaverton.ibm.com [9.47.21.16]) by d01av04.pok.ibm.com (8.12.11.20060308/8.12.11) with ESMTP id l562HTkh027139; Tue, 5 Jun 2007 22:17:29 -0400 Subject: [PATCH -rt] Fix TASKLET_STATE_SCHED WARN_ON() From: john stultz To: Ingo Molnar Cc: Thomas Gleixner , Steven Rostedt , "Paul E. McKenney" , lkml Content-Type: text/plain Date: Tue, 05 Jun 2007 19:17:23 -0700 Message-Id: <1181096244.6018.20.camel@localhost> Mime-Version: 1.0 X-Mailer: Evolution 2.10.1 X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Hey Ingo, So we've been seeing the following trace fairly frequently on our SMP boxes when running kernbench: BUG: at kernel/softirq.c:639 __tasklet_action() Call Trace: [] dump_trace+0xaa/0x32a [] show_trace+0x41/0x5c [] dump_stack+0x15/0x17 [] __tasklet_action+0xdf/0x12e [] tasklet_action+0x27/0x29 [] ksoftirqd+0x16c/0x271 [] kthread+0xf5/0x128 [] child_rip+0xa/0x12 Paul also pointed this out awhile back: http://lkml.org/lkml/2007/2/25/1 Anyway, I think I finally found the issue. 
Its a bit hard to explain, but the idea is while __tasklet_action is running the tasklet function on CPU1, if a call to tasklet_schedule() on CPU2 is made, and if right after we mark the TASKLET_STATE_SCHED bit we are preempted, __tasklet_action on CPU1 might be able to re-run the function, clear the bit and unlock the tasklet before CPU2 enters __tasklet_common_schedule. Once __tasklet_common_schedule locks the tasklet, we will add the tasklet to the list with the TASKLET_STATE_SCHED *unset*. I've verified this race occurs w/ a WARN_ON in __tasklet_common_schedule(). This fix avoids this race by making sure *after* we've locked the tasklet that the STATE_SCHED bit is set before adding it to the list. Does it look ok to you? thanks -john Signed-off-by: John Stultz --- kernel/softirq.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -462,10 +462,17 @@ static void inline __tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) { if (tasklet_trylock(t)) { - WARN_ON(t->next != NULL); - t->next = head->list; - head->list = t; - raise_softirq_irqoff(nr); + /* We may have been preempted before tasklet_trylock + * and __tasklet_action may have already run. + * So double check the sched bit while the takslet + * is locked before adding it to the list. + */ + if (test_bit(TASKLET_STATE_SCHED, &t->state)) { + WARN_ON(t->next != NULL); + t->next = head->list; + head->list = t; + raise_softirq_irqoff(nr); + } tasklet_unlock(t); } } patches/dynticks-rcu-rt-fixlet.patch0000664000077200007720000000136210655544573017115 0ustar mingomingo--- kernel/rcupreempt.c | 11 +++++++++++ 1 file changed, 11 insertions(+) Index: linux/kernel/rcupreempt.c =================================================================== --- linux.orig/kernel/rcupreempt.c +++ linux/kernel/rcupreempt.c @@ -338,6 +338,17 @@ void __synchronize_sched(void) sched_setaffinity(0, oldmask); } +/* + * Check to see if any future RCU-related work will need to be done + * by the current CPU, even if none need be done immediately, returning + * 1 if so. This function is part of the RCU implementation; it is -not- + * an exported member of the RCU API. 
+ */ +int rcu_needs_cpu(int cpu) +{ + return !!rcu_data.waitlist || rcu_pending(cpu); +} + int rcu_pending(int cpu) { return (rcu_data.donelist != NULL || patches/latency-tracing-x86_64.patch0000664000077200007720000001673510655544571016612 0ustar mingomingo arch/x86_64/ia32/ia32entry.S | 11 ++++++++- arch/x86_64/kernel/entry.S | 45 +++++++++++++++++++++++++++++++++++++ arch/x86_64/kernel/head64.c | 3 +- arch/x86_64/kernel/irq.c | 6 +++++ arch/x86_64/kernel/setup64.c | 4 +-- arch/x86_64/kernel/smpboot.c | 2 - arch/x86_64/kernel/traps.c | 1 arch/x86_64/kernel/vsyscall.c | 2 - include/asm-x86_64/calling.h | 50 ++++++++++++++++++++++++++++++++++++++++++ include/asm-x86_64/unistd.h | 2 + 10 files changed, 120 insertions(+), 6 deletions(-) Index: linux/arch/x86_64/ia32/ia32entry.S =================================================================== --- linux.orig/arch/x86_64/ia32/ia32entry.S +++ linux/arch/x86_64/ia32/ia32entry.S @@ -120,7 +120,9 @@ sysenter_do_call: cmpl $(IA32_NR_syscalls-1),%eax ja ia32_badsys IA32_ARG_FIXUP 1 + TRACE_SYS_IA32_CALL call *ia32_sys_call_table(,%rax,8) + TRACE_SYS_RET movq %rax,RAX-ARGOFFSET(%rsp) GET_THREAD_INFO(%r10) cli @@ -229,7 +231,9 @@ cstar_do_call: cmpl $IA32_NR_syscalls-1,%eax ja ia32_badsys IA32_ARG_FIXUP 1 + TRACE_SYS_IA32_CALL call *ia32_sys_call_table(,%rax,8) + TRACE_SYS_RET movq %rax,RAX-ARGOFFSET(%rsp) GET_THREAD_INFO(%r10) cli @@ -323,8 +327,10 @@ ia32_do_syscall: cmpl $(IA32_NR_syscalls-1),%eax ja ia32_badsys IA32_ARG_FIXUP + TRACE_SYS_IA32_CALL call *ia32_sys_call_table(,%rax,8) # xxx: rip relative ia32_sysret: + TRACE_SYS_RET movq %rax,RAX-ARGOFFSET(%rsp) jmp int_ret_from_sys_call @@ -395,7 +401,7 @@ END(ia32_ptregs_common) .section .rodata,"a" .align 8 -ia32_sys_call_table: +ENTRY(ia32_sys_call_table) .quad sys_restart_syscall .quad sys_exit .quad stub32_fork @@ -721,4 +727,7 @@ ia32_sys_call_table: .quad compat_sys_timerfd .quad sys_eventfd .quad sys32_fallocate +#ifdef CONFIG_EVENT_TRACE + .globl ia32_syscall_end +#endif ia32_syscall_end: Index: linux/arch/x86_64/kernel/entry.S =================================================================== --- linux.orig/arch/x86_64/kernel/entry.S +++ linux/arch/x86_64/kernel/entry.S @@ -53,6 +53,47 @@ .code64 +#ifdef CONFIG_EVENT_TRACE + +ENTRY(mcount) + cmpl $0, mcount_enabled + jz out + + push %rbp + mov %rsp,%rbp + + push %r11 + push %r10 + push %r9 + push %r8 + push %rdi + push %rsi + push %rdx + push %rcx + push %rax + + mov 0x0(%rbp),%rax + mov 0x8(%rbp),%rdi + mov 0x8(%rax),%rsi + + call __trace + + pop %rax + pop %rcx + pop %rdx + pop %rsi + pop %rdi + pop %r8 + pop %r9 + pop %r10 + pop %r11 + + pop %rbp +out: + ret + +#endif + #ifndef CONFIG_PREEMPT #define retint_kernel retint_restore_args #endif @@ -234,7 +275,9 @@ ENTRY(system_call) cmpq $__NR_syscall_max,%rax ja badsys movq %r10,%rcx + TRACE_SYS_CALL call *sys_call_table(,%rax,8) # XXX: rip relative + TRACE_SYS_RET movq %rax,RAX-ARGOFFSET(%rsp) /* * Syscall return path ending with SYSRET (fast path) @@ -316,7 +359,9 @@ tracesys: cmova %rcx,%rax ja 1f movq %r10,%rcx /* fixup for C */ + TRACE_SYS_CALL call *sys_call_table(,%rax,8) + TRACE_SYS_RET 1: movq %rax,RAX-ARGOFFSET(%rsp) /* Use IRET because user could have changed frame */ Index: linux/arch/x86_64/kernel/head64.c =================================================================== --- linux.orig/arch/x86_64/kernel/head64.c +++ linux/arch/x86_64/kernel/head64.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -58,7 +59,7 @@ static void __init 
copy_bootdata(char *r memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); } -void __init x86_64_start_kernel(char * real_mode_data) +void __init notrace x86_64_start_kernel(char * real_mode_data) { int i; Index: linux/arch/x86_64/kernel/irq.c =================================================================== --- linux.orig/arch/x86_64/kernel/irq.c +++ linux/arch/x86_64/kernel/irq.c @@ -117,6 +117,12 @@ asmlinkage unsigned int do_IRQ(struct pt irq_enter(); irq = __get_cpu_var(vector_irq)[vector]; +#ifdef CONFIG_EVENT_TRACE + if (irq == trace_user_trigger_irq) + user_trace_start(); +#endif + trace_special(regs->rip, irq, 0); + #ifdef CONFIG_DEBUG_STACKOVERFLOW stack_overflow_check(regs); #endif Index: linux/arch/x86_64/kernel/setup64.c =================================================================== --- linux.orig/arch/x86_64/kernel/setup64.c +++ linux/arch/x86_64/kernel/setup64.c @@ -114,7 +114,7 @@ void __init setup_per_cpu_areas(void) } } -void pda_init(int cpu) +void notrace pda_init(int cpu) { struct x8664_pda *pda = cpu_pda(cpu); @@ -191,7 +191,7 @@ unsigned long kernel_eflags; * 'CPU state barrier', nothing should get across. * A lot of state is already set up in PDA init. */ -void __cpuinit cpu_init (void) +void __cpuinit notrace cpu_init (void) { int cpu = stack_smp_processor_id(); struct tss_struct *t = &per_cpu(init_tss, cpu); Index: linux/arch/x86_64/kernel/smpboot.c =================================================================== --- linux.orig/arch/x86_64/kernel/smpboot.c +++ linux/arch/x86_64/kernel/smpboot.c @@ -316,7 +316,7 @@ static inline void set_cpu_sibling_map(i /* * Setup code on secondary processor (after comming out of the trampoline) */ -void __cpuinit start_secondary(void) +void __cpuinit notrace start_secondary(void) { /* * Dont put anything before smp_callin(), SMP Index: linux/arch/x86_64/kernel/traps.c =================================================================== --- linux.orig/arch/x86_64/kernel/traps.c +++ linux/arch/x86_64/kernel/traps.c @@ -351,6 +351,7 @@ show_trace(struct task_struct *tsk, stru printk("\nCall Trace:\n"); dump_trace(tsk, regs, stack, &print_trace_ops, NULL); printk("\n"); + print_traces(tsk); } static void Index: linux/arch/x86_64/kernel/vsyscall.c =================================================================== --- linux.orig/arch/x86_64/kernel/vsyscall.c +++ linux/arch/x86_64/kernel/vsyscall.c @@ -44,7 +44,7 @@ #include #include -#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) +#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) notrace #define __syscall_clobber "r11","rcx","memory" #define __pa_vsymbol(x) \ ({unsigned long v; \ Index: linux/include/asm-x86_64/calling.h =================================================================== --- linux.orig/include/asm-x86_64/calling.h +++ linux/include/asm-x86_64/calling.h @@ -160,3 +160,53 @@ .macro icebp .byte 0xf1 .endm + +/* + * latency-tracing helpers: + */ + + .macro TRACE_SYS_CALL + +#ifdef CONFIG_EVENT_TRACE + SAVE_ARGS + + mov %rdx, %rcx + mov %rsi, %rdx + mov %rdi, %rsi + mov %rax, %rdi + + call sys_call + + RESTORE_ARGS +#endif + .endm + + + .macro TRACE_SYS_IA32_CALL + +#ifdef CONFIG_EVENT_TRACE + SAVE_ARGS + + mov %rdx, %rcx + mov %rsi, %rdx + mov %rdi, %rsi + mov %rax, %rdi + + call sys_ia32_call + + RESTORE_ARGS +#endif + .endm + + .macro TRACE_SYS_RET + +#ifdef CONFIG_EVENT_TRACE + SAVE_ARGS + + mov %rax, %rdi + + call sys_ret + + RESTORE_ARGS +#endif + .endm Index: 
linux/include/asm-x86_64/unistd.h =================================================================== --- linux.orig/include/asm-x86_64/unistd.h +++ linux/include/asm-x86_64/unistd.h @@ -11,6 +11,8 @@ * Note: holes are not allowed. */ +#define NR_syscalls (__NR_syscall_max+1) + /* at least 8 syscall per cacheline */ #define __NR_read 0 __SYSCALL(__NR_read, sys_read) patches/preempt-realtime-input.patch0000664000077200007720000000250310655544575017173 0ustar mingomingo--- drivers/input/gameport/gameport.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/drivers/input/gameport/gameport.c =================================================================== --- linux-rt-rebase.q.orig/drivers/input/gameport/gameport.c +++ linux-rt-rebase.q/drivers/input/gameport/gameport.c @@ -21,6 +21,7 @@ #include #include #include +#include #include /* HZ */ #include #include @@ -102,12 +103,12 @@ static int gameport_measure_speed(struct tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); GET_TIME(t1); for (t = 0; t < 50; t++) gameport_read(gameport); GET_TIME(t2); GET_TIME(t3); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; } @@ -126,11 +127,11 @@ static int gameport_measure_speed(struct tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); rdtscl(t1); for (t = 0; t < 50; t++) gameport_read(gameport); rdtscl(t2); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if (t2 - t1 < tx) tx = t2 - t1; } patches/preempt-irqs-i386-ioapic-mask-quirk.patch0000664000077200007720000001360610655544573021231 0ustar mingomingoFrom mschmidt@redhat.com Thu Jun 21 13:32:02 2007 Return-Path: Received: from mx1.redhat.com (mx1.redhat.com [66.187.233.31]) by mail.tglx.de (Postfix) with ESMTP id CA11565C065 for ; Thu, 21 Jun 2007 13:32:02 +0200 (CEST) Received: from int-mx1.corp.redhat.com (int-mx1.corp.redhat.com [172.16.52.254]) by mx1.redhat.com (8.13.1/8.13.1) with ESMTP id l5LBVoq3016914; Thu, 21 Jun 2007 07:31:50 -0400 Received: from pobox.stuttgart.redhat.com (pobox.stuttgart.redhat.com [172.16.2.10]) by int-mx1.corp.redhat.com (8.13.1/8.13.1) with ESMTP id l5LBVmp0010104; Thu, 21 Jun 2007 07:31:49 -0400 Received: from [10.34.32.84] (brian.englab.brq.redhat.com [10.34.32.84]) by pobox.stuttgart.redhat.com (8.12.11.20060308/8.12.11) with ESMTP id l5LBVl5k000423; Thu, 21 Jun 2007 13:31:47 +0200 Message-ID: <467A61A3.7060804@redhat.com> Date: Thu, 21 Jun 2007 13:31:47 +0200 From: Michal Schmidt User-Agent: Thunderbird 1.5.0.12 (X11/20070529) MIME-Version: 1.0 To: Steven Rostedt CC: Ingo Molnar , Thomas Gleixner , linux-rt-users@vger.kernel.org, linux-kernel@vger.kernel.org Subject: Re: [PATCH -rt] irq nobody cared workaround for i386 References: <4676CF81.2000205@redhat.com> <4677D7AF.7040700@redhat.com> <467932B4.6030800@redhat.com> <467936FE.8050704@redhat.com> In-Reply-To: <467936FE.8050704@redhat.com> X-Enigmail-Version: 0.94.2.0 Content-Type: text/plain; charset=ISO-8859-1 X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Steven Rostedt wrote: > Michal Schmidt wrote: > >> I came to the conclusion that the IO-APICs which need the fix for the >> nobody cared bug don't have the issue ack_ioapic_quirk_irq is designed >> to work-around. It should be safe simply to use the normal >> ack_ioapic_irq as the .eoi method in pcix_ioapic_chip. 
>> So this is the port of Steven's fix for the nobody cared bug to i386. It >> works fine on IBM LS21 I have access to. >> >> > You want to make that "apic > 0". Note the spacing. If it breaks > 80 characters, then simply put it to a new line. > > [...] > ACK > > -- Steve > OK, I fixed the spacing in both occurences. Signed-off-by: Michal Schmidt --- arch/i386/kernel/io_apic.c | 66 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 57 insertions(+), 9 deletions(-) Index: linux-rt-rebase.q/arch/i386/kernel/io_apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/io_apic.c +++ linux-rt-rebase.q/arch/i386/kernel/io_apic.c @@ -261,6 +261,18 @@ static void __unmask_IO_APIC_irq (unsign __modify_IO_APIC_irq(irq, 0, 0x00010000); } +/* trigger = 0 (edge mode) */ +static void __pcix_mask_IO_APIC_irq (unsigned int irq) +{ + __modify_IO_APIC_irq(irq, 0, 0x00008000); +} + +/* mask = 0, trigger = 1 (level mode) */ +static void __pcix_unmask_IO_APIC_irq (unsigned int irq) +{ + __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000); +} + static void mask_IO_APIC_irq (unsigned int irq) { unsigned long flags; @@ -279,6 +291,24 @@ static void unmask_IO_APIC_irq (unsigned spin_unlock_irqrestore(&ioapic_lock, flags); } +static void pcix_mask_IO_APIC_irq (unsigned int irq) +{ + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + __pcix_mask_IO_APIC_irq(irq); + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void pcix_unmask_IO_APIC_irq (unsigned int irq) +{ + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + __pcix_unmask_IO_APIC_irq(irq); + spin_unlock_irqrestore(&ioapic_lock, flags); +} + static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) { struct IO_APIC_route_entry entry; @@ -1235,22 +1265,27 @@ static int assign_irq_vector(int irq) return vector; } + static struct irq_chip ioapic_chip; +static struct irq_chip pcix_ioapic_chip; #define IOAPIC_AUTO -1 #define IOAPIC_EDGE 0 #define IOAPIC_LEVEL 1 -static void ioapic_register_intr(int irq, int vector, unsigned long trigger) +static void ioapic_register_intr(int irq, int vector, unsigned long trigger, + int pcix) { + struct irq_chip *chip = pcix ? &pcix_ioapic_chip : &ioapic_chip; + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || trigger == IOAPIC_LEVEL) - set_irq_chip_and_handler_name(irq, &ioapic_chip, - handle_fasteoi_irq, "fasteoi"); - else { - set_irq_chip_and_handler_name(irq, &ioapic_chip, - handle_edge_irq, "edge"); - } + set_irq_chip_and_handler_name(irq, chip, handle_fasteoi_irq, + pcix ? "pcix-fasteoi" : "fasteoi"); + else + set_irq_chip_and_handler_name(irq, chip, handle_edge_irq, + pcix ? 
"pcix-edge" : "edge"); + set_intr_gate(vector, interrupt[irq]); } @@ -1314,7 +1349,8 @@ static void __init setup_IO_APIC_irqs(vo if (IO_APIC_IRQ(irq)) { vector = assign_irq_vector(irq); entry.vector = vector; - ioapic_register_intr(irq, vector, IOAPIC_AUTO); + ioapic_register_intr(irq, vector, IOAPIC_AUTO, + apic > 0); if (!apic && (irq < 16)) disable_8259A_irq(irq); @@ -2006,6 +2042,18 @@ static struct irq_chip ioapic_chip __rea .retrigger = ioapic_retrigger_irq, }; +static struct irq_chip pcix_ioapic_chip __read_mostly = { + .name = "IO-APIC", + .startup = startup_ioapic_irq, + .mask = pcix_mask_IO_APIC_irq, + .unmask = pcix_unmask_IO_APIC_irq, + .ack = ack_ioapic_irq, + .eoi = ack_ioapic_irq, +#ifdef CONFIG_SMP + .set_affinity = set_ioapic_affinity_irq, +#endif + .retrigger = ioapic_retrigger_irq, +}; static inline void init_IO_APIC_traps(void) { @@ -2806,7 +2854,7 @@ int io_apic_set_pci_routing (int ioapic, mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq, edge_level, active_high_low); - ioapic_register_intr(irq, entry.vector, edge_level); + ioapic_register_intr(irq, entry.vector, edge_level, ioapic > 0); if (!ioapic && (irq < 16)) disable_8259A_irq(irq); patches/x86_64-preparatory-apic-set-lvtt.patch0000664000077200007720000000451010655544570020553 0ustar mingomingoSubject: x86_64: prepare apic code for clock events Change __setup_APIC_LVTT so it takes the arguments which are necessary for the later clock events switch. Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -760,14 +760,14 @@ void __init init_apic_mappings(void) #define APIC_DIVISOR 16 -static void __setup_APIC_LVTT(unsigned int clocks) +static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) { unsigned int lvtt_value, tmp_value; - int cpu = smp_processor_id(); - - lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR; - if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) + lvtt_value = LOCAL_TIMER_VECTOR; + if (!oneshot) + lvtt_value |= APIC_LVT_TIMER_PERIODIC; + if (!irqen) lvtt_value |= APIC_LVT_MASKED; apic_write(APIC_LVTT, lvtt_value); @@ -780,12 +780,14 @@ static void __setup_APIC_LVTT(unsigned i & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | APIC_TDR_DIV_16); - apic_write(APIC_TMICT, clocks/APIC_DIVISOR); + if (!oneshot) + apic_write(APIC_TMICT, clocks/APIC_DIVISOR); } static void setup_APIC_timer(unsigned int clocks) { unsigned long flags; + int irqen; local_irq_save(flags); @@ -808,7 +810,10 @@ static void setup_APIC_timer(unsigned in c2 |= inb_p(0x40) << 8; } while (c2 - c1 < 300); } - __setup_APIC_LVTT(clocks); + + irqen = ! cpu_isset(smp_processor_id(), + timer_interrupt_broadcast_ipi_mask); + __setup_APIC_LVTT(clocks, 0, irqen); /* Turn off PIT interrupt if we use APIC timer as main timer. Only works with the PM timer right now TBD fix it for HPET too. */ @@ -846,8 +851,10 @@ static int __init calibrate_APIC_clock(v * Put whatever arbitrary (but long enough) timeout * value into the APIC clock, we just want to get the * counter running for calibration. + * + * No interrupt enable ! 
*/ - __setup_APIC_LVTT(4000000000); + __setup_APIC_LVTT(4000000000, 0, 0); apic_start = apic_read(APIC_TMCCT); #ifdef CONFIG_X86_PM_TIMER patches/ppc-remove-broken-vsyscall.patch0000664000077200007720000005762510655544572017767 0ustar mingomingoFrom sshtylyov@ru.mvista.com Wed May 16 20:55:24 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from imap.sh.mvista.com (unknown [63.81.120.155]) by mail.tglx.de (Postfix) with ESMTP id A9FD665C065 for ; Wed, 16 May 2007 20:55:24 +0200 (CEST) Received: from wasted.dev.rtsoft.ru (unknown [10.150.0.9]) by imap.sh.mvista.com (Postfix) with ESMTP id A97873EC9; Wed, 16 May 2007 11:55:18 -0700 (PDT) From: Sergei Shtylyov (by way of Sergei Shtylyov ) Organization: MontaVista Software Inc. Subject: [PATCH 2.6.21-rt2] PowerPC: remove broken vsyscall code Date: Wed, 16 May 2007 21:56:51 +0300 User-Agent: KMail/1.5 MIME-Version: 1.0 Content-Disposition: inline To: tglx@linutronix.de, mingo@elte.hu Cc: linux-kernel@vger.kernel.org, johnstul@us.ibm.com Content-Type: text/plain; charset="iso-8859-1" Message-Id: <200705162256.51722.sshtylyov@ru.mvista.com> X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Remove PowerPC vsyscalls that were broken by the generic TOD patch. Signed-off-by: Sergei Shtylyov --- Since there's still no working PowerPC TOD vsyscalls fix, and they continue to be broken in the RT patch, I've respun this patch again... arch/powerpc/kernel/asm-offsets.c | 15 - arch/powerpc/kernel/smp.c | 2 arch/powerpc/kernel/vdso32/Makefile | 2 arch/powerpc/kernel/vdso32/datapage.S | 18 - arch/powerpc/kernel/vdso32/gettimeofday.S | 324 ------------------------------ arch/powerpc/kernel/vdso32/vdso32.lds.S | 4 arch/powerpc/kernel/vdso64/Makefile | 2 arch/powerpc/kernel/vdso64/datapage.S | 18 - arch/powerpc/kernel/vdso64/gettimeofday.S | 255 ----------------------- arch/powerpc/kernel/vdso64/vdso64.lds.S | 4 include/asm-powerpc/time.h | 20 - include/asm-powerpc/vdso_datapage.h | 14 - 12 files changed, 2 insertions(+), 676 deletions(-) Index: linux/arch/powerpc/kernel/asm-offsets.c =================================================================== --- linux.orig/arch/powerpc/kernel/asm-offsets.c +++ linux/arch/powerpc/kernel/asm-offsets.c @@ -273,16 +273,7 @@ int main(void) #endif /* ! 
CONFIG_PPC64 */ /* datapage offsets for use by vdso */ - DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp)); - DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec)); - DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs)); - DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec)); - DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count)); - DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest)); - DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); - DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); - DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); #ifdef CONFIG_PPC64 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64)); DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); @@ -303,12 +294,6 @@ int main(void) DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); - /* Other bits used by the vdso */ - DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); - DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); - DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); - DEFINE(CLOCK_REALTIME_RES, TICK_NSEC); - #ifdef CONFIG_BUG DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); #endif Index: linux/arch/powerpc/kernel/smp.c =================================================================== --- linux.orig/arch/powerpc/kernel/smp.c +++ linux/arch/powerpc/kernel/smp.c @@ -333,8 +333,6 @@ void smp_call_function_interrupt(void) } } -extern struct gettimeofday_struct do_gtod; - struct thread_info *current_set[NR_CPUS]; DECLARE_PER_CPU(unsigned int, pvr); Index: linux/arch/powerpc/kernel/vdso32/Makefile =================================================================== --- linux.orig/arch/powerpc/kernel/vdso32/Makefile +++ linux/arch/powerpc/kernel/vdso32/Makefile @@ -1,7 +1,7 @@ # List of files in the vdso, has to be asm only for now -obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o +obj-vdso32 = sigtramp.o datapage.o cacheflush.o note.o # Build rules Index: linux/arch/powerpc/kernel/vdso32/datapage.S =================================================================== --- linux.orig/arch/powerpc/kernel/vdso32/datapage.S +++ linux/arch/powerpc/kernel/vdso32/datapage.S @@ -65,21 +65,3 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_ma blr .cfi_endproc V_FUNCTION_END(__kernel_get_syscall_map) - -/* - * void unsigned long long __kernel_get_tbfreq(void); - * - * returns the timebase frequency in HZ - */ -V_FUNCTION_BEGIN(__kernel_get_tbfreq) - .cfi_startproc - mflr r12 - .cfi_register lr,r12 - bl __get_datapage@local - lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3) - lwz r3,CFG_TB_TICKS_PER_SEC(r3) - mtlr r12 - crclr cr0*4+so - blr - .cfi_endproc -V_FUNCTION_END(__kernel_get_tbfreq) Index: linux/arch/powerpc/kernel/vdso32/gettimeofday.S =================================================================== --- linux.orig/arch/powerpc/kernel/vdso32/gettimeofday.S +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Userland implementation of gettimeofday() for 32 bits processes in a - * ppc64 kernel for use in the vDSO - * - * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org, - * IBM Corp. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include -#include -#include -#include -#include - - .text -/* - * Exact prototype of gettimeofday - * - * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); - * - */ -V_FUNCTION_BEGIN(__kernel_gettimeofday) - .cfi_startproc - mflr r12 - .cfi_register lr,r12 - - mr r10,r3 /* r10 saves tv */ - mr r11,r4 /* r11 saves tz */ - bl __get_datapage@local /* get data page */ - mr r9, r3 /* datapage ptr in r9 */ - cmplwi r10,0 /* check if tv is NULL */ - beq 3f - bl __do_get_xsec@local /* get xsec from tb & kernel */ - bne- 2f /* out of line -> do syscall */ - - /* seconds are xsec >> 20 */ - rlwinm r5,r4,12,20,31 - rlwimi r5,r3,12,0,19 - stw r5,TVAL32_TV_SEC(r10) - - /* get remaining xsec and convert to usec. we scale - * up remaining xsec by 12 bits and get the top 32 bits - * of the multiplication - */ - rlwinm r5,r4,12,0,19 - lis r6,1000000@h - ori r6,r6,1000000@l - mulhwu r5,r5,r6 - stw r5,TVAL32_TV_USEC(r10) - -3: cmplwi r11,0 /* check if tz is NULL */ - beq 1f - lwz r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */ - lwz r5,CFG_TZ_DSTTIME(r9) - stw r4,TZONE_TZ_MINWEST(r11) - stw r5,TZONE_TZ_DSTTIME(r11) - -1: mtlr r12 - crclr cr0*4+so - li r3,0 - blr - -2: - mtlr r12 - mr r3,r10 - mr r4,r11 - li r0,__NR_gettimeofday - sc - blr - .cfi_endproc -V_FUNCTION_END(__kernel_gettimeofday) - -/* - * Exact prototype of clock_gettime() - * - * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); - * - */ -V_FUNCTION_BEGIN(__kernel_clock_gettime) - .cfi_startproc - /* Check for supported clock IDs */ - cmpli cr0,r3,CLOCK_REALTIME - cmpli cr1,r3,CLOCK_MONOTONIC - cror cr0*4+eq,cr0*4+eq,cr1*4+eq - bne cr0,99f - - mflr r12 /* r12 saves lr */ - .cfi_register lr,r12 - mr r10,r3 /* r10 saves id */ - mr r11,r4 /* r11 saves tp */ - bl __get_datapage@local /* get data page */ - mr r9,r3 /* datapage ptr in r9 */ - beq cr1,50f /* if monotonic -> jump there */ - - /* - * CLOCK_REALTIME - */ - - bl __do_get_xsec@local /* get xsec from tb & kernel */ - bne- 98f /* out of line -> do syscall */ - - /* seconds are xsec >> 20 */ - rlwinm r5,r4,12,20,31 - rlwimi r5,r3,12,0,19 - stw r5,TSPC32_TV_SEC(r11) - - /* get remaining xsec and convert to nsec. we scale - * up remaining xsec by 12 bits and get the top 32 bits - * of the multiplication, then we multiply by 1000 - */ - rlwinm r5,r4,12,0,19 - lis r6,1000000@h - ori r6,r6,1000000@l - mulhwu r5,r5,r6 - mulli r5,r5,1000 - stw r5,TSPC32_TV_NSEC(r11) - mtlr r12 - crclr cr0*4+so - li r3,0 - blr - - /* - * CLOCK_MONOTONIC - */ - -50: bl __do_get_xsec@local /* get xsec from tb & kernel */ - bne- 98f /* out of line -> do syscall */ - - /* seconds are xsec >> 20 */ - rlwinm r6,r4,12,20,31 - rlwimi r6,r3,12,0,19 - - /* get remaining xsec and convert to nsec. we scale - * up remaining xsec by 12 bits and get the top 32 bits - * of the multiplication, then we multiply by 1000 - */ - rlwinm r7,r4,12,0,19 - lis r5,1000000@h - ori r5,r5,1000000@l - mulhwu r7,r7,r5 - mulli r7,r7,1000 - - /* now we must fixup using wall to monotonic. We need to snapshot - * that value and do the counter trick again. Fortunately, we still - * have the counter value in r8 that was returned by __do_get_xsec. 
- * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5 - * can be used - */ - - lwz r3,WTOM_CLOCK_SEC(r9) - lwz r4,WTOM_CLOCK_NSEC(r9) - - /* We now have our result in r3,r4. We create a fake dependency - * on that result and re-check the counter - */ - or r5,r4,r3 - xor r0,r5,r5 - add r9,r9,r0 -#ifdef CONFIG_PPC64 - lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) -#else - lwz r0,(CFG_TB_UPDATE_COUNT)(r9) -#endif - cmpl cr0,r8,r0 /* check if updated */ - bne- 50b - - /* Calculate and store result. Note that this mimmics the C code, - * which may cause funny results if nsec goes negative... is that - * possible at all ? - */ - add r3,r3,r6 - add r4,r4,r7 - lis r5,NSEC_PER_SEC@h - ori r5,r5,NSEC_PER_SEC@l - cmpl cr0,r4,r5 - cmpli cr1,r4,0 - blt 1f - subf r4,r5,r4 - addi r3,r3,1 -1: bge cr1,1f - addi r3,r3,-1 - add r4,r4,r5 -1: stw r3,TSPC32_TV_SEC(r11) - stw r4,TSPC32_TV_NSEC(r11) - - mtlr r12 - crclr cr0*4+so - li r3,0 - blr - - /* - * syscall fallback - */ -98: - mtlr r12 - mr r3,r10 - mr r4,r11 -99: - li r0,__NR_clock_gettime - sc - blr - .cfi_endproc -V_FUNCTION_END(__kernel_clock_gettime) - - -/* - * Exact prototype of clock_getres() - * - * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); - * - */ -V_FUNCTION_BEGIN(__kernel_clock_getres) - .cfi_startproc - /* Check for supported clock IDs */ - cmpwi cr0,r3,CLOCK_REALTIME - cmpwi cr1,r3,CLOCK_MONOTONIC - cror cr0*4+eq,cr0*4+eq,cr1*4+eq - bne cr0,99f - - li r3,0 - cmpli cr0,r4,0 - crclr cr0*4+so - beqlr - lis r5,CLOCK_REALTIME_RES@h - ori r5,r5,CLOCK_REALTIME_RES@l - stw r3,TSPC32_TV_SEC(r4) - stw r5,TSPC32_TV_NSEC(r4) - blr - - /* - * syscall fallback - */ -99: - li r0,__NR_clock_getres - sc - blr - .cfi_endproc -V_FUNCTION_END(__kernel_clock_getres) - - -/* - * This is the core of gettimeofday() & friends, it returns the xsec - * value in r3 & r4 and expects the datapage ptr (non clobbered) - * in r9. clobbers r0,r4,r5,r6,r7,r8. - * When returning, r8 contains the counter value that can be reused - * by the monotonic clock implementation - */ -__do_get_xsec: - .cfi_startproc - /* Check for update count & load values. We use the low - * order 32 bits of the update count - */ -#ifdef CONFIG_PPC64 -1: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9) -#else -1: lwz r8,(CFG_TB_UPDATE_COUNT)(r9) -#endif - andi. r0,r8,1 /* pending update ? loop */ - bne- 1b - xor r0,r8,r8 /* create dependency */ - add r9,r9,r0 - - /* Load orig stamp (offset to TB) */ - lwz r5,CFG_TB_ORIG_STAMP(r9) - lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) - - /* Get a stable TB value */ -2: mftbu r3 - mftbl r4 - mftbu r0 - cmpl cr0,r3,r0 - bne- 2b - - /* Substract tb orig stamp. If the high part is non-zero, we jump to - * the slow path which call the syscall. - * If it's ok, then we have our 32 bits tb_ticks value in r7 - */ - subfc r7,r6,r4 - subfe. r0,r5,r3 - bne- 3f - - /* Load scale factor & do multiplication */ - lwz r5,CFG_TB_TO_XS(r9) /* load values */ - lwz r6,(CFG_TB_TO_XS+4)(r9) - mulhwu r4,r7,r5 - mulhwu r6,r7,r6 - mullw r0,r7,r5 - addc r6,r6,r0 - - /* At this point, we have the scaled xsec value in r4 + XER:CA - * we load & add the stamp since epoch - */ - lwz r5,CFG_STAMP_XSEC(r9) - lwz r6,(CFG_STAMP_XSEC+4)(r9) - adde r4,r4,r6 - addze r3,r5 - - /* We now have our result in r3,r4. 
We create a fake dependency - * on that result and re-check the counter - */ - or r6,r4,r3 - xor r0,r6,r6 - add r9,r9,r0 -#ifdef CONFIG_PPC64 - lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) -#else - lwz r0,(CFG_TB_UPDATE_COUNT)(r9) -#endif - cmpl cr0,r8,r0 /* check if updated */ - bne- 1b - - /* Warning ! The caller expects CR:EQ to be set to indicate a - * successful calculation (so it won't fallback to the syscall - * method). We have overriden that CR bit in the counter check, - * but fortunately, the loop exit condition _is_ CR:EQ set, so - * we can exit safely here. If you change this code, be careful - * of that side effect. - */ -3: blr - .cfi_endproc Index: linux/arch/powerpc/kernel/vdso32/vdso32.lds.S =================================================================== --- linux.orig/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ linux/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -117,10 +117,6 @@ VERSION global: __kernel_datapage_offset; /* Has to be there for the kernel to find */ __kernel_get_syscall_map; - __kernel_gettimeofday; - __kernel_clock_gettime; - __kernel_clock_getres; - __kernel_get_tbfreq; __kernel_sync_dicache; __kernel_sync_dicache_p5; __kernel_sigtramp32; Index: linux/arch/powerpc/kernel/vdso64/Makefile =================================================================== --- linux.orig/arch/powerpc/kernel/vdso64/Makefile +++ linux/arch/powerpc/kernel/vdso64/Makefile @@ -1,6 +1,6 @@ # List of files in the vdso, has to be asm only for now -obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o +obj-vdso64 = sigtramp.o datapage.o cacheflush.o note.o # Build rules Index: linux/arch/powerpc/kernel/vdso64/datapage.S =================================================================== --- linux.orig/arch/powerpc/kernel/vdso64/datapage.S +++ linux/arch/powerpc/kernel/vdso64/datapage.S @@ -65,21 +65,3 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_ma blr .cfi_endproc V_FUNCTION_END(__kernel_get_syscall_map) - - -/* - * void unsigned long __kernel_get_tbfreq(void); - * - * returns the timebase frequency in HZ - */ -V_FUNCTION_BEGIN(__kernel_get_tbfreq) - .cfi_startproc - mflr r12 - .cfi_register lr,r12 - bl V_LOCAL_FUNC(__get_datapage) - ld r3,CFG_TB_TICKS_PER_SEC(r3) - mtlr r12 - crclr cr0*4+so - blr - .cfi_endproc -V_FUNCTION_END(__kernel_get_tbfreq) Index: linux/arch/powerpc/kernel/vdso64/gettimeofday.S =================================================================== --- linux.orig/arch/powerpc/kernel/vdso64/gettimeofday.S +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Userland implementation of gettimeofday() for 64 bits processes in a - * ppc64 kernel for use in the vDSO - * - * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), - * IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ -#include -#include -#include -#include -#include - - .text -/* - * Exact prototype of gettimeofday - * - * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); - * - */ -V_FUNCTION_BEGIN(__kernel_gettimeofday) - .cfi_startproc - mflr r12 - .cfi_register lr,r12 - - mr r11,r3 /* r11 holds tv */ - mr r10,r4 /* r10 holds tz */ - bl V_LOCAL_FUNC(__get_datapage) /* get data page */ - cmpldi r11,0 /* check if tv is NULL */ - beq 2f - bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ - lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ - ori r7,r7,16960 - rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ - rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ - std r5,TVAL64_TV_SEC(r11) /* store sec in tv */ - subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ - mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / - * XSEC_PER_SEC - */ - rldicl r0,r0,44,20 - std r0,TVAL64_TV_USEC(r11) /* store usec in tv */ -2: cmpldi r10,0 /* check if tz is NULL */ - beq 1f - lwz r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */ - lwz r5,CFG_TZ_DSTTIME(r3) - stw r4,TZONE_TZ_MINWEST(r10) - stw r5,TZONE_TZ_DSTTIME(r10) -1: mtlr r12 - crclr cr0*4+so - li r3,0 /* always success */ - blr - .cfi_endproc -V_FUNCTION_END(__kernel_gettimeofday) - - -/* - * Exact prototype of clock_gettime() - * - * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); - * - */ -V_FUNCTION_BEGIN(__kernel_clock_gettime) - .cfi_startproc - /* Check for supported clock IDs */ - cmpwi cr0,r3,CLOCK_REALTIME - cmpwi cr1,r3,CLOCK_MONOTONIC - cror cr0*4+eq,cr0*4+eq,cr1*4+eq - bne cr0,99f - - mflr r12 /* r12 saves lr */ - .cfi_register lr,r12 - mr r10,r3 /* r10 saves id */ - mr r11,r4 /* r11 saves tp */ - bl V_LOCAL_FUNC(__get_datapage) /* get data page */ - beq cr1,50f /* if monotonic -> jump there */ - - /* - * CLOCK_REALTIME - */ - - bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ - - lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ - ori r7,r7,16960 - rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ - rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ - std r5,TSPC64_TV_SEC(r11) /* store sec in tv */ - subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ - mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / - * XSEC_PER_SEC - */ - rldicl r0,r0,44,20 - mulli r0,r0,1000 /* nsec = usec * 1000 */ - std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */ - - mtlr r12 - crclr cr0*4+so - li r3,0 - blr - - /* - * CLOCK_MONOTONIC - */ - -50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ - - lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ - ori r7,r7,16960 - rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ - rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ - subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ - mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / - * XSEC_PER_SEC - */ - rldicl r6,r0,44,20 - mulli r6,r6,1000 /* nsec = usec * 1000 */ - - /* now we must fixup using wall to monotonic. We need to snapshot - * that value and do the counter trick again. Fortunately, we still - * have the counter value in r8 that was returned by __do_get_xsec. - * At this point, r5,r6 contain our sec/nsec values. - * can be used - */ - - lwa r4,WTOM_CLOCK_SEC(r3) - lwa r7,WTOM_CLOCK_NSEC(r3) - - /* We now have our result in r4,r7. We create a fake dependency - * on that result and re-check the counter - */ - or r9,r4,r7 - xor r0,r9,r9 - add r3,r3,r0 - ld r0,CFG_TB_UPDATE_COUNT(r3) - cmpld cr0,r0,r8 /* check if updated */ - bne- 50b - - /* Calculate and store result. 
Note that this mimmics the C code, - * which may cause funny results if nsec goes negative... is that - * possible at all ? - */ - add r4,r4,r5 - add r7,r7,r6 - lis r9,NSEC_PER_SEC@h - ori r9,r9,NSEC_PER_SEC@l - cmpl cr0,r7,r9 - cmpli cr1,r7,0 - blt 1f - subf r7,r9,r7 - addi r4,r4,1 -1: bge cr1,1f - addi r4,r4,-1 - add r7,r7,r9 -1: std r4,TSPC64_TV_SEC(r11) - std r7,TSPC64_TV_NSEC(r11) - - mtlr r12 - crclr cr0*4+so - li r3,0 - blr - - /* - * syscall fallback - */ -98: - mtlr r12 - mr r3,r10 - mr r4,r11 -99: - li r0,__NR_clock_gettime - sc - blr - .cfi_endproc -V_FUNCTION_END(__kernel_clock_gettime) - - -/* - * Exact prototype of clock_getres() - * - * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); - * - */ -V_FUNCTION_BEGIN(__kernel_clock_getres) - .cfi_startproc - /* Check for supported clock IDs */ - cmpwi cr0,r3,CLOCK_REALTIME - cmpwi cr1,r3,CLOCK_MONOTONIC - cror cr0*4+eq,cr0*4+eq,cr1*4+eq - bne cr0,99f - - li r3,0 - cmpli cr0,r4,0 - crclr cr0*4+so - beqlr - lis r5,CLOCK_REALTIME_RES@h - ori r5,r5,CLOCK_REALTIME_RES@l - std r3,TSPC64_TV_SEC(r4) - std r5,TSPC64_TV_NSEC(r4) - blr - - /* - * syscall fallback - */ -99: - li r0,__NR_clock_getres - sc - blr - .cfi_endproc -V_FUNCTION_END(__kernel_clock_getres) - - -/* - * This is the core of gettimeofday(), it returns the xsec - * value in r4 and expects the datapage ptr (non clobbered) - * in r3. clobbers r0,r4,r5,r6,r7,r8 - * When returning, r8 contains the counter value that can be reused - */ -V_FUNCTION_BEGIN(__do_get_xsec) - .cfi_startproc - /* check for update count & load values */ -1: ld r8,CFG_TB_UPDATE_COUNT(r3) - andi. r0,r8,1 /* pending update ? loop */ - bne- 1b - xor r0,r8,r8 /* create dependency */ - add r3,r3,r0 - - /* Get TB & offset it. We use the MFTB macro which will generate - * workaround code for Cell. - */ - MFTB(r7) - ld r9,CFG_TB_ORIG_STAMP(r3) - subf r7,r9,r7 - - /* Scale result */ - ld r5,CFG_TB_TO_XS(r3) - mulhdu r7,r7,r5 - - /* Add stamp since epoch */ - ld r6,CFG_STAMP_XSEC(r3) - add r4,r6,r7 - - xor r0,r4,r4 - add r3,r3,r0 - ld r0,CFG_TB_UPDATE_COUNT(r3) - cmpld cr0,r0,r8 /* check if updated */ - bne- 1b - blr - .cfi_endproc -V_FUNCTION_END(__do_get_xsec) Index: linux/arch/powerpc/kernel/vdso64/vdso64.lds.S =================================================================== --- linux.orig/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ linux/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -115,10 +115,6 @@ VERSION global: __kernel_datapage_offset; /* Has to be there for the kernel to find */ __kernel_get_syscall_map; - __kernel_gettimeofday; - __kernel_clock_gettime; - __kernel_clock_getres; - __kernel_get_tbfreq; __kernel_sync_dicache; __kernel_sync_dicache_p5; __kernel_sigtramp_rt64; Index: linux/include/asm-powerpc/time.h =================================================================== --- linux.orig/include/asm-powerpc/time.h +++ linux/include/asm-powerpc/time.h @@ -47,26 +47,6 @@ extern unsigned long ppc_proc_freq; extern unsigned long ppc_tb_freq; #define DEFAULT_TB_FREQ 125000000UL -/* - * By putting all of this stuff into a single struct we - * reduce the number of cache lines touched by do_gettimeofday. - * Both by collecting all of the data in one cache line and - * by touching only one TOC entry on ppc64. 
- */ -struct gettimeofday_vars { - u64 tb_to_xs; - u64 stamp_xsec; - u64 tb_orig_stamp; -}; - -struct gettimeofday_struct { - unsigned long tb_ticks_per_sec; - struct gettimeofday_vars vars[2]; - struct gettimeofday_vars * volatile varp; - unsigned var_idx; - unsigned tb_to_us; -}; - struct div_result { u64 result_high; u64 result_low; Index: linux/include/asm-powerpc/vdso_datapage.h =================================================================== --- linux.orig/include/asm-powerpc/vdso_datapage.h +++ linux/include/asm-powerpc/vdso_datapage.h @@ -74,11 +74,6 @@ struct vdso_data { __u32 icache_size; /* L1 i-cache size 0x68 */ __u32 icache_line_size; /* L1 i-cache line size 0x6C */ - /* those additional ones don't have to be located anywhere - * special as they were not part of the original systemcfg - */ - __s32 wtom_clock_sec; /* Wall to monotonic clock */ - __s32 wtom_clock_nsec; __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ }; @@ -89,15 +84,6 @@ struct vdso_data { * And here is the simpler 32 bits version */ struct vdso_data { - __u64 tb_orig_stamp; /* Timebase at boot 0x30 */ - __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ - __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */ - __u64 stamp_xsec; /* 0x48 */ - __u32 tb_update_count; /* Timebase atomicity ctr 0x50 */ - __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */ - __u32 tz_dsttime; /* Type of dst correction 0x5C */ - __s32 wtom_clock_sec; /* Wall to monotonic clock */ - __s32 wtom_clock_nsec; __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ }; patches/lockdep-more-entries.patch0000664000077200007720000000130410655544572016605 0ustar mingomingo--- kernel/lockdep_internals.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux/kernel/lockdep_internals.h =================================================================== --- linux.orig/kernel/lockdep_internals.h +++ linux/kernel/lockdep_internals.h @@ -15,12 +15,12 @@ * table (if it's not there yet), and we check it for lock order * conflicts and deadlocks. */ -#define MAX_LOCKDEP_ENTRIES 8192UL +#define MAX_LOCKDEP_ENTRIES 16384UL #define MAX_LOCKDEP_KEYS_BITS 11 #define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) -#define MAX_LOCKDEP_CHAINS_BITS 14 +#define MAX_LOCKDEP_CHAINS_BITS 15 #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) /* patches/x86_64-apic-shuffle-calibration-around.patch0000664000077200007720000000413610655544570021636 0ustar mingomingoSubject: x86_64: Move apic calibration code around Let the calibration code fill in calibration_result directly and move the variable on top of the file. Fixup a printk w/o log level while at it. 
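In short: calibration_result moves to the top of the file, calibrate_APIC_clock() fills it directly instead of returning a value, and the bare printk gains a KERN_DEBUG level. A condensed view of the resulting shape (simplified from the hunks below; the measurement loop is elided and the local "result" declaration is assumed, so treat this as a sketch rather than the literal file contents):

	static unsigned int calibration_result;		/* now declared above its users */

	static void __init calibrate_APIC_clock(void)	/* was: static int __init ... */
	{
		int result = 0;

		/* ... count APIC timer ticks against the TSC ... */

		printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
		calibration_result = result * APIC_DIVISOR / HZ;
	}

	void __init setup_boot_APIC_clock(void)
	{
		calibrate_APIC_clock();		/* no assignment at the call site anymore */
		/* program the LAPIC timer using calibration_result */
	}
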
Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -56,6 +56,8 @@ static struct resource lapic_resource = .flags = IORESOURCE_MEM | IORESOURCE_BUSY, }; +static unsigned int calibration_result; + /* * cpu_mask that denotes the CPUs that needs timer interrupt coming in as * IPIs in place of local APIC timers @@ -822,7 +824,7 @@ static void setup_APIC_timer(unsigned in #define TICK_COUNT 100000000 -static int __init calibrate_APIC_clock(void) +static void __init calibrate_APIC_clock(void) { unsigned apic, apic_start; unsigned long tsc, tsc_start; @@ -856,17 +858,14 @@ static int __init calibrate_APIC_clock(v result = (apic_start - apic) * 1000L * tsc_khz / (tsc - tsc_start); } - printk("result %d\n", result); - + printk(KERN_DEBUG "APIC timer calibration result %d\n", result); printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", result / 1000 / 1000, result / 1000 % 1000); - return result * APIC_DIVISOR / HZ; + calibration_result = result * APIC_DIVISOR / HZ; } -static unsigned int calibration_result; - void __init setup_boot_APIC_clock (void) { if (disable_apic_timer) { @@ -879,7 +878,7 @@ void __init setup_boot_APIC_clock (void) local_irq_disable(); - calibration_result = calibrate_APIC_clock(); + calibrate_APIC_clock(); /* * Now set up the timer for real. */ @@ -986,8 +985,6 @@ void setup_APIC_extended_lvt(unsigned ch apic_write(reg, v); } -#undef APIC_DIVISOR - /* * Local timer interrupt handler. It does both profiling and * process statistics/rescheduling. patches/reset-latency-histogram.patch0000664000077200007720000000546710655544572017345 0ustar mingomingoSubject: Latency tracer: Reset histogram when preempt_max_latency was reset From: Carsten Emde When the histogram mode is active, it is not possible to reset the histogram for a second one. Reset it, when preempt_max_latency was reset. 
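The reset is edge-triggered: the tracer remembers the previous preempt_max_latency value and reinitializes the per-CPU wakeup histograms only on a non-zero to zero transition, i.e. when user space clears the watermark. A minimal stand-alone sketch of that idiom (plain C with illustrative names; the real code below works on per-CPU hist_data_t structures and is guarded by CONFIG_WAKEUP_LATENCY_HIST):

	#include <string.h>

	struct hist_data {
		unsigned long bins[64];
		unsigned long min_lat;
		unsigned long max_lat;
		unsigned long total_samples;
	};

	static struct hist_data wakeup_hist;
	static unsigned long last_max_latency;

	static void hist_reset(struct hist_data *h)
	{
		memset(h, 0, sizeof(*h));
		h->min_lat = ~0UL;	/* first recorded sample becomes the minimum again */
	}

	static void check_max_latency(unsigned long cur_max)
	{
		/* non-zero -> zero transition: user space cleared the watermark */
		if (last_max_latency > 0 && cur_max == 0)
			hist_reset(&wakeup_hist);
		last_max_latency = cur_max;
	}
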
Signed-off-by: Carsten Emde --- include/linux/latency_hist.h | 1 + kernel/latency_hist.c | 28 ++++++++++++++++++++++++++++ kernel/latency_trace.c | 15 +++++++++++++++ 3 files changed, 44 insertions(+) Index: linux/include/linux/latency_hist.h =================================================================== --- linux.orig/include/linux/latency_hist.h +++ linux/include/linux/latency_hist.h @@ -23,6 +23,7 @@ enum { #ifdef CONFIG_LATENCY_HIST extern void latency_hist(int latency_type, int cpu, unsigned long latency); +extern void latency_hist_reset(void); # define latency_hist_flag 1 #else # define latency_hist(a,b,c) do { (void)(cpu); } while (0) Index: linux/kernel/latency_hist.c =================================================================== --- linux.orig/kernel/latency_hist.c +++ linux/kernel/latency_hist.c @@ -265,3 +265,31 @@ static __init int latency_hist_init(void __initcall(latency_hist_init); + +#ifdef CONFIG_WAKEUP_LATENCY_HIST +static void hist_reset(hist_data_t *hist) +{ + atomic_dec(&hist->hist_mode); + + memset(hist->hist_array, 0, sizeof(hist->hist_array)); + hist->beyond_hist_bound_samples = 0UL; + hist->min_lat = 0xFFFFFFFFUL; + hist->max_lat = 0UL; + hist->total_samples = 0UL; + hist->accumulate_lat = 0UL; + hist->avg_lat = 0UL; + + atomic_inc(&hist->hist_mode); +} + +void latency_hist_reset(void) +{ + int cpu; + hist_data_t *hist; + + for_each_online_cpu(cpu) { + hist = &per_cpu(wakeup_latency_hist, cpu); + hist_reset(hist); + } +} +#endif Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -2180,6 +2180,8 @@ void notrace trace_preempt_exit_idle(voi */ #ifdef CONFIG_WAKEUP_TIMING +unsigned long last_preempt_max_latency; + static void notrace check_wakeup_timing(struct cpu_trace *tr, unsigned long parent_eip, unsigned long *flags) @@ -2207,6 +2209,19 @@ check_wakeup_timing(struct cpu_trace *tr if (!report_latency(delta)) goto out; +#ifdef CONFIG_WAKEUP_LATENCY_HIST + /* + * Was preempt_max_latency reset? + * If so, we reinitialize the latency histograms to keep them in sync. 
+ * + * FIXME: Remove the poll and write our own procfs handler, so + * we can trigger on the write to preempt_max_latency + */ + if (last_preempt_max_latency > 0 && preempt_max_latency == 0) + latency_hist_reset(); + last_preempt_max_latency = preempt_max_latency; +#endif + ____trace(smp_processor_id(), TRACE_FN, tr, CALLER_ADDR0, parent_eip, 0, 0, 0, *flags); patches/gcc-warnings-shut-up.patch0000664000077200007720000000651010655544571016545 0ustar mingomingo fs/isofs/namei.c | 2 +- fs/jffs2/erase.c | 2 +- fs/nfsd/nfsctl.c | 2 +- kernel/audit.c | 2 +- net/core/flow.c | 2 +- net/sunrpc/svc.c | 2 +- sound/core/control_compat.c | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) Index: linux/fs/isofs/namei.c =================================================================== --- linux.orig/fs/isofs/namei.c +++ linux/fs/isofs/namei.c @@ -158,7 +158,7 @@ isofs_find_entry(struct inode *dir, stru struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { int found; - unsigned long block, offset; + unsigned long block = 0, offset = 0 /* avoid stupid gcc warning */; struct inode *inode; struct page *page; Index: linux/fs/jffs2/erase.c =================================================================== --- linux.orig/fs/jffs2/erase.c +++ linux/fs/jffs2/erase.c @@ -362,7 +362,7 @@ static void jffs2_mark_erased_block(stru { size_t retlen; int ret; - uint32_t bad_offset; + uint32_t bad_offset = 0 /* shut up gcc */; switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { case -EAGAIN: goto refile; Index: linux/fs/nfsd/nfsctl.c =================================================================== --- linux.orig/fs/nfsd/nfsctl.c +++ linux/fs/nfsd/nfsctl.c @@ -298,7 +298,7 @@ static ssize_t write_filehandle(struct f * qword quoting is used, so filehandle will be \x.... 
*/ char *dname, *path; - int maxsize; + int maxsize = 0; char *mesg = buf; int len; struct auth_domain *dom; Index: linux/kernel/audit.c =================================================================== --- linux.orig/kernel/audit.c +++ linux/kernel/audit.c @@ -1051,7 +1051,7 @@ struct audit_buffer *audit_log_start(str { struct audit_buffer *ab = NULL; struct timespec t; - unsigned int serial; + unsigned int serial = 0 /* shut up gcc */; int reserve; unsigned long timeout_start = jiffies; Index: linux/net/core/flow.c =================================================================== --- linux.orig/net/core/flow.c +++ linux/net/core/flow.c @@ -172,7 +172,7 @@ static int flow_key_compare(struct flowi void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir, flow_resolve_t resolver) { - struct flow_cache_entry *fle, **head; + struct flow_cache_entry *fle, **head = NULL /* shut up GCC */; unsigned int hash; int cpu; Index: linux/net/sunrpc/svc.c =================================================================== --- linux.orig/net/sunrpc/svc.c +++ linux/net/sunrpc/svc.c @@ -547,7 +547,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_rqst *rqstp; int error = -ENOMEM; int have_oldmask = 0; - cpumask_t oldmask; + cpumask_t oldmask = CPU_MASK_NONE /* shut up GCC */; rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); if (!rqstp) Index: linux/sound/core/control_compat.c =================================================================== --- linux.orig/sound/core/control_compat.c +++ linux/sound/core/control_compat.c @@ -219,7 +219,7 @@ static int copy_ctl_value_from_user(stru struct snd_ctl_elem_value32 __user *data32, int *typep, int *countp) { - int i, type, count, size; + int i, type, count = 0 /* shut up gcc warning */, size; unsigned int indirect; if (copy_from_user(&data->id, &data32->id, sizeof(data->id))) patches/latency-tracing-remove-trace-array.patch0000664000077200007720000000250010655544571021342 0ustar mingomingo--- kernel/sched.c | 38 -------------------------------------- 1 file changed, 38 deletions(-) Index: linux/kernel/sched.c =================================================================== --- linux.orig/kernel/sched.c +++ linux/kernel/sched.c @@ -3325,42 +3325,6 @@ void scheduler_tick(void) #endif } -#if defined(CONFIG_EVENT_TRACE) && defined(CONFIG_DEBUG_RT_MUTEXES) - -static void trace_array(struct prio_array *array) -{ - int i; - struct task_struct *p; - struct list_head *head, *tmp; - - for (i = 0; i < MAX_RT_PRIO; i++) { - head = array->queue + i; - if (list_empty(head)) { - WARN_ON(test_bit(i, array->bitmap)); - continue; - } - WARN_ON(!test_bit(i, array->bitmap)); - list_for_each(tmp, head) { - p = list_entry(tmp, struct task_struct, run_list); - trace_special_pid(p->pid, p->prio, PRIO(p)); - } - } -} - -static inline void trace_all_runnable_tasks(struct rq *rq) -{ - if (trace_enabled) - trace_array(&rq->active); -} - -#else - -static inline void trace_all_runnable_tasks(struct rq *rq) -{ -} - -#endif - /* * Print scheduling while atomic bug: */ @@ -3468,8 +3432,6 @@ need_resched_nonpreemptible: prev->sched_class->put_prev_task(rq, prev, now); next = pick_next_task(rq, prev, now); - trace_all_runnable_tasks(rq); - sched_info_switch(prev, next); if (likely(prev != next)) { patches/x86_64-apic-calibration-remove-divisor.patch0000664000077200007720000000325610655544570021670 0ustar mingomingoSubject: x86_64: Remove APIC_DIVISOR APIC_DIVISOR is rather useless. 
It makes the calibration result more accurate in the first place, but we discard this later when we write the value to the APIC timer by dividing the calibration value by APIC_DIVISOR. Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -760,8 +760,6 @@ void __init init_apic_mappings(void) * P5 APIC double write bug. */ -#define APIC_DIVISOR 16 - static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) { unsigned int lvtt_value, tmp_value; @@ -783,7 +781,7 @@ static void __setup_APIC_LVTT(unsigned i | APIC_TDR_DIV_16); if (!oneshot) - apic_write(APIC_TMICT, clocks/APIC_DIVISOR); + apic_write(APIC_TMICT, clocks); } static void setup_APIC_timer(unsigned int clocks) @@ -836,7 +834,7 @@ static void __init calibrate_APIC_clock( * * No interrupt enable ! */ - __setup_APIC_LVTT(4000000000, 0, 0); + __setup_APIC_LVTT(250000000, 0, 0); apic_start = apic_read(APIC_TMCCT); #ifdef CONFIG_X86_PM_TIMER @@ -863,7 +861,7 @@ static void __init calibrate_APIC_clock( printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", result / 1000 / 1000, result / 1000 % 1000); - calibration_result = result * APIC_DIVISOR / HZ; + calibration_result = result / HZ; } void __init setup_boot_APIC_clock (void) patches/rt-mutex-compat-semaphores.patch0000664000077200007720000003224110655544574017775 0ustar mingomingo drivers/acpi/osl.c | 12 ++++++------ drivers/media/dvb/dvb-core/dvb_frontend.c | 2 +- drivers/media/dvb/dvb-core/dvb_frontend.h | 2 +- drivers/net/3c527.c | 2 +- drivers/net/hamradio/6pack.c | 2 +- drivers/net/hamradio/mkiss.c | 2 +- drivers/net/plip.c | 5 ++++- drivers/net/ppp_async.c | 2 +- drivers/net/ppp_synctty.c | 2 +- drivers/pci/hotplug/cpqphp_ctrl.c | 4 ++-- drivers/pci/hotplug/ibmphp_hpc.c | 2 +- drivers/scsi/aacraid/aacraid.h | 4 ++-- drivers/scsi/qla2xxx/qla_def.h | 2 +- drivers/usb/storage/usb.h | 2 +- fs/jffs2/jffs2_fs_i.h | 2 +- fs/xfs/linux-2.6/sema.h | 9 +++++++-- fs/xfs/linux-2.6/xfs_buf.h | 4 ++-- include/linux/parport.h | 2 +- 18 files changed, 35 insertions(+), 27 deletions(-) Index: linux-rt-rebase.q/drivers/acpi/osl.c =================================================================== --- linux-rt-rebase.q.orig/drivers/acpi/osl.c +++ linux-rt-rebase.q/drivers/acpi/osl.c @@ -739,13 +739,13 @@ void acpi_os_delete_lock(acpi_spinlock h acpi_status acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) { - struct semaphore *sem = NULL; + struct compat_semaphore *sem = NULL; - sem = acpi_os_allocate(sizeof(struct semaphore)); + sem = acpi_os_allocate(sizeof(struct compat_semaphore)); if (!sem) return AE_NO_MEMORY; - memset(sem, 0, sizeof(struct semaphore)); + memset(sem, 0, sizeof(struct compat_semaphore)); sema_init(sem, initial_units); @@ -768,7 +768,7 @@ EXPORT_SYMBOL(acpi_os_create_semaphore); acpi_status acpi_os_delete_semaphore(acpi_handle handle) { - struct semaphore *sem = (struct semaphore *)handle; + struct compat_semaphore *sem = (struct compat_semaphore *)handle; if (!sem) @@ -796,7 +796,7 @@ EXPORT_SYMBOL(acpi_os_delete_semaphore); acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) { acpi_status status = AE_OK; - struct semaphore *sem = (struct semaphore *)handle; + struct compat_semaphore *sem = (struct 
compat_semaphore *)handle; int ret = 0; @@ -883,7 +883,7 @@ EXPORT_SYMBOL(acpi_os_wait_semaphore); */ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) { - struct semaphore *sem = (struct semaphore *)handle; + struct compat_semaphore *sem = (struct compat_semaphore *)handle; if (!sem || (units < 1)) Index: linux-rt-rebase.q/drivers/media/dvb/dvb-core/dvb_frontend.c =================================================================== --- linux-rt-rebase.q.orig/drivers/media/dvb/dvb-core/dvb_frontend.c +++ linux-rt-rebase.q/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -98,7 +98,7 @@ struct dvb_frontend_private { struct dvb_device *dvbdev; struct dvb_frontend_parameters parameters; struct dvb_fe_events events; - struct semaphore sem; + struct compat_semaphore sem; struct list_head list_head; wait_queue_head_t wait_queue; struct task_struct *thread; Index: linux-rt-rebase.q/drivers/media/dvb/dvb-core/dvb_frontend.h =================================================================== --- linux-rt-rebase.q.orig/drivers/media/dvb/dvb-core/dvb_frontend.h +++ linux-rt-rebase.q/drivers/media/dvb/dvb-core/dvb_frontend.h @@ -142,7 +142,7 @@ struct dvb_fe_events { int eventr; int overflow; wait_queue_head_t wait_queue; - struct semaphore sem; + struct compat_semaphore sem; }; struct dvb_frontend { Index: linux-rt-rebase.q/drivers/net/3c527.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/3c527.c +++ linux-rt-rebase.q/drivers/net/3c527.c @@ -182,7 +182,7 @@ struct mc32_local u16 rx_ring_tail; /* index to rx de-queue end */ - struct semaphore cmd_mutex; /* Serialises issuing of execute commands */ + struct compat_semaphore cmd_mutex; /* Serialises issuing of execute commands */ struct completion execution_cmd; /* Card has completed an execute command */ struct completion xceiver_cmd; /* Card has completed a tx or rx command */ }; Index: linux-rt-rebase.q/drivers/net/hamradio/6pack.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/hamradio/6pack.c +++ linux-rt-rebase.q/drivers/net/hamradio/6pack.c @@ -123,7 +123,7 @@ struct sixpack { struct timer_list tx_t; struct timer_list resync_t; atomic_t refcnt; - struct semaphore dead_sem; + struct compat_semaphore dead_sem; spinlock_t lock; }; Index: linux-rt-rebase.q/drivers/net/hamradio/mkiss.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/hamradio/mkiss.c +++ linux-rt-rebase.q/drivers/net/hamradio/mkiss.c @@ -84,7 +84,7 @@ struct mkiss { #define CRC_MODE_SMACK_TEST 4 atomic_t refcnt; - struct semaphore dead_sem; + struct compat_semaphore dead_sem; }; /*---------------------------------------------------------------------------*/ Index: linux-rt-rebase.q/drivers/net/plip.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/plip.c +++ linux-rt-rebase.q/drivers/net/plip.c @@ -228,7 +228,10 @@ struct net_local { struct hh_cache *hh); spinlock_t lock; atomic_t kill_timer; - struct semaphore killed_timer_sem; + /* + * PREEMPT_RT: this isnt a mutex, it should be struct completion. 
+ */ + struct compat_semaphore killed_timer_sem; }; static inline void enable_parport_interrupts (struct net_device *dev) Index: linux-rt-rebase.q/drivers/net/ppp_async.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/ppp_async.c +++ linux-rt-rebase.q/drivers/net/ppp_async.c @@ -67,7 +67,7 @@ struct asyncppp { struct tasklet_struct tsk; atomic_t refcnt; - struct semaphore dead_sem; + struct compat_semaphore dead_sem; struct ppp_channel chan; /* interface to generic ppp layer */ unsigned char obuf[OBUFSIZE]; }; Index: linux-rt-rebase.q/drivers/net/ppp_synctty.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/ppp_synctty.c +++ linux-rt-rebase.q/drivers/net/ppp_synctty.c @@ -70,7 +70,7 @@ struct syncppp { struct tasklet_struct tsk; atomic_t refcnt; - struct semaphore dead_sem; + struct compat_semaphore dead_sem; struct ppp_channel chan; /* interface to generic ppp layer */ }; Index: linux-rt-rebase.q/drivers/pci/hotplug/cpqphp_ctrl.c =================================================================== --- linux-rt-rebase.q.orig/drivers/pci/hotplug/cpqphp_ctrl.c +++ linux-rt-rebase.q/drivers/pci/hotplug/cpqphp_ctrl.c @@ -45,8 +45,8 @@ static int configure_new_function(struct u8 behind_bridge, struct resource_lists *resources); static void interrupt_event_handler(struct controller *ctrl); -static struct semaphore event_semaphore; /* mutex for process loop (up if something to process) */ -static struct semaphore event_exit; /* guard ensure thread has exited before calling it quits */ +static struct compat_semaphore event_semaphore; /* mutex for process loop (up if something to process) */ +static struct compat_semaphore event_exit; /* guard ensure thread has exited before calling it quits */ static int event_finished; static unsigned long pushbutton_pending; /* = 0 */ Index: linux-rt-rebase.q/drivers/pci/hotplug/ibmphp_hpc.c =================================================================== --- linux-rt-rebase.q.orig/drivers/pci/hotplug/ibmphp_hpc.c +++ linux-rt-rebase.q/drivers/pci/hotplug/ibmphp_hpc.c @@ -106,7 +106,7 @@ static int tid_poll; static struct mutex sem_hpcaccess; // lock access to HPC static struct semaphore semOperations; // lock all operations and // access to data structures -static struct semaphore sem_exit; // make sure polling thread goes away +static struct compat_semaphore sem_exit; // make sure polling thread goes away //---------------------------------------------------------------------------- // local function prototypes //---------------------------------------------------------------------------- Index: linux-rt-rebase.q/drivers/scsi/aacraid/aacraid.h =================================================================== --- linux-rt-rebase.q.orig/drivers/scsi/aacraid/aacraid.h +++ linux-rt-rebase.q/drivers/scsi/aacraid/aacraid.h @@ -715,7 +715,7 @@ struct aac_fib_context { u32 unique; // unique value representing this context ulong jiffies; // used for cleanup - dmb changed to ulong struct list_head next; // used to link context's into a linked list - struct semaphore wait_sem; // this is used to wait for the next fib to arrive. + struct compat_semaphore wait_sem; // this is used to wait for the next fib to arrive. 
int wait; // Set to true when thread is in WaitForSingleObject unsigned long count; // total number of FIBs on FibList struct list_head fib_list; // this holds fibs and their attachd hw_fibs @@ -785,7 +785,7 @@ struct fib { * This is the event the sendfib routine will wait on if the * caller did not pass one and this is synch io. */ - struct semaphore event_wait; + struct compat_semaphore event_wait; spinlock_t event_lock; u32 done; /* gets set to 1 when fib is complete */ Index: linux-rt-rebase.q/drivers/scsi/qla2xxx/qla_def.h =================================================================== --- linux-rt-rebase.q.orig/drivers/scsi/qla2xxx/qla_def.h +++ linux-rt-rebase.q/drivers/scsi/qla2xxx/qla_def.h @@ -2417,7 +2417,7 @@ typedef struct scsi_qla_host { struct semaphore mbx_cmd_sem; /* Serialialize mbx access */ struct semaphore vport_sem; /* Virtual port synchronization */ - struct semaphore mbx_intr_sem; /* Used for completion notification */ + struct compat_semaphore mbx_intr_sem; /* Used for completion notification */ uint32_t mbx_flags; #define MBX_IN_PROGRESS BIT_0 Index: linux-rt-rebase.q/drivers/usb/storage/usb.h =================================================================== --- linux-rt-rebase.q.orig/drivers/usb/storage/usb.h +++ linux-rt-rebase.q/drivers/usb/storage/usb.h @@ -147,7 +147,7 @@ struct us_data { struct task_struct *ctl_thread; /* the control thread */ /* mutual exclusion and synchronization structures */ - struct semaphore sema; /* to sleep thread on */ + struct compat_semaphore sema; /* to sleep thread on */ struct completion notify; /* thread begin/end */ wait_queue_head_t delay_wait; /* wait during scan, reset */ Index: linux-rt-rebase.q/fs/jffs2/jffs2_fs_i.h =================================================================== --- linux-rt-rebase.q.orig/fs/jffs2/jffs2_fs_i.h +++ linux-rt-rebase.q/fs/jffs2/jffs2_fs_i.h @@ -24,7 +24,7 @@ struct jffs2_inode_info { before letting GC proceed. Or we'd have to put ugliness into the GC code so it didn't attempt to obtain the i_mutex for the inode(s) which are already locked */ - struct semaphore sem; + struct compat_semaphore sem; /* The highest (datanode) version number used for this ino */ uint32_t highest_version; Index: linux-rt-rebase.q/fs/xfs/linux-2.6/sema.h =================================================================== --- linux-rt-rebase.q.orig/fs/xfs/linux-2.6/sema.h +++ linux-rt-rebase.q/fs/xfs/linux-2.6/sema.h @@ -27,7 +27,7 @@ * sema_t structure just maps to struct semaphore in Linux kernel. 
*/ -typedef struct semaphore sema_t; +typedef struct compat_semaphore sema_t; #define initnsema(sp, val, name) sema_init(sp, val) #define psema(sp, b) down(sp) @@ -36,7 +36,12 @@ typedef struct semaphore sema_t; static inline int issemalocked(sema_t *sp) { - return down_trylock(sp) || (up(sp), 0); + int rv; + + if ((rv = down_trylock(sp))) + return (rv); + up(sp); + return (0); } /* Index: linux-rt-rebase.q/fs/xfs/linux-2.6/xfs_buf.h =================================================================== --- linux-rt-rebase.q.orig/fs/xfs/linux-2.6/xfs_buf.h +++ linux-rt-rebase.q/fs/xfs/linux-2.6/xfs_buf.h @@ -118,7 +118,7 @@ typedef int (*xfs_buf_bdstrat_t)(struct #define XB_PAGES 2 typedef struct xfs_buf { - struct semaphore b_sema; /* semaphore for lockables */ + struct compat_semaphore b_sema; /* semaphore for lockables */ unsigned long b_queuetime; /* time buffer was queued */ atomic_t b_pin_count; /* pin count */ wait_queue_head_t b_waiters; /* unpin waiters */ @@ -138,7 +138,7 @@ typedef struct xfs_buf { xfs_buf_iodone_t b_iodone; /* I/O completion function */ xfs_buf_relse_t b_relse; /* releasing function */ xfs_buf_bdstrat_t b_strat; /* pre-write function */ - struct semaphore b_iodonesema; /* Semaphore for I/O waiters */ + struct compat_semaphore b_iodonesema; /* Semaphore for I/O waiters */ void *b_fspriv; void *b_fspriv2; void *b_fspriv3; Index: linux-rt-rebase.q/include/linux/parport.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/parport.h +++ linux-rt-rebase.q/include/linux/parport.h @@ -265,7 +265,7 @@ enum ieee1284_phase { struct ieee1284_info { int mode; volatile enum ieee1284_phase phase; - struct semaphore irq; + struct compat_semaphore irq; }; /* A parallel port */ patches/fix-migrating-softirq.patch0000664000077200007720000001117410655544576017021 0ustar mingomingoFrom rostedt@goodmis.org Wed Jun 13 14:47:26 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from ms-smtp-02.nyroc.rr.com (ms-smtp-02.nyroc.rr.com [24.24.2.56]) by mail.tglx.de (Postfix) with ESMTP id AB7B665C3D9 for ; Wed, 13 Jun 2007 14:47:26 +0200 (CEST) Received: from [192.168.23.10] (cpe-24-94-51-176.stny.res.rr.com [24.94.51.176]) by ms-smtp-02.nyroc.rr.com (8.13.6/8.13.6) with ESMTP id l5DClGVg022890; Wed, 13 Jun 2007 08:47:17 -0400 (EDT) Subject: [PATCH RT] fix migrating softirq [cause of network hang] From: Steven Rostedt To: Ingo Molnar Cc: LKML , RT , Thomas Gleixner , john stultz Content-Type: text/plain Date: Wed, 13 Jun 2007 08:47:16 -0400 Message-Id: <1181738836.10408.54.camel@localhost.localdomain> Mime-Version: 1.0 X-Mailer: Evolution 2.6.3 X-Virus-Scanned: Symantec AntiVirus Scan Engine X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Softirqs are bound to a single CPU. That is to say, that once a softirq function starts to run, it will stay on the CPU that it is running on while it's running. In RT, softirqs are threads, and we have a softirq thread per cpu. Each softirq thread is bound to a single CPU that it represents. In order to speed things up and lower context switches in RT, if a softirq thread is of the same priority as an interrupt thread, then when the interrupt thread is about to exit, it tests to see if any softirq threads need to be run on that cpu. 
Instead of running the softirq thread, it simply performs the functions for the softirq within the interrupt thread. The problem is, nothing prevents the interrupt thread from migrating. So while the interrupt thread is running the softirq function, it may migrate to another CPU in the middle of that function. This means that any CPU data that the softirq is touching can be corrupted. I was experiencing a network hang that sometimes would come back, and sometimes not. Using my logdev debugger, I started to debug this problem. I came across this at the moment of the hang: [ 389.131279] cpu:0 (IRQ-11:427) tcp_rcv_established:4056 rcv_nxt=-1665585797 [ 389.131615] cpu:1 192.168.23.72:22 <== 192.168.23.60:41352 ack:2629381499 seq:1773074099 (----A-) len:0 win:790 end_seq:1773074099 [ 389.131626] cpu:1 (IRQ-11:427) ip_finish_output2:187 dst->hh=ffff81003b213080 [ 389.131635] cpu:1 (IRQ-11:427) ip_finish_output2:189 hh_output=ffffffff80429009 Here we see IRQ-11 in the process of finishing up the softirq-net-tx function. In the middle of it, we receive a packet, and that must have pushed the interrupt thread over to CPU 1, and it finished up the softirq there. This patch temporarily binds the hardirq thread on the CPU that it runs the softirqs on. With this patch I have not seen my network hang. I ran it over night, doing compiles and such, and it seems fine. I would be able to cause the hang with various loads within a minute, now I can't cause it after several minutes. I'm assuming that this fix may fix other bugs too. Signed-off-by: Steven Rostedt --- kernel/irq/manage.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) Index: linux-rt-rebase.q/kernel/irq/manage.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/manage.c +++ linux-rt-rebase.q/kernel/irq/manage.c @@ -763,7 +763,15 @@ static int do_irqd(void * __desc) struct irq_desc *desc = __desc; #ifdef CONFIG_SMP - set_cpus_allowed(current, desc->affinity); + cpumask_t cpus_allowed, mask; + + cpus_allowed = desc->affinity; + /* + * Restrict it to one cpu so we avoid being migrated inside of + * do_softirq_from_hardirq() + */ + mask = cpumask_of_cpu(first_cpu(desc->affinity)); + set_cpus_allowed(current, mask); #endif current->flags |= PF_NOFREEZE | PF_HARDIRQ; @@ -787,8 +795,16 @@ static int do_irqd(void * __desc) /* * Did IRQ affinities change? */ - if (!cpus_equal(current->cpus_allowed, desc->affinity)) - set_cpus_allowed(current, desc->affinity); + if (!cpus_equal(cpus_allowed, desc->affinity)) { + cpus_allowed = desc->affinity; + /* + * Restrict it to one cpu so we avoid being + * migrated inside of + * do_softirq_from_hardirq() + */ + mask = cpumask_of_cpu(first_cpu(desc->affinity)); + set_cpus_allowed(current, mask); + } #endif schedule(); } patches/new-softirq-code.patch0000664000077200007720000002317510655544576015761 0ustar mingomingoSubject: [patch] softirq preemption: optimization From: Ingo Molnar optimize softirq preemption by allowing a hardirq context to pick up softirq processing. 
Signed-off-by: Ingo Molnar --- kernel/irq/manage.c | 19 +----- kernel/softirq.c | 160 ++++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 131 insertions(+), 48 deletions(-) Index: linux-rt-rebase.q/kernel/irq/manage.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/manage.c +++ linux-rt-rebase.q/kernel/irq/manage.c @@ -694,7 +694,6 @@ static void thread_edge_irq(irq_desc_t * desc->status &= ~IRQ_PENDING; spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); - cond_resched_hardirq_context(); spin_lock_irq(&desc->lock); if (!noirqdebug) note_interrupt(irq, desc, action_ret); @@ -723,7 +722,6 @@ static void thread_do_irq(irq_desc_t *de desc->status &= ~IRQ_PENDING; spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); - cond_resched_hardirq_context(); spin_lock_irq(&desc->lock); if (!noirqdebug) note_interrupt(irq, desc, action_ret); @@ -759,8 +757,6 @@ static void do_hardirq(struct irq_desc * wake_up(&desc->wait_for_handler); } -extern asmlinkage void __do_softirq(void); - static int do_irqd(void * __desc) { struct sched_param param = { 0, }; @@ -780,16 +776,13 @@ static int do_irqd(void * __desc) while (!kthread_should_stop()) { local_irq_disable_nort(); - set_current_state(TASK_INTERRUPTIBLE); -#ifndef CONFIG_PREEMPT_RT - irq_enter(); -#endif - do_hardirq(desc); -#ifndef CONFIG_PREEMPT_RT - irq_exit(); -#endif + do { + set_current_state(TASK_INTERRUPTIBLE); + do_hardirq(desc); + do_softirq_from_hardirq(); + } while (current->state == TASK_RUNNING); + local_irq_enable_nort(); - cond_resched(); #ifdef CONFIG_SMP /* * Did IRQ affinities change? Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -101,8 +101,26 @@ static void wakeup_softirqd(int softirq) /* Interrupts are disabled: no need to stop preemption */ struct task_struct *tsk = __get_cpu_var(ksoftirqd)[softirq].tsk; - if (tsk && tsk->state != TASK_RUNNING) - wake_up_process(tsk); + if (unlikely(!tsk)) + return; +#if defined(CONFIG_PREEMPT_SOFTIRQS) && defined(CONFIG_PREEMPT_HARDIRQS) + /* + * Optimization: if we are in a hardirq thread context, and + * if the priority of the softirq thread is the same as the + * priority of the hardirq thread, then 'merge' softirq + * processing into the hardirq context. (it will later on + * execute softirqs via do_softirq_from_hardirq()). + * So here we can skip the wakeup and can rely on the hardirq + * context processing it later on. + */ + if ((current->flags & PF_HARDIRQ) && !hardirq_count() && + (tsk->normal_prio == current->normal_prio)) + return; +#endif + /* + * Wake up the softirq task: + */ + wake_up_process(tsk); } /* @@ -251,50 +269,100 @@ EXPORT_SYMBOL(local_bh_enable_ip); * we want to handle softirqs as soon as possible, but they * should not be able to lock up the box. 
*/ -#define MAX_SOFTIRQ_RESTART 10 +#define MAX_SOFTIRQ_RESTART 20 + +static DEFINE_PER_CPU(u32, softirq_running); -asmlinkage void ___do_softirq(void) +static void ___do_softirq(const int same_prio_only) { + int max_restart = MAX_SOFTIRQ_RESTART, max_loops = MAX_SOFTIRQ_RESTART; + __u32 pending, available_mask, same_prio_skipped; struct softirq_action *h; - __u32 pending; - int max_restart = MAX_SOFTIRQ_RESTART; - int cpu; + struct task_struct *tsk; + int cpu, softirq; pending = local_softirq_pending(); account_system_vtime(current); cpu = smp_processor_id(); restart: + available_mask = -1; + softirq = 0; + same_prio_skipped = 0; /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); - local_irq_enable(); - h = softirq_vec; do { + u32 softirq_mask = 1 << softirq; + if (pending & 1) { - { - u32 preempt_count = preempt_count(); - h->action(h); - if (preempt_count != preempt_count()) { - print_symbol("BUG: softirq exited %s with wrong preemption count!\n", (unsigned long) h->action); - printk("entered with %08x, exited with %08x.\n", preempt_count, preempt_count()); - preempt_count() = preempt_count; + u32 preempt_count = preempt_count(); + +#if defined(CONFIG_PREEMPT_SOFTIRQS) && defined(CONFIG_PREEMPT_HARDIRQS) + /* + * If executed by a same-prio hardirq thread + * then skip pending softirqs that belong + * to softirq threads with different priority: + */ + if (same_prio_only) { + tsk = __get_cpu_var(ksoftirqd)[softirq].tsk; + if (tsk && tsk->normal_prio != + current->normal_prio) { + same_prio_skipped |= softirq_mask; + available_mask &= ~softirq_mask; + goto next; } } +#endif + /* + * Is this softirq already being processed? + */ + if (per_cpu(softirq_running, cpu) & softirq_mask) { + available_mask &= ~softirq_mask; + goto next; + } + per_cpu(softirq_running, cpu) |= softirq_mask; + local_irq_enable(); + + h->action(h); + if (preempt_count != preempt_count()) { + print_symbol("BUG: softirq exited %s with wrong preemption count!\n", (unsigned long) h->action); + printk("entered with %08x, exited with %08x.\n", preempt_count, preempt_count()); + preempt_count() = preempt_count; + } rcu_bh_qsctr_inc(cpu); cond_resched_softirq_context(); + local_irq_disable(); + per_cpu(softirq_running, cpu) &= ~softirq_mask; } +next: h++; + softirq++; pending >>= 1; } while (pending); - local_irq_disable(); - + or_softirq_pending(same_prio_skipped); pending = local_softirq_pending(); - if (pending && --max_restart) - goto restart; + if (pending & available_mask) { + if (--max_restart) + goto restart; + /* + * With softirq threading there's no reason not to + * finish the workload we have: + */ +#ifdef CONFIG_PREEMPT_SOFTIRQS + if (--max_loops) { + if (printk_ratelimit()) + printk("INFO: softirq overload: %08x\n", pending); + max_restart = MAX_SOFTIRQ_RESTART; + goto restart; + } + if (printk_ratelimit()) + printk("BUG: softirq loop! 
%08x\n", pending); +#endif + } if (pending) trigger_softirqs(); @@ -322,7 +390,7 @@ asmlinkage void __do_softirq(void) p_flags = current->flags & PF_HARDIRQ; current->flags &= ~PF_HARDIRQ; - ___do_softirq(); + ___do_softirq(0); trace_softirq_exit(); @@ -346,20 +414,29 @@ void do_softirq_from_hardirq(void) if (!local_softirq_pending()) return; /* - * 'immediate' softirq execution: + * 'immediate' softirq execution, from hardirq context: */ + local_irq_disable(); __local_bh_disable((unsigned long)__builtin_return_address(0)); +#ifndef CONFIG_PREEMPT_SOFTIRQS + trace_softirq_enter(); +#endif p_flags = current->flags & PF_HARDIRQ; current->flags &= ~PF_HARDIRQ; + current->flags |= PF_SOFTIRQ; - ___do_softirq(); + ___do_softirq(1); +#ifndef CONFIG_PREEMPT_SOFTIRQS trace_softirq_exit(); - +#endif account_system_vtime(current); - _local_bh_enable(); current->flags |= p_flags; + current->flags &= ~PF_SOFTIRQ; + + _local_bh_enable(); + local_irq_enable(); } #ifndef __ARCH_HAS_DO_SOFTIRQ @@ -693,8 +770,9 @@ static int ksoftirqd(void * __data) { struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2 }; struct softirqdata *data = __data; - u32 mask = (1 << data->nr); + u32 softirq_mask = (1 << data->nr); struct softirq_action *h; + int cpu = data->cpu; #ifdef CONFIG_PREEMPT_SOFTIRQS init_waitqueue_head(&data->wait); @@ -706,7 +784,8 @@ static int ksoftirqd(void * __data) while (!kthread_should_stop()) { preempt_disable(); - if (!(local_softirq_pending() & mask)) { + if (!(local_softirq_pending() & softirq_mask)) { +sleep_more: __preempt_enable_no_resched(); schedule(); preempt_disable(); @@ -718,16 +797,26 @@ static int ksoftirqd(void * __data) data->running = 1; #endif - while (local_softirq_pending() & mask) { + while (local_softirq_pending() & softirq_mask) { /* Preempt disable stops cpu going offline. If already offline, we'll be on wrong CPU: don't process */ - if (cpu_is_offline(data->cpu)) + if (cpu_is_offline(cpu)) goto wait_to_die; local_irq_disable(); + /* + * Is the softirq already being executed by + * a hardirq context? 
+ */ + if (per_cpu(softirq_running, cpu) & softirq_mask) { + local_irq_enable(); + set_current_state(TASK_INTERRUPTIBLE); + goto sleep_more; + } + per_cpu(softirq_running, cpu) |= softirq_mask; __preempt_enable_no_resched(); - set_softirq_pending(local_softirq_pending() & ~mask); + set_softirq_pending(local_softirq_pending() & ~softirq_mask); local_bh_disable(); local_irq_enable(); @@ -737,6 +826,7 @@ static int ksoftirqd(void * __data) rcu_bh_qsctr_inc(data->cpu); local_irq_disable(); + per_cpu(softirq_running, cpu) &= ~softirq_mask; _local_bh_enable(); local_irq_enable(); @@ -879,19 +969,19 @@ static int __cpuinit cpu_callback(struct } #endif case CPU_DEAD: - case CPU_DEAD_FROZEN: { - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - - sched_setscheduler(p, SCHED_FIFO, ¶m); + case CPU_DEAD_FROZEN: for (i = 0; i < MAX_SOFTIRQ; i++) { + struct sched_param param; + + param.sched_priority = MAX_RT_PRIO-1; p = per_cpu(ksoftirqd, hotcpu)[i].tsk; + sched_setscheduler(p, SCHED_FIFO, ¶m); per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; kthread_stop(p); } takeover_tasklets(hotcpu); break; #endif /* CONFIG_HOTPLUG_CPU */ - } } return NOTIFY_OK; } patches/rt-mutex-irq-flags-checking.patch0000664000077200007720000000462710655544573020012 0ustar mingomingo--- include/linux/irqflags.h | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) Index: linux-rt-rebase.q/include/linux/irqflags.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/irqflags.h +++ linux-rt-rebase.q/include/linux/irqflags.h @@ -11,6 +11,12 @@ #ifndef _LINUX_TRACE_IRQFLAGS_H #define _LINUX_TRACE_IRQFLAGS_H +#define BUILD_CHECK_IRQ_FLAGS(flags) \ + do { \ + BUILD_BUG_ON(sizeof(flags) != sizeof(unsigned long)); \ + typecheck(unsigned long, flags); \ + } while (0) + #ifdef CONFIG_TRACE_IRQFLAGS extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); @@ -59,10 +65,15 @@ #define local_irq_disable() \ do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) #define local_irq_save(flags) \ - do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0) + do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + raw_local_irq_save(flags); \ + trace_hardirqs_off(); \ + } while (0) #define local_irq_restore(flags) \ do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ if (raw_irqs_disabled_flags(flags)) { \ raw_local_irq_restore(flags); \ trace_hardirqs_off(); \ @@ -78,8 +89,16 @@ */ # define raw_local_irq_disable() local_irq_disable() # define raw_local_irq_enable() local_irq_enable() -# define raw_local_irq_save(flags) local_irq_save(flags) -# define raw_local_irq_restore(flags) local_irq_restore(flags) +# define raw_local_irq_save(flags) \ + do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + local_irq_save(flags); \ + } while (0) +# define raw_local_irq_restore(flags) \ + do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + local_irq_restore(flags); \ + } while (0) #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT @@ -89,7 +108,11 @@ raw_safe_halt(); \ } while (0) -#define local_save_flags(flags) raw_local_save_flags(flags) +#define local_save_flags(flags) \ + do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + raw_local_save_flags(flags); \ + } while (0) #define irqs_disabled() \ ({ \ @@ -99,7 +122,11 @@ raw_irqs_disabled_flags(flags); \ }) -#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) +#define irqs_disabled_flags(flags) \ +({ \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + raw_irqs_disabled_flags(flags); \ +}) #endif 
/* CONFIG_X86 */ #endif patches/mm-concurrent-pagecache.patch0000664000077200007720000003717610655544576017270 0ustar mingomingoSubject: mm: concurrent pagecache write side Remove the tree_lock, change address_space::nrpages to atomic_long_t because its not protected any longer and use the concurrent radix tree API to protect the modifying radix tree operations. The tree_lock is actually renamed to priv_lock and its only remaining user will be the __flush_dcache_page logic on arm an parisc. Another potential user would be the per address_space node mask allocation Christoph is working on. [ BUG: the NFS client code seems to rely on mapping->tree_lock in some hidden way, which makes it crash... ] Signed-off-by: Peter Zijlstra --- fs/buffer.c | 7 ++++--- fs/inode.c | 2 +- include/asm-arm/cacheflush.h | 4 ++-- include/asm-parisc/cacheflush.h | 4 ++-- include/linux/fs.h | 12 ++++++------ mm/filemap.c | 17 +++++++++-------- mm/migrate.c | 12 ++++++------ mm/page-writeback.c | 39 +++++++++++++++++++++++---------------- mm/swap_state.c | 18 ++++++++++-------- mm/swapfile.c | 2 -- mm/truncate.c | 3 --- mm/vmscan.c | 4 ---- 12 files changed, 63 insertions(+), 61 deletions(-) Index: linux-rt-rebase.q/fs/buffer.c =================================================================== --- linux-rt-rebase.q.orig/fs/buffer.c +++ linux-rt-rebase.q/fs/buffer.c @@ -685,18 +685,19 @@ static int __set_page_dirty(struct page return 0; lock_page_ref_irq(page); - spin_lock(&mapping->tree_lock); if (page->mapping) { /* Race with truncate? */ + DEFINE_RADIX_TREE_CONTEXT(ctx, &mapping->page_tree); WARN_ON_ONCE(warn && !PageUptodate(page)); if (mapping_cap_account_dirty(mapping)) { __inc_zone_page_state(page, NR_FILE_DIRTY); task_io_account_write(PAGE_CACHE_SIZE); } - radix_tree_tag_set(&mapping->page_tree, + radix_tree_lock(&ctx); + radix_tree_tag_set(ctx.tree, page_index(page), PAGECACHE_TAG_DIRTY); + radix_tree_unlock(&ctx); } - spin_unlock(&mapping->tree_lock); unlock_page_ref_irq(page); __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); Index: linux-rt-rebase.q/fs/inode.c =================================================================== --- linux-rt-rebase.q.orig/fs/inode.c +++ linux-rt-rebase.q/fs/inode.c @@ -193,7 +193,7 @@ void inode_init_once(struct inode *inode mutex_init(&inode->i_mutex); init_rwsem(&inode->i_alloc_sem); INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); - spin_lock_init(&inode->i_data.tree_lock); + spin_lock_init(&inode->i_data.priv_lock); spin_lock_init(&inode->i_data.i_mmap_lock); INIT_LIST_HEAD(&inode->i_data.private_list); spin_lock_init(&inode->i_data.private_lock); Index: linux-rt-rebase.q/include/asm-arm/cacheflush.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/cacheflush.h +++ linux-rt-rebase.q/include/asm-arm/cacheflush.h @@ -413,9 +413,9 @@ static inline void flush_anon_page(struc } #define flush_dcache_mmap_lock(mapping) \ - spin_lock_irq(&(mapping)->tree_lock) + spin_lock_irq(&(mapping)->priv_lock) #define flush_dcache_mmap_unlock(mapping) \ - spin_unlock_irq(&(mapping)->tree_lock) + spin_unlock_irq(&(mapping)->priv_lock) #define flush_icache_user_range(vma,page,addr,len) \ flush_dcache_page(page) Index: linux-rt-rebase.q/include/asm-parisc/cacheflush.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-parisc/cacheflush.h +++ linux-rt-rebase.q/include/asm-parisc/cacheflush.h @@ -45,9 +45,9 @@ void flush_cache_mm(struct mm_struct *mm extern void 
flush_dcache_page(struct page *page); #define flush_dcache_mmap_lock(mapping) \ - spin_lock_irq(&(mapping)->tree_lock) + spin_lock_irq(&(mapping)->priv_lock) #define flush_dcache_mmap_unlock(mapping) \ - spin_unlock_irq(&(mapping)->tree_lock) + spin_unlock_irq(&(mapping)->priv_lock) #define flush_icache_page(vma,page) do { \ flush_kernel_dcache_page(page); \ Index: linux-rt-rebase.q/include/linux/fs.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/fs.h +++ linux-rt-rebase.q/include/linux/fs.h @@ -441,13 +441,13 @@ struct backing_dev_info; struct address_space { struct inode *host; /* owner: inode, block_device */ struct radix_tree_root page_tree; /* radix tree of all pages */ - spinlock_t tree_lock; /* and lock protecting it */ + spinlock_t priv_lock; /* spinlock protecting various stuffs */ unsigned int i_mmap_writable;/* count VM_SHARED mappings */ struct prio_tree_root i_mmap; /* tree of private and shared mappings */ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ spinlock_t i_mmap_lock; /* protect tree, count, list */ unsigned int truncate_count; /* Cover race condition with truncate */ - unsigned long __nrpages; /* number of total pages */ + atomic_long_t __nrpages; /* number of total pages */ pgoff_t writeback_index;/* writeback starts here */ const struct address_space_operations *a_ops; /* methods */ unsigned long flags; /* error bits/gfp mask */ @@ -464,22 +464,22 @@ struct address_space { static inline void mapping_nrpages_init(struct address_space *mapping) { - mapping->__nrpages = 0; + mapping->__nrpages = (atomic_long_t)ATOMIC_LONG_INIT(0); } static inline unsigned long mapping_nrpages(struct address_space *mapping) { - return mapping->__nrpages; + return (unsigned long)atomic_long_read(&mapping->__nrpages); } static inline void mapping_nrpages_inc(struct address_space *mapping) { - mapping->__nrpages++; + atomic_long_inc(&mapping->__nrpages); } static inline void mapping_nrpages_dec(struct address_space *mapping) { - mapping->__nrpages--; + atomic_long_dec(&mapping->__nrpages); } struct block_device { Index: linux-rt-rebase.q/mm/filemap.c =================================================================== --- linux-rt-rebase.q.orig/mm/filemap.c +++ linux-rt-rebase.q/mm/filemap.c @@ -115,8 +115,11 @@ generic_file_direct_IO(int rw, struct ki void __remove_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; + DEFINE_RADIX_TREE_CONTEXT(ctx, &mapping->page_tree); - radix_tree_delete(&mapping->page_tree, page->index); + radix_tree_lock(&ctx); + radix_tree_delete(ctx.tree, page->index); + radix_tree_unlock(&ctx); page->mapping = NULL; mapping_nrpages_dec(mapping); __dec_zone_page_state(page, NR_FILE_PAGES); @@ -125,14 +128,10 @@ void __remove_from_page_cache(struct pag void remove_from_page_cache(struct page *page) { - struct address_space *mapping = page->mapping; - BUG_ON(!PageLocked(page)); lock_page_ref_irq(page); - spin_lock(&mapping->tree_lock); __remove_from_page_cache(page); - spin_unlock(&mapping->tree_lock); unlock_page_ref_irq(page); } @@ -443,9 +442,12 @@ int add_to_page_cache(struct page *page, int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (error == 0) { + DEFINE_RADIX_TREE_CONTEXT(ctx, &mapping->page_tree); + lock_page_ref_irq(page); - spin_lock(&mapping->tree_lock); - error = radix_tree_insert(&mapping->page_tree, offset, page); + radix_tree_lock(&ctx); + error = radix_tree_insert(ctx.tree, offset, page); + radix_tree_unlock(&ctx); if 
(!error) { page_cache_get(page); SetPageLocked(page); @@ -454,7 +456,6 @@ int add_to_page_cache(struct page *page, mapping_nrpages_inc(mapping); __inc_zone_page_state(page, NR_FILE_PAGES); } - spin_unlock(&mapping->tree_lock); unlock_page_ref_irq(page); radix_tree_preload_end(); } Index: linux-rt-rebase.q/mm/migrate.c =================================================================== --- linux-rt-rebase.q.orig/mm/migrate.c +++ linux-rt-rebase.q/mm/migrate.c @@ -294,6 +294,7 @@ static int migrate_page_move_mapping(str struct page *newpage, struct page *page) { void **pslot; + struct radix_tree_context ctx; if (!mapping) { /* Anonymous page without mapping */ @@ -302,15 +303,14 @@ static int migrate_page_move_mapping(str return 0; } + init_radix_tree_context(&ctx, &mapping->page_tree); lock_page_ref_irq(page); - spin_lock(&mapping->tree_lock); - - pslot = radix_tree_lookup_slot(&mapping->page_tree, - page_index(page)); + radix_tree_lock(&ctx); + pslot = radix_tree_lookup_slot(ctx.tree, page_index(page)); if (page_count(page) != 2 + !!PagePrivate(page) || (struct page *)radix_tree_deref_slot(pslot) != page) { - spin_unlock(&mapping->tree_lock); + radix_tree_unlock(&ctx); unlock_page_ref_irq(page); return -EAGAIN; } @@ -328,7 +328,7 @@ static int migrate_page_move_mapping(str radix_tree_replace_slot(pslot, newpage); page->mapping = NULL; - spin_unlock(&mapping->tree_lock); + radix_tree_unlock(&ctx); /* * If moved to a different zone then also account Index: linux-rt-rebase.q/mm/page-writeback.c =================================================================== --- linux-rt-rebase.q.orig/mm/page-writeback.c +++ linux-rt-rebase.q/mm/page-writeback.c @@ -821,19 +821,21 @@ int __set_page_dirty_nobuffers(struct pa return 1; lock_page_ref_irq(page); - spin_lock(&mapping->tree_lock); mapping2 = page_mapping(page); if (mapping2) { /* Race with truncate? 
*/ + DEFINE_RADIX_TREE_CONTEXT(ctx, &mapping->page_tree); + BUG_ON(mapping2 != mapping); WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); if (mapping_cap_account_dirty(mapping)) { __inc_zone_page_state(page, NR_FILE_DIRTY); task_io_account_write(PAGE_CACHE_SIZE); } - radix_tree_tag_set(&mapping->page_tree, + radix_tree_lock(&ctx); + radix_tree_tag_set(ctx.tree, page_index(page), PAGECACHE_TAG_DIRTY); + radix_tree_unlock(&ctx); } - spin_unlock(&mapping->tree_lock); unlock_page_ref_irq(page); if (mapping->host) { /* !PageAnon && !swapper_space */ @@ -980,13 +982,15 @@ int test_clear_page_writeback(struct pag unsigned long flags; lock_page_ref_irqsave(page, flags); - spin_lock(&mapping->tree_lock); ret = TestClearPageWriteback(page); - if (ret) - radix_tree_tag_clear(&mapping->page_tree, - page_index(page), + if (ret) { + DEFINE_RADIX_TREE_CONTEXT(ctx, &mapping->page_tree); + + radix_tree_lock(&ctx); + radix_tree_tag_clear(ctx.tree, page_index(page), PAGECACHE_TAG_WRITEBACK); - spin_unlock(&mapping->tree_lock); + radix_tree_unlock(&ctx); + } unlock_page_ref_irqrestore(page, flags); } else { ret = TestClearPageWriteback(page); @@ -1003,19 +1007,22 @@ int test_set_page_writeback(struct page if (mapping) { unsigned long flags; + DEFINE_RADIX_TREE_CONTEXT(ctx, &mapping->page_tree); lock_page_ref_irqsave(page, flags); - spin_lock(&mapping->tree_lock); ret = TestSetPageWriteback(page); - if (!ret) - radix_tree_tag_set(&mapping->page_tree, - page_index(page), + if (!ret) { + radix_tree_lock(&ctx); + radix_tree_tag_set(ctx.tree, page_index(page), PAGECACHE_TAG_WRITEBACK); - if (!PageDirty(page)) - radix_tree_tag_clear(&mapping->page_tree, - page_index(page), + radix_tree_unlock(&ctx); + } + if (!PageDirty(page)) { + radix_tree_lock(&ctx); + radix_tree_tag_clear(ctx.tree, page_index(page), PAGECACHE_TAG_DIRTY); - spin_unlock(&mapping->tree_lock); + radix_tree_unlock(&ctx); + } unlock_page_ref_irqrestore(page, flags); } else { ret = TestSetPageWriteback(page); Index: linux-rt-rebase.q/mm/swap_state.c =================================================================== --- linux-rt-rebase.q.orig/mm/swap_state.c +++ linux-rt-rebase.q/mm/swap_state.c @@ -38,7 +38,6 @@ static struct backing_dev_info swap_back struct address_space swapper_space = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), - .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock), .a_ops = &swap_aops, .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), .backing_dev_info = &swap_backing_dev_info, @@ -79,10 +78,12 @@ static int __add_to_swap_cache(struct pa BUG_ON(PagePrivate(page)); error = radix_tree_preload(gfp_mask); if (!error) { + DEFINE_RADIX_TREE_CONTEXT(ctx, &swapper_space.page_tree); + lock_page_ref_irq(page); - spin_lock(&swapper_space.tree_lock); - error = radix_tree_insert(&swapper_space.page_tree, - entry.val, page); + radix_tree_lock(&ctx); + error = radix_tree_insert(ctx.tree, entry.val, page); + radix_tree_unlock(&ctx); if (!error) { page_cache_get(page); SetPageSwapCache(page); @@ -90,7 +91,6 @@ static int __add_to_swap_cache(struct pa mapping_nrpages_inc(&swapper_space); __inc_zone_page_state(page, NR_FILE_PAGES); } - spin_unlock(&swapper_space.tree_lock); unlock_page_ref_irq(page); radix_tree_preload_end(); } @@ -125,12 +125,16 @@ static int add_to_swap_cache(struct page */ void __delete_from_swap_cache(struct page *page) { + DEFINE_RADIX_TREE_CONTEXT(ctx, &swapper_space.page_tree); + BUG_ON(!PageLocked(page)); BUG_ON(!PageSwapCache(page)); BUG_ON(PageWriteback(page)); 
BUG_ON(PagePrivate(page)); - radix_tree_delete(&swapper_space.page_tree, page_private(page)); + radix_tree_lock(&ctx); + radix_tree_delete(ctx.tree, page_private(page)); + radix_tree_unlock(&ctx); set_page_private(page, 0); ClearPageSwapCache(page); mapping_nrpages_dec(&swapper_space); @@ -203,9 +207,7 @@ void delete_from_swap_cache(struct page entry.val = page_private(page); lock_page_ref_irq(page); - spin_lock(&swapper_space.tree_lock); __delete_from_swap_cache(page); - spin_unlock(&swapper_space.tree_lock); unlock_page_ref_irq(page); swap_free(entry); Index: linux-rt-rebase.q/mm/swapfile.c =================================================================== --- linux-rt-rebase.q.orig/mm/swapfile.c +++ linux-rt-rebase.q/mm/swapfile.c @@ -368,13 +368,11 @@ int remove_exclusive_swap_page(struct pa if (p->swap_map[swp_offset(entry)] == 1) { /* Recheck the page count with the swapcache lock held.. */ lock_page_ref_irq(page); - spin_lock(&swapper_space.tree_lock); if ((page_count(page) == 2) && !PageWriteback(page)) { __delete_from_swap_cache(page); SetPageDirty(page); retval = 1; } - spin_unlock(&swapper_space.tree_lock); unlock_page_ref_irq(page); } spin_unlock(&swap_lock); Index: linux-rt-rebase.q/mm/truncate.c =================================================================== --- linux-rt-rebase.q.orig/mm/truncate.c +++ linux-rt-rebase.q/mm/truncate.c @@ -348,19 +348,16 @@ invalidate_complete_page2(struct address return 0; lock_page_ref_irq(page); - spin_lock(&mapping->tree_lock); if (PageDirty(page)) goto failed; BUG_ON(PagePrivate(page)); __remove_from_page_cache(page); - spin_unlock(&mapping->tree_lock); unlock_page_ref_irq(page); ClearPageUptodate(page); page_cache_release(page); /* pagecache ref */ return 1; failed: - spin_unlock(&mapping->tree_lock); unlock_page_ref_irq(page); return 0; } Index: linux-rt-rebase.q/mm/vmscan.c =================================================================== --- linux-rt-rebase.q.orig/mm/vmscan.c +++ linux-rt-rebase.q/mm/vmscan.c @@ -370,7 +370,6 @@ int remove_mapping(struct address_space BUG_ON(mapping != page_mapping(page)); lock_page_ref_irq(page); - spin_lock(&mapping->tree_lock); /* * The non racy check for a busy page. * @@ -405,13 +404,11 @@ int remove_mapping(struct address_space if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; __delete_from_swap_cache(page); - spin_unlock(&mapping->tree_lock); swap_free(swap); goto free_it; } __remove_from_page_cache(page); - spin_unlock(&mapping->tree_lock); free_it: unlock_page_ref_irq(page); @@ -419,7 +416,6 @@ free_it: return 1; cannot_free: - spin_unlock(&mapping->tree_lock); unlock_page_ref_irq(page); return 0; } patches/preempt-realtime-powerpc-a7.patch0000664000077200007720000001036310655544574020022 0ustar mingomingo To fix the following compile error by changing local_irq_restore() to raw_local_irq_restore(). 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - include/asm-powerpc/hw_irq.h In file included from include/asm/system.h:9, from include/linux/list.h:9, from include/linux/signal.h:8, from arch/powerpc/kernel/asm-offsets.c:16: include/asm/hw_irq.h: In function 'local_get_flags': include/asm/hw_irq.h:23: error: expected expression before '<<' token include/asm/hw_irq.h:24: error: expected expression before '<<' token include/asm/hw_irq.h:25: error: expected expression before ':' token include/asm/hw_irq.h:25: error: expected statement before ')' token - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Signed-off-by: Tsutomu Owa -- owa --- arch/powerpc/kernel/head_64.S | 2 +- arch/powerpc/kernel/irq.c | 2 +- arch/powerpc/kernel/ppc_ksyms.c | 2 +- include/asm-powerpc/hw_irq.h | 18 ++++++++---------- 4 files changed, 11 insertions(+), 13 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/kernel/head_64.S =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/head_64.S +++ linux-rt-rebase.q/arch/powerpc/kernel/head_64.S @@ -1392,7 +1392,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISER * handles any interrupts pending at this point. */ ld r3,SOFTE(r1) - bl .local_irq_restore + bl .raw_local_irq_restore b 11f /* Here we have a page fault that hash_page can't handle. */ Index: linux-rt-rebase.q/arch/powerpc/kernel/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/irq.c +++ linux-rt-rebase.q/arch/powerpc/kernel/irq.c @@ -111,7 +111,7 @@ static inline void set_soft_enabled(unsi : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } -void notrace local_irq_restore(unsigned long en) +void notrace raw_local_irq_restore(unsigned long en) { /* * get_paca()->soft_enabled = en; Index: linux-rt-rebase.q/arch/powerpc/kernel/ppc_ksyms.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/ppc_ksyms.c +++ linux-rt-rebase.q/arch/powerpc/kernel/ppc_ksyms.c @@ -49,7 +49,7 @@ #endif #ifdef CONFIG_PPC64 -EXPORT_SYMBOL(local_irq_restore); +EXPORT_SYMBOL(raw_local_irq_restore); #endif #ifdef CONFIG_PPC32 Index: linux-rt-rebase.q/include/asm-powerpc/hw_irq.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/hw_irq.h +++ linux-rt-rebase.q/include/asm-powerpc/hw_irq.h @@ -16,18 +16,18 @@ extern void timer_interrupt(struct pt_re #ifdef CONFIG_PPC64 #include -static inline unsigned long local_get_flags(void) +static inline unsigned long raw_local_get_flags(void) { unsigned long flags; -<<<<<<< delete extern unsigned long local_get_flags(void); -<<<<<<< delete extern unsigned long local_irq_disable(void); + __asm__ __volatile__("lbz %0,%1(13)" + : "=r" (flags) : "i" (offsetof(struct paca_struct, soft_enabled))); return flags; } -static inline unsigned long local_irq_disable(void) +static inline unsigned long raw_local_irq_disable(void) { unsigned long flags, zero; @@ -53,8 +53,8 @@ extern void raw_local_irq_restore(unsign #define raw_irqs_disabled_flags(flags) ((flags) == 0) -#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) -#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) +#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) +#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) #define hard_irq_disable() \ do { \ @@ -63,17 +63,15 @@ extern void 
raw_local_irq_restore(unsign get_paca()->hard_enabled = 0; \ } while(0) -#else +#else /* CONFIG_PPC64 */ #if defined(CONFIG_BOOKE) #define SET_MSR_EE(x) mtmsr(x) #define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") -<<<<<<< delete #define local_irq_restore(flags) do { \ -#define raw_local_irq_restore(flags) do { \ #else #define SET_MSR_EE(x) mtmsr(x) #define raw_local_irq_restore(flags) mtmsr(flags) -#endif +#endif /* CONFIG_BOOKE */ static inline void raw_local_irq_disable(void) { patches/timer-freq-tweaks.patch0000664000077200007720000000751510655544576016142 0ustar mingomingo--- kernel/rcutorture.c | 2 +- mm/slab.c | 26 +++++++++++++++----------- 2 files changed, 16 insertions(+), 12 deletions(-) Index: linux-rt-rebase.q/kernel/rcutorture.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rcutorture.c +++ linux-rt-rebase.q/kernel/rcutorture.c @@ -600,7 +600,7 @@ rcu_torture_reader(void *arg) if (p == NULL) { /* Wait for rcu_torture_writer to get underway */ cur_ops->readunlock(idx); - schedule_timeout_interruptible(HZ); + schedule_timeout_interruptible(round_jiffies_relative(HZ)); continue; } if (p->rtort_mbtest == 0) Index: linux-rt-rebase.q/mm/slab.c =================================================================== --- linux-rt-rebase.q.orig/mm/slab.c +++ linux-rt-rebase.q/mm/slab.c @@ -1051,7 +1051,7 @@ static int transfer_objects(struct array #ifndef CONFIG_NUMA #define drain_alien_cache(cachep, alien) do { } while (0) -#define reap_alien(cachep, l3, this_cpu) do { } while (0) +#define reap_alien(cachep, l3, this_cpu) 0 static inline struct array_cache **alloc_alien_cache(int node, int limit) { @@ -1149,7 +1149,7 @@ static void __drain_alien_cache(struct k /* * Called from cache_reap() to regularly drain alien caches round robin. */ -static void +static int reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3, int *this_cpu) { int node = per_cpu(reap_node, *this_cpu); @@ -1160,8 +1160,10 @@ reap_alien(struct kmem_cache *cachep, st if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { __drain_alien_cache(cachep, ac, node, this_cpu); spin_unlock_irq(&ac->lock); + return 1; } } + return 0; } static void drain_alien_cache(struct kmem_cache *cachep, @@ -2488,7 +2490,7 @@ static void check_spinlock_acquired_node #define check_spinlock_acquired_node(x, y) do { } while(0) #endif -static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, +static int drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, struct array_cache *ac, int force, int node); @@ -4118,14 +4120,15 @@ static int enable_cpucache(struct kmem_c * Drain an array if it contains any elements taking the l3 lock only if * necessary. Note that the l3 listlock also protects the array_cache * if drain_array() is used on the shared array. 
+ * returns non-zero if some work is done */ -void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, +int drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, struct array_cache *ac, int force, int node) { int tofree, this_cpu; if (!ac || !ac->avail) - return; + return 0; if (ac->touched && !force) { ac->touched = 0; } else { @@ -4141,6 +4144,7 @@ void drain_array(struct kmem_cache *cach } slab_spin_unlock_irq(&l3->list_lock, this_cpu); } + return 1; } /** @@ -4178,10 +4182,10 @@ static void cache_reap(struct work_struc */ l3 = searchp->nodelists[node]; - reap_alien(searchp, l3, &this_cpu); + work_done += reap_alien(searchp, l3, &this_cpu); - drain_array(searchp, l3, cpu_cache_get(searchp, this_cpu), - 0, node); + work_done += drain_array(searchp, l3, + cpu_cache_get(searchp, this_cpu), 0, node); /* * These are racy checks but it does not matter @@ -4192,7 +4196,7 @@ static void cache_reap(struct work_struc l3->next_reap = jiffies + REAPTIMEOUT_LIST3; - drain_array(searchp, l3, l3->shared, 0, node); + work_done += drain_array(searchp, l3, l3->shared, 0, node); if (l3->free_touched) l3->free_touched = 0; @@ -4211,9 +4215,9 @@ next: next_reap_node(); out: /* Set up the next iteration */ - schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC)); + schedule_delayed_work(work, + round_jiffies_relative((1+!work_done) * REAPTIMEOUT_CPUC)); } - #ifdef CONFIG_PROC_FS static void print_slabinfo_header(struct seq_file *m) patches/ep93xx-clockevents-fix.patch0000664000077200007720000000251410655544571017022 0ustar mingomingoSubject: timer patch for ep93xx From: Manfred Gruber hi ! this patch is necessary to get latencies < 1ms for ep93xx armv4t with 2.6.21.5-rt18. Signed-off-by: Manfred Gruber --- arch/arm/mach-ep93xx/core.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) Index: linux/arch/arm/mach-ep93xx/core.c =================================================================== --- linux.orig/arch/arm/mach-ep93xx/core.c +++ linux/arch/arm/mach-ep93xx/core.c @@ -98,9 +98,9 @@ static struct clock_event_device clockev static int ep93xx_timer_interrupt(int irq, void *dev_id) { - __raw_writel(EP93XX_TC_CLEAR, EP93XX_TIMER1_CLEAR); + __raw_writel(EP93XX_TC_CLEAR, EP93XX_TIMER1_CLEAR); - clockevent_ep93xx.event_handler(&clockevent_ep93xx); + clockevent_ep93xx.event_handler(&clockevent_ep93xx); return IRQ_HANDLED; } @@ -108,7 +108,15 @@ static int ep93xx_timer_interrupt(int ir static int ep93xx_set_next_event(unsigned long evt, struct clock_event_device *unused) { + u32 tmode = __raw_readl(EP93XX_TIMER1_CONTROL); + + /* stop timer */ + __raw_writel(tmode & ~EP93XX_TC123_ENABLE, EP93XX_TIMER1_CONTROL); + /* program timer */ __raw_writel(evt, EP93XX_TIMER1_LOAD); + /* start timer */ + __raw_writel(tmode | EP93XX_TC123_ENABLE, EP93XX_TIMER1_CONTROL); + return 0; } patches/hrtimer-trace.patch0000664000077200007720000000436510655544572015335 0ustar mingomingo include/linux/hrtimer.h | 6 ++++++ kernel/hrtimer.c | 5 +++++ kernel/time/clockevents.c | 4 ++++ 3 files changed, 15 insertions(+) Index: linux/include/linux/hrtimer.h =================================================================== --- linux.orig/include/linux/hrtimer.h +++ linux/include/linux/hrtimer.h @@ -253,6 +253,12 @@ static inline ktime_t hrtimer_cb_get_tim extern ktime_t ktime_get(void); extern ktime_t ktime_get_real(void); +# if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) +# define hrtimer_trace(a,b) trace_special_u64((a).tv64,b) +# else +# define hrtimer_trace(a,b) 
trace_special((a).tv.sec,(a).tv.nsec,b) +# endif + /* Exported timer functions: */ /* Initialize timers: */ Index: linux/kernel/hrtimer.c =================================================================== --- linux.orig/kernel/hrtimer.c +++ linux/kernel/hrtimer.c @@ -706,6 +706,8 @@ static void enqueue_hrtimer(struct hrtim struct hrtimer *entry; int leftmost = 1; + hrtimer_trace(timer->expires, (unsigned long) timer); + /* * Find the right place in the rbtree: */ @@ -1039,6 +1041,7 @@ void hrtimer_interrupt(struct clock_even retry: now = ktime_get(); + hrtimer_trace(now, 0); expires_next.tv64 = KTIME_MAX; @@ -1067,6 +1070,8 @@ void hrtimer_interrupt(struct clock_even break; } + hrtimer_trace(timer->expires, (unsigned long) timer); + /* Move softirq callbacks to the pending list */ if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { __remove_hrtimer(timer, base, Index: linux/kernel/time/clockevents.c =================================================================== --- linux.orig/kernel/time/clockevents.c +++ linux/kernel/time/clockevents.c @@ -12,12 +12,14 @@ */ #include +#include #include #include #include #include #include #include +#include /* The registered clock event devices */ static LIST_HEAD(clockevent_devices); @@ -80,6 +82,8 @@ int clockevents_program_event(struct clo delta = ktime_to_ns(ktime_sub(expires, now)); + hrtimer_trace(expires, (unsigned long) delta); + if (delta <= 0) return -ETIME; patches/rt-mutex-arm.patch0000664000077200007720000002602410655544573015126 0ustar mingomingo--- arch/arm/kernel/entry-armv.S | 4 +- arch/arm/kernel/entry-common.S | 10 +++--- arch/arm/kernel/process.c | 10 ++++-- arch/arm/kernel/semaphore.c | 31 +++++++++++++++----- include/asm-arm/semaphore.h | 61 ++++++++++++++++++++++++++++------------- include/asm-arm/thread_info.h | 2 + 6 files changed, 80 insertions(+), 38 deletions(-) Index: linux-rt-rebase.q/arch/arm/kernel/entry-armv.S =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/entry-armv.S +++ linux-rt-rebase.q/arch/arm/kernel/entry-armv.S @@ -204,7 +204,7 @@ __irq_svc: irq_handler #ifdef CONFIG_PREEMPT ldr r0, [tsk, #TI_FLAGS] @ get flags - tst r0, #_TIF_NEED_RESCHED + tst r0, #_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_DELAYED blne svc_preempt preempt_return: ldr r0, [tsk, #TI_PREEMPT] @ read preempt value @@ -235,7 +235,7 @@ svc_preempt: str r7, [tsk, #TI_PREEMPT] @ expects preempt_count == 0 1: bl preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS - tst r0, #_TIF_NEED_RESCHED + tst r0, #_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_DELAYED beq preempt_return @ go again b 1b #endif Index: linux-rt-rebase.q/arch/arm/kernel/entry-common.S =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/entry-common.S +++ linux-rt-rebase.q/arch/arm/kernel/entry-common.S @@ -46,7 +46,7 @@ ret_fast_syscall: fast_work_pending: str r0, [sp, #S_R0+S_OFF]! @ returned r0 work_pending: - tst r1, #_TIF_NEED_RESCHED + tst r1, #_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_DELAYED bne work_resched tst r1, #_TIF_SIGPENDING beq no_work_pending @@ -56,7 +56,8 @@ work_pending: b ret_slow_syscall @ Check work again work_resched: - bl schedule + bl __schedule + /* * "slow" syscall return path. "why" tells us if this was a real syscall. 
*/ @@ -396,6 +397,7 @@ ENTRY(sys_oabi_call_table) #include "calls.S" #undef ABI #undef OBSOLETE +#endif #ifdef CONFIG_FRAME_POINTER @@ -445,7 +447,7 @@ mcount: ldr ip, =mcount_enabled @ leave early, if disabled ldr ip, [ip] cmp ip, #0 - moveq pc,lr + moveq pc, lr mov ip, sp stmdb sp!, {r0 - r3, fp, ip, lr, pc} @ create stack frame @@ -504,5 +506,3 @@ arm_return_addr: #endif -#endif - Index: linux-rt-rebase.q/arch/arm/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/process.c +++ linux-rt-rebase.q/arch/arm/kernel/process.c @@ -134,7 +134,7 @@ static void default_idle(void) cpu_relax(); else { local_irq_disable(); - if (!need_resched()) { + if (!need_resched() && !need_resched_delayed()) { timer_dyn_reprogram(); arch_idle(); } @@ -166,13 +166,15 @@ void cpu_idle(void) idle = default_idle; leds_event(led_idle_start); tick_nohz_stop_sched_tick(); - while (!need_resched()) + while (!need_resched() && !need_resched_delayed()) idle(); leds_event(led_idle_end); tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); - schedule(); + local_irq_disable(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); + local_irq_enable(); } } Index: linux-rt-rebase.q/arch/arm/kernel/semaphore.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/semaphore.c +++ linux-rt-rebase.q/arch/arm/kernel/semaphore.c @@ -49,14 +49,16 @@ * we cannot lose wakeup events. */ -void __up(struct semaphore *sem) +fastcall void __attribute_used__ __compat_up(struct compat_semaphore *sem) { wake_up(&sem->wait); } +EXPORT_SYMBOL(__compat_up); + static DEFINE_SPINLOCK(semaphore_lock); -void __sched __down(struct semaphore * sem) +fastcall void __attribute_used__ __sched __compat_down(struct compat_semaphore * sem) { struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); @@ -89,7 +91,9 @@ void __sched __down(struct semaphore * s wake_up(&sem->wait); } -int __sched __down_interruptible(struct semaphore * sem) +EXPORT_SYMBOL(__compat_down); + +fastcall int __attribute_used__ __sched __compat_down_interruptible(struct compat_semaphore * sem) { int retval = 0; struct task_struct *tsk = current; @@ -140,6 +144,8 @@ int __sched __down_interruptible(struct return retval; } +EXPORT_SYMBOL(__compat_down_interruptible); + /* * Trylock failed - make sure we correct for * having decremented the count. @@ -148,7 +154,7 @@ int __sched __down_interruptible(struct * single "cmpxchg" without failure cases, * but then it wouldn't work on a 386. */ -int __down_trylock(struct semaphore * sem) +fastcall int __attribute_used__ __compat_down_trylock(struct compat_semaphore * sem) { int sleepers; unsigned long flags; @@ -168,6 +174,15 @@ int __down_trylock(struct semaphore * se return 1; } +EXPORT_SYMBOL(__compat_down_trylock); + +fastcall int compat_sem_is_locked(struct compat_semaphore *sem) +{ + return (int) atomic_read(&sem->count) < 0; +} + +EXPORT_SYMBOL(compat_sem_is_locked); + /* * The semaphore operations have a special calling sequence that * allow us to do a simpler in-line version of them. 
These routines @@ -185,7 +200,7 @@ asm(" .section .sched.text,\"ax\",%progb __down_failed: \n\ stmfd sp!, {r0 - r4, lr} \n\ mov r0, ip \n\ - bl __down \n\ + bl __compat_down \n\ ldmfd sp!, {r0 - r4, pc} \n\ \n\ .align 5 \n\ @@ -193,7 +208,7 @@ __down_failed: \n\ __down_interruptible_failed: \n\ stmfd sp!, {r0 - r4, lr} \n\ mov r0, ip \n\ - bl __down_interruptible \n\ + bl __compat_down_interruptible \n\ mov ip, r0 \n\ ldmfd sp!, {r0 - r4, pc} \n\ \n\ @@ -202,7 +217,7 @@ __down_interruptible_failed: \n\ __down_trylock_failed: \n\ stmfd sp!, {r0 - r4, lr} \n\ mov r0, ip \n\ - bl __down_trylock \n\ + bl __compat_down_trylock \n\ mov ip, r0 \n\ ldmfd sp!, {r0 - r4, pc} \n\ \n\ @@ -211,7 +226,7 @@ __down_trylock_failed: \n\ __up_wakeup: \n\ stmfd sp!, {r0 - r4, lr} \n\ mov r0, ip \n\ - bl __up \n\ + bl __compat_up \n\ ldmfd sp!, {r0 - r4, pc} \n\ "); Index: linux-rt-rebase.q/include/asm-arm/semaphore.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/semaphore.h +++ linux-rt-rebase.q/include/asm-arm/semaphore.h @@ -5,46 +5,66 @@ #define __ASM_ARM_SEMAPHORE_H #include + +#ifdef CONFIG_PREEMPT_RT +# include +#endif + #include #include #include +/* + * On !PREEMPT_RT all semaphores are compat: + */ +#ifndef CONFIG_PREEMPT_RT +# define semaphore compat_semaphore +#endif + #include #include -struct semaphore { +struct compat_semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; }; -#define __SEMAPHORE_INIT(name, cnt) \ +#define __COMPAT_SEMAPHORE_INITIALIZER(name, cnt) \ { \ .count = ATOMIC_INIT(cnt), \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ } -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ - struct semaphore name = __SEMAPHORE_INIT(name,count) +#define __COMPAT_MUTEX_INITIALIZER(name) \ + __COMPAT_SEMAPHORE_INITIALIZER(name,1) + +#define __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct compat_semaphore name = __COMPAT_SEMAPHORE_INITIALIZER(name,count) -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) -#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) +#define COMPAT_DECLARE_MUTEX(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,1) +#define COMPAT_DECLARE_MUTEX_LOCKED(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,0) -static inline void sema_init(struct semaphore *sem, int val) +static inline void compat_sema_init(struct compat_semaphore *sem, int val) { atomic_set(&sem->count, val); sem->sleepers = 0; init_waitqueue_head(&sem->wait); } -static inline void init_MUTEX(struct semaphore *sem) +static inline void compat_init_MUTEX(struct compat_semaphore *sem) +{ + compat_sema_init(sem, 1); +} + +static inline void compat_init_MUTEX_LOCKED(struct compat_semaphore *sem) { - sema_init(sem, 1); + compat_sema_init(sem, 0); } -static inline void init_MUTEX_LOCKED(struct semaphore *sem) +static inline int compat_sema_count(struct compat_semaphore *sem) { - sema_init(sem, 0); + return atomic_read(&sem->count); } /* @@ -55,16 +75,18 @@ asmlinkage int __down_interruptible_fai asmlinkage int __down_trylock_failed(void); asmlinkage void __up_wakeup(void); -extern void __down(struct semaphore * sem); -extern int __down_interruptible(struct semaphore * sem); -extern int __down_trylock(struct semaphore * sem); -extern void __up(struct semaphore * sem); +extern void __compat_up(struct compat_semaphore *sem); +extern int __compat_down_interruptible(struct compat_semaphore * sem); +extern int __compat_down_trylock(struct compat_semaphore * sem); +extern void __compat_down(struct 
compat_semaphore * sem); + +extern int compat_sem_is_locked(struct compat_semaphore *sem); /* * This is ugly, but we want the default case to fall through. * "__down" is the actual routine that waits... */ -static inline void down(struct semaphore * sem) +static inline void compat_down(struct compat_semaphore * sem) { might_sleep(); __down_op(sem, __down_failed); @@ -74,13 +96,13 @@ static inline void down(struct semaphore * This is ugly, but we want the default case to fall through. * "__down_interruptible" is the actual routine that waits... */ -static inline int down_interruptible (struct semaphore * sem) +static inline int compat_down_interruptible (struct compat_semaphore * sem) { might_sleep(); return __down_op_ret(sem, __down_interruptible_failed); } -static inline int down_trylock(struct semaphore *sem) +static inline int compat_down_trylock(struct compat_semaphore *sem) { return __down_op_ret(sem, __down_trylock_failed); } @@ -91,9 +113,10 @@ static inline int down_trylock(struct se * The default case (no contention) will result in NO * jumps for both down() and up(). */ -static inline void up(struct semaphore * sem) +static inline void compat_up(struct compat_semaphore * sem) { __up_op(sem, __up_wakeup); } +#include #endif Index: linux-rt-rebase.q/include/asm-arm/thread_info.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/thread_info.h +++ linux-rt-rebase.q/include/asm-arm/thread_info.h @@ -141,6 +141,7 @@ extern void iwmmxt_task_switch(struct th */ #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 1 +#define TIF_NEED_RESCHED_DELAYED 3 #define TIF_SYSCALL_TRACE 8 #define TIF_POLLING_NRFLAG 16 #define TIF_USING_IWMMXT 17 @@ -149,6 +150,7 @@ extern void iwmmxt_task_switch(struct th #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NEED_RESCHED_DELAYED (1<pstr->xrun_debug) { + user_trace_stop(); snd_printd(KERN_DEBUG "XRUN: pcmC%dD%d%c\n", substream->pcm->card->number, substream->pcm->device, patches/schedule_on_each_cpu-enhance.patch0000664000077200007720000001013310655544577020300 0ustar mingomingoIt always bothered me a bit that on_each_cpu() and schedule_on_each_cpu() had wildly different interfaces. Rectify this and convert the sole in-kernel user to the new interface. 
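With this change schedule_on_each_cpu() takes the same (func, info, retry, wait) arguments as on_each_cpu(). A minimal caller sketch, patterned on the mm/swap.c conversion below (the helper names here are made up for illustration):

#include <linux/workqueue.h>

/* the callback now takes a void *info cookie, just like on_each_cpu() */
static void drain_local_state(void *info)
{
	/* runs in keventd context on each online CPU */
	(void)info;
}

static int drain_all_cpus(void)
{
	/* retry is ignored; wait=1 blocks until every CPU's work has run */
	return schedule_on_each_cpu(drain_local_state, NULL, 0, 1);
}
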
Signed-off-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/workqueue.h | 2 - kernel/workqueue.c | 63 ++++++++++++++++++++++++++++++++++++++-------- mm/swap.c | 4 +- 3 files changed, 56 insertions(+), 13 deletions(-) Index: linux-rt-rebase.q/include/linux/workqueue.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/workqueue.h +++ linux-rt-rebase.q/include/linux/workqueue.h @@ -145,7 +145,7 @@ extern int FASTCALL(schedule_delayed_wor extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); extern int schedule_on_each_cpu_wq(struct workqueue_struct *wq, work_func_t func); -extern int schedule_on_each_cpu(work_func_t func); +extern int schedule_on_each_cpu(void (*func)(void *info), void *info, int retry, int wait); extern int current_is_keventd(void); extern int keventd_up(void); Index: linux-rt-rebase.q/kernel/workqueue.c =================================================================== --- linux-rt-rebase.q.orig/kernel/workqueue.c +++ linux-rt-rebase.q/kernel/workqueue.c @@ -571,9 +571,28 @@ int schedule_delayed_work_on(int cpu, } EXPORT_SYMBOL(schedule_delayed_work_on); +struct schedule_on_each_cpu_work { + struct work_struct work; + void (*func)(void *info); + void *info; +}; + +static void schedule_on_each_cpu_func(struct work_struct *work) +{ + struct schedule_on_each_cpu_work *w; + + w = container_of(work, typeof(*w), work); + w->func(w->info); + + kfree(w); +} + /** * schedule_on_each_cpu - call a function on each online CPU from keventd * @func: the function to call + * @info: data to pass to function + * @retry: ignored + * @wait: wait for completion * * Returns zero on success. * Returns -ve errno on failure. @@ -582,27 +601,51 @@ EXPORT_SYMBOL(schedule_delayed_work_on); * * schedule_on_each_cpu() is very slow. 
*/ -int schedule_on_each_cpu(work_func_t func) +int schedule_on_each_cpu(void (*func)(void *info), void *info, int retry, int wait) { int cpu; - struct work_struct *works; + struct schedule_on_each_cpu_work **works; + int err = 0; - works = alloc_percpu(struct work_struct); + works = kzalloc(sizeof(void *)*nr_cpu_ids, GFP_KERNEL); if (!works) return -ENOMEM; + for_each_possible_cpu(cpu) { + works[cpu] = kmalloc_node(sizeof(struct schedule_on_each_cpu_work), + GFP_KERNEL, cpu_to_node(cpu)); + if (!works[cpu]) { + err = -ENOMEM; + goto out; + } + } + preempt_disable(); /* CPU hotplug */ for_each_online_cpu(cpu) { - struct work_struct *work = per_cpu_ptr(works, cpu); + struct schedule_on_each_cpu_work *work; - INIT_WORK(work, func); - set_bit(WORK_STRUCT_PENDING, work_data_bits(work)); - __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work); + work = works[cpu]; + works[cpu] = NULL; + + work->func = func; + work->info = info; + INIT_WORK(&work->work, schedule_on_each_cpu_func); + set_bit(WORK_STRUCT_PENDING, work_data_bits(&work->work)); + __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), &work->work); } preempt_enable(); - flush_workqueue(keventd_wq); - free_percpu(works); - return 0; + +out: + for_each_possible_cpu(cpu) { + if (works[cpu]) + kfree(works[cpu]); + } + kfree(works); + + if (!err && wait) + flush_workqueue(keventd_wq); + + return err; } /** Index: linux-rt-rebase.q/mm/swap.c =================================================================== --- linux-rt-rebase.q.orig/mm/swap.c +++ linux-rt-rebase.q/mm/swap.c @@ -216,7 +216,7 @@ void lru_add_drain(void) } #ifdef CONFIG_NUMA -static void lru_add_drain_per_cpu(struct work_struct *dummy) +static void lru_add_drain_per_cpu(void *info) { lru_add_drain(); } @@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(struct */ int lru_add_drain_all(void) { - return schedule_on_each_cpu(lru_add_drain_per_cpu); + return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL, 0, 1); } #else patches/arm-cmpxchg.patch0000664000077200007720000000243310655544573014770 0ustar mingomingo include/asm-arm/atomic.h | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) Index: linux-rt-rebase.q/include/asm-arm/atomic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/atomic.h +++ linux-rt-rebase.q/include/asm-arm/atomic.h @@ -173,6 +173,41 @@ static inline void atomic_clear_mask(uns raw_local_irq_restore(flags); } +#ifndef CONFIG_SMP +/* + * Atomic compare and exchange. + */ +#define __HAVE_ARCH_CMPXCHG 1 + +extern unsigned long wrong_size_cmpxchg(volatile void *ptr); + +static inline unsigned long __cmpxchg(volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + unsigned long flags, prev; + volatile unsigned long *p = ptr; + + if (size == 4) { + local_irq_save(flags); + if ((prev = *p) == old) + *p = new; + local_irq_restore(flags); + return(prev); + } else + return wrong_size_cmpxchg(ptr); +} + +#define cmpxchg(ptr,o,n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ +}) + +#endif + #endif /* __LINUX_ARM_ARCH__ */ #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) patches/ich-force-hpet-restructure-hpet-generic-clock-code.patch0000664000077200007720000001446110655544570024316 0ustar mingomingoFrom: Venki Pallipadi Restructure and rename legacy replacement mode HPET timer support. 
Just the code structural changes and should be zero functionality change. Signed-off-by: Venkatesh Pallipadi Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Andi Kleen Cc: john stultz Cc: Greg KH Signed-off-by: Andrew Morton --- arch/i386/kernel/hpet.c | 148 ++++++++++++++++++++++++++---------------------- 1 file changed, 83 insertions(+), 65 deletions(-) Index: linux/arch/i386/kernel/hpet.c =================================================================== --- linux.orig/arch/i386/kernel/hpet.c +++ linux/arch/i386/kernel/hpet.c @@ -149,9 +149,9 @@ static void hpet_reserve_platform_timers */ static unsigned long hpet_period; -static void hpet_set_mode(enum clock_event_mode mode, +static void hpet_legacy_set_mode(enum clock_event_mode mode, struct clock_event_device *evt); -static int hpet_next_event(unsigned long delta, +static int hpet_legacy_next_event(unsigned long delta, struct clock_event_device *evt); /* @@ -160,8 +160,8 @@ static int hpet_next_event(unsigned long static struct clock_event_device hpet_clockevent = { .name = "hpet", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = hpet_set_mode, - .set_next_event = hpet_next_event, + .set_mode = hpet_legacy_set_mode, + .set_next_event = hpet_legacy_next_event, .shift = 32, .irq = 0, }; @@ -178,7 +178,7 @@ static void hpet_start_counter(void) hpet_writel(cfg, HPET_CFG); } -static void hpet_enable_int(void) +static void hpet_enable_legacy_int(void) { unsigned long cfg = hpet_readl(HPET_CFG); @@ -187,7 +187,39 @@ static void hpet_enable_int(void) hpet_legacy_int_enabled = 1; } -static void hpet_set_mode(enum clock_event_mode mode, +static void hpet_legacy_clockevent_register(void) +{ + uint64_t hpet_freq; + + /* Start HPET legacy interrupts */ + hpet_enable_legacy_int(); + + /* + * The period is a femto seconds value. We need to calculate the + * scaled math multiplication factor for nanosecond to hpet tick + * conversion. + */ + hpet_freq = 1000000000000000ULL; + do_div(hpet_freq, hpet_period); + hpet_clockevent.mult = div_sc((unsigned long) hpet_freq, + NSEC_PER_SEC, 32); + /* Calculate the min / max delta */ + hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, + &hpet_clockevent); + hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30, + &hpet_clockevent); + + /* + * Start hpet with the boot cpu mask and make it + * global after the IO_APIC has been initialized. 
+ */ + hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); + clockevents_register_device(&hpet_clockevent); + global_clock_event = &hpet_clockevent; + printk(KERN_DEBUG "hpet clockevent registered\n"); +} + +static void hpet_legacy_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { unsigned long cfg, cmp, now; @@ -228,12 +260,12 @@ static void hpet_set_mode(enum clock_eve break; case CLOCK_EVT_MODE_RESUME: - hpet_enable_int(); + hpet_enable_legacy_int(); break; } } -static int hpet_next_event(unsigned long delta, +static int hpet_legacy_next_event(unsigned long delta, struct clock_event_device *evt) { unsigned long cnt; @@ -273,58 +305,11 @@ static struct clocksource clocksource_hp #endif }; -/* - * Try to setup the HPET timer - */ -int __init hpet_enable(void) +static int hpet_clocksource_register(void) { - unsigned long id; - uint64_t hpet_freq; u64 tmp, start, now; cycle_t t1; - if (!is_hpet_capable()) - return 0; - - hpet_set_mapping(); - - /* - * Read the period and check for a sane value: - */ - hpet_period = hpet_readl(HPET_PERIOD); - if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) - goto out_nohpet; - - /* - * The period is a femto seconds value. We need to calculate the - * scaled math multiplication factor for nanosecond to hpet tick - * conversion. - */ - hpet_freq = 1000000000000000ULL; - do_div(hpet_freq, hpet_period); - hpet_clockevent.mult = div_sc((unsigned long) hpet_freq, - NSEC_PER_SEC, 32); - /* Calculate the min / max delta */ - hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, - &hpet_clockevent); - hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30, - &hpet_clockevent); - - /* - * Read the HPET ID register to retrieve the IRQ routing - * information and the number of channels - */ - id = hpet_readl(HPET_ID); - -#ifdef CONFIG_HPET_EMULATE_RTC - /* - * The legacy routing mode needs at least two channels, tick timer - * and the rtc emulation channel. - */ - if (!(id & HPET_ID_NUMBER)) - goto out_nohpet; -#endif - /* Start the counter */ hpet_start_counter(); @@ -346,7 +331,7 @@ int __init hpet_enable(void) if (t1 == read_hpet()) { printk(KERN_WARNING "HPET counter not counting. HPET disabled\n"); - goto out_nohpet; + return -ENODEV; } /* Initialize and register HPET clocksource @@ -367,15 +352,48 @@ int __init hpet_enable(void) clocksource_register(&clocksource_hpet); + return 0; +} + +/* + * Try to setup the HPET timer + */ +int __init hpet_enable(void) +{ + unsigned long id; + + if (!is_hpet_capable()) + return 0; + + hpet_set_mapping(); + + /* + * Read the period and check for a sane value: + */ + hpet_period = hpet_readl(HPET_PERIOD); + if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) + goto out_nohpet; + + /* + * Read the HPET ID register to retrieve the IRQ routing + * information and the number of channels + */ + id = hpet_readl(HPET_ID); + +#ifdef CONFIG_HPET_EMULATE_RTC + /* + * The legacy routing mode needs at least two channels, tick timer + * and the rtc emulation channel. + */ + if (!(id & HPET_ID_NUMBER)) + goto out_nohpet; +#endif + + if (hpet_clocksource_register()) + goto out_nohpet; + if (id & HPET_ID_LEGSUP) { - hpet_enable_int(); - /* - * Start hpet with the boot cpu mask and make it - * global after the IO_APIC has been initialized. 
- */ - hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); - clockevents_register_device(&hpet_clockevent); - global_clock_event = &hpet_clockevent; + hpet_legacy_clockevent_register(); return 1; } return 0; patches/apic-dumpstack.patch0000664000077200007720000000067010655544571015466 0ustar mingomingo arch/i386/kernel/apic.c | 1 + 1 file changed, 1 insertion(+) Index: linux/arch/i386/kernel/apic.c =================================================================== --- linux.orig/arch/i386/kernel/apic.c +++ linux/arch/i386/kernel/apic.c @@ -1309,6 +1309,7 @@ void smp_error_interrupt(struct pt_regs */ printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n", smp_processor_id(), v , v1); + dump_stack(); irq_exit(); } patches/preempt-realtime-drivers-pci-hotplug.patch0000664000077200007720000000143310655544575021744 0ustar mingomingoSubject: pci/hotplug/cpqphp_ctrl.c: remove stale BKL use From: Ingo Molnar remove stale BKL use from drivers/pci/hotplug/cpqphp_ctrl.c. Signed-off-by: Ingo Molnar --- drivers/pci/hotplug/cpqphp_ctrl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) Index: linux-rt-rebase.q/drivers/pci/hotplug/cpqphp_ctrl.c =================================================================== --- linux-rt-rebase.q.orig/drivers/pci/hotplug/cpqphp_ctrl.c +++ linux-rt-rebase.q/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1746,10 +1746,8 @@ static void pushbutton_helper_thread(uns static int event_thread(void* data) { struct controller *ctrl; - lock_kernel(); + daemonize("phpd_event"); - - unlock_kernel(); while (1) { dbg("!!!!event_thread sleeping\n"); patches/remove-check-pgt-cache-calls.patch0000664000077200007720000000256110655544574020062 0ustar mingomingo--- arch/i386/kernel/process.c | 1 - arch/x86_64/kernel/process.c | 1 - arch/x86_64/kernel/smp.c | 1 - 3 files changed, 3 deletions(-) Index: linux-rt-rebase.q/arch/i386/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/process.c +++ linux-rt-rebase.q/arch/i386/kernel/process.c @@ -185,7 +185,6 @@ void cpu_idle(void) if (__get_cpu_var(cpu_idle_state)) __get_cpu_var(cpu_idle_state) = 0; - check_pgt_cache(); tick_nohz_stop_sched_tick(); rmb(); idle = pm_idle; Index: linux-rt-rebase.q/arch/x86_64/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/process.c +++ linux-rt-rebase.q/arch/x86_64/kernel/process.c @@ -211,7 +211,6 @@ void cpu_idle (void) tick_nohz_stop_sched_tick(); - check_pgt_cache(); rmb(); idle = pm_idle; if (!idle) Index: linux-rt-rebase.q/arch/x86_64/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/smp.c +++ linux-rt-rebase.q/arch/x86_64/kernel/smp.c @@ -241,7 +241,6 @@ void flush_tlb_mm (struct mm_struct * mm } if (!cpus_empty(cpu_mask)) flush_tlb_others(cpu_mask, mm, FLUSH_ALL); - check_pgt_cache(); preempt_enable(); } EXPORT_SYMBOL(flush_tlb_mm); patches/ich-force-hpet-make-generic-time-capable-of-switching-broadcast-timer.patch0000664000077200007720000000467410655544570027710 0ustar mingomingoFrom: Venki Pallipadi Auto-detect the presence of HPET on ICH5 or newer platforms and enable HPET for broadcast timer. This gives a bigger upperlimit for tickless time tick and improves the power consumption in comparison to PIT as broadcast timer. This patch: Change the broadcast timer, if a timer with higher rating becomes available. 
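The switch decision in tick_check_broadcast_device() reduces to a rating comparison. A standalone sketch of that predicate, using the same struct clock_event_device fields as the hunk below (the helper name is made up for illustration):

#include <linux/clockchips.h>

/*
 * A candidate broadcast device is rejected if it stops in deep C-states
 * or if the currently installed device already has an equal or higher
 * rating; otherwise it takes over as the broadcast timer.
 */
static int broadcast_candidate_ok(struct clock_event_device *curdev,
				  struct clock_event_device *newdev)
{
	if (newdev->features & CLOCK_EVT_FEAT_C3STOP)
		return 0;
	if (curdev && curdev->rating >= newdev->rating)
		return 0;
	return 1;
}
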
Signed-off-by: Venkatesh Pallipadi Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Andi Kleen Cc: john stultz Cc: Greg KH Signed-off-by: Andrew Morton --- kernel/time/tick-broadcast.c | 13 ++++++------- kernel/time/tick-common.c | 4 ++-- 2 files changed, 8 insertions(+), 9 deletions(-) Index: linux/kernel/time/tick-broadcast.c =================================================================== --- linux.orig/kernel/time/tick-broadcast.c +++ linux/kernel/time/tick-broadcast.c @@ -64,8 +64,9 @@ static void tick_broadcast_start_periodi */ int tick_check_broadcast_device(struct clock_event_device *dev) { - if (tick_broadcast_device.evtdev || - (dev->features & CLOCK_EVT_FEAT_C3STOP)) + if ((tick_broadcast_device.evtdev && + tick_broadcast_device.evtdev->rating >= dev->rating) || + (dev->features & CLOCK_EVT_FEAT_C3STOP)) return 0; clockevents_exchange_device(NULL, dev); @@ -519,11 +520,9 @@ static void tick_broadcast_clear_oneshot */ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { - if (bc->mode != CLOCK_EVT_MODE_ONESHOT) { - bc->event_handler = tick_handle_oneshot_broadcast; - clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); - bc->next_event.tv64 = KTIME_MAX; - } + bc->event_handler = tick_handle_oneshot_broadcast; + clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + bc->next_event.tv64 = KTIME_MAX; } /* Index: linux/kernel/time/tick-common.c =================================================================== --- linux.orig/kernel/time/tick-common.c +++ linux/kernel/time/tick-common.c @@ -200,7 +200,7 @@ static int tick_check_new_device(struct cpu = smp_processor_id(); if (!cpu_isset(cpu, newdev->cpumask)) - goto out; + goto out_bc; td = &per_cpu(tick_cpu_device, cpu); curdev = td->evtdev; @@ -265,7 +265,7 @@ out_bc: */ if (tick_check_broadcast_device(newdev)) ret = NOTIFY_STOP; -out: + spin_unlock_irqrestore(&tick_device_lock, flags); return ret; patches/preempt-irqs-mips.patch0000664000077200007720000000211510655544573016155 0ustar mingomingo--- arch/mips/kernel/time.c | 2 +- arch/mips/sibyte/sb1250/irq.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/arch/mips/kernel/time.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/time.c +++ linux-rt-rebase.q/arch/mips/kernel/time.c @@ -281,7 +281,7 @@ unsigned int mips_hpt_frequency; static struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_PERCPU, + .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_NODELAY, .name = "timer", }; Index: linux-rt-rebase.q/arch/mips/sibyte/sb1250/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/sibyte/sb1250/irq.c +++ linux-rt-rebase.q/arch/mips/sibyte/sb1250/irq.c @@ -242,7 +242,7 @@ static irqreturn_t sb1250_dummy_handler static struct irqaction sb1250_dummy_action = { .handler = sb1250_dummy_handler, - .flags = 0, + .flags = IRQF_NODELAY, .mask = CPU_MASK_NONE, .name = "sb1250-private", .next = NULL, patches/2.6.21-rc6-lockless4-__add_to_swap_cache-stuff.patch0000664000077200007720000000267410655544576022743 0ustar mingomingoFrom: Nick Piggin Subject: [patch 4/9] mm: __add_to_swap_cache stuff __add_to_swap_cache unconditionally sets the page locked. Instead, just ensure that the page is locked (which is a usual invariant for manipulating swapcache). 
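The page lock becomes the caller's responsibility: __add_to_swap_cache() now only asserts BUG_ON(!PageLocked(page)) instead of setting the bit itself. A sketch of the caller-side pattern, mirroring the read_swap_cache_async() hunk below (the wrapper name is made up for illustration):

#include <linux/mm.h>
#include <linux/swap.h>

/*
 * Allocate a fresh page and hand it over already locked; the plain
 * SetPageLocked() is safe here because nobody else can see the page yet.
 */
static struct page *alloc_locked_swap_page(gfp_t gfp_mask,
					   struct vm_area_struct *vma,
					   unsigned long addr)
{
	struct page *page = alloc_page_vma(gfp_mask, vma, addr);

	if (page)
		SetPageLocked(page);
	return page;
}
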
Signed-off-by: Nick Piggin --- mm/swap_state.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/mm/swap_state.c =================================================================== --- linux-rt-rebase.q.orig/mm/swap_state.c +++ linux-rt-rebase.q/mm/swap_state.c @@ -74,6 +74,7 @@ static int __add_to_swap_cache(struct pa { int error; + BUG_ON(!PageLocked(page)); BUG_ON(PageSwapCache(page)); BUG_ON(PagePrivate(page)); error = radix_tree_preload(gfp_mask); @@ -83,7 +84,6 @@ static int __add_to_swap_cache(struct pa entry.val, page); if (!error) { page_cache_get(page); - SetPageLocked(page); SetPageSwapCache(page); set_page_private(page, entry.val); total_swapcache_pages++; @@ -338,6 +338,7 @@ struct page *read_swap_cache_async(swp_e vma, addr); if (!new_page) break; /* Out of memory */ + SetPageLocked(new_page);/* could be non-atomic op */ } /* @@ -361,7 +362,9 @@ struct page *read_swap_cache_async(swp_e } } while (err != -ENOENT && err != -ENOMEM); - if (new_page) + if (new_page) { + ClearPageLocked(new_page); page_cache_release(new_page); + } return found_page; } patches/hrtimer-no-printk.patch0000664000077200007720000000211210655544575016147 0ustar mingomingo--- kernel/hrtimer.c | 3 +-- kernel/time/timekeeping.c | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/kernel/hrtimer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/hrtimer.c +++ linux-rt-rebase.q/kernel/hrtimer.c @@ -578,8 +578,7 @@ static int hrtimer_switch_to_hres(void) /* "Retrigger" the interrupt to get things going */ retrigger_next_event(NULL); local_irq_restore(flags); - printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", - smp_processor_id()); + return 1; } Index: linux-rt-rebase.q/kernel/time/timekeeping.c =================================================================== --- linux-rt-rebase.q.orig/kernel/time/timekeeping.c +++ linux-rt-rebase.q/kernel/time/timekeeping.c @@ -239,8 +239,10 @@ static void change_clocksource(void) tick_clock_notify(); +#ifndef CONFIG_PREEMPT_RT printk(KERN_INFO "Time: %s clocksource has been installed.\n", clock->name); +#endif } #else static inline void change_clocksource(void) { } patches/idle-stop-critical-timing.patch0000664000077200007720000000163010655544572017534 0ustar mingomingo--- drivers/acpi/processor_idle.c | 8 ++++++++ 1 file changed, 8 insertions(+) Index: linux/drivers/acpi/processor_idle.c =================================================================== --- linux.orig/drivers/acpi/processor_idle.c +++ linux/drivers/acpi/processor_idle.c @@ -837,6 +837,12 @@ static inline void acpi_idle_update_bm_r */ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) { + /* + * We have irqs disabled here, so stop latency tracing + * at this point and restart it after we return: + */ + stop_critical_timing(); + if (cx->space_id == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cx); @@ -849,6 +855,8 @@ static inline void acpi_idle_do_entry(st gets asserted in time to freeze execution properly. 
*/ unused = inl(acpi_gbl_FADT.xpm_timer_block.address); } + + touch_critical_timing(); } /** patches/mm-fix-latency.patch0000664000077200007720000000554010655544571015414 0ustar mingomingoFrom: Hugh Dickins Subject: reduce pagetable-freeing latencies 2.6.15-rc1 moved the unlinking of a vma from its prio_tree and anon_vma into free_pgtables: so the vma is hidden from rmap and vmtruncate before freeing its page tables, allowing safe descent without page table lock. But free_pgtables is still called with preemption disabled, and Lee Revell has now detected high latency there. The right fix will be to rework the mmu_gathering, not to need preemption disabled; but for now an ugly CONFIG_PREEMPT block in free_pgtables, to make an initial unlinking pass with preemption enabled - made uglier by CONFIG_IA64 definitions (only ia64 actually uses the start and end given to tlb_finish_mmu, and our floor and ceiling don't quite work for those). These CONFIG choices being to minimize the additional TLB flushing. Signed-off-by: Hugh Dickins Signed-off-by: Ingo Molnar -- mm/memory.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) Index: linux/mm/memory.c =================================================================== --- linux.orig/mm/memory.c +++ linux/mm/memory.c @@ -264,18 +264,48 @@ void free_pgd_range(struct mmu_gather ** flush_tlb_pgtables((*tlb)->mm, start, end); } +#ifdef CONFIG_IA64 +#define tlb_start_addr(tlb) (tlb)->start_addr +#define tlb_end_addr(tlb) (tlb)->end_addr +#else +#define tlb_start_addr(tlb) 0UL /* only ia64 really uses it */ +#define tlb_end_addr(tlb) 0UL /* only ia64 really uses it */ +#endif + void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling) { +#ifdef CONFIG_PREEMPT + struct vm_area_struct *unlink = vma; + int fullmm = (*tlb)->fullmm; + + if (!vma) /* Sometimes when exiting after an oops */ + return; + if (vma->vm_next) + tlb_finish_mmu(*tlb, tlb_start_addr(*tlb), tlb_end_addr(*tlb)); + /* + * Hide vma from rmap and vmtruncate before freeeing pgtables, + * with preemption enabled, except when unmapping just one area. + */ + while (unlink) { + anon_vma_unlink(unlink); + unlink_file_vma(unlink); + unlink = unlink->vm_next; + } + if (vma->vm_next) + *tlb = tlb_gather_mmu(vma->vm_mm, fullmm); +#endif while (vma) { struct vm_area_struct *next = vma->vm_next; unsigned long addr = vma->vm_start; +#ifndef CONFIG_PREEMPT /* * Hide vma from rmap and vmtruncate before freeing pgtables */ anon_vma_unlink(vma); unlink_file_vma(vma); +#endif if (is_vm_hugetlb_page(vma)) { hugetlb_free_pgd_range(tlb, addr, vma->vm_end, @@ -288,8 +318,10 @@ void free_pgtables(struct mmu_gather **t && !is_vm_hugetlb_page(next)) { vma = next; next = vma->vm_next; +#ifndef CONFIG_PREEMPT anon_vma_unlink(vma); unlink_file_vma(vma); +#endif } free_pgd_range(tlb, addr, vma->vm_end, floor, next? 
next->vm_start: ceiling);
patches/preempt-irqs-ppc-fix-more-fasteoi.patch0000664000077200007720000000704710655544573021154 0ustar mingomingoFrom sshtylyov@ru.mvista.com Thu May 17 15:18:39 2007
Return-Path:
X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian
X-Spam-Level:
X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb
Received: from imap.sh.mvista.com (unknown [63.81.120.155]) by mail.tglx.de (Postfix) with ESMTP id BFD3A65C065 for ; Thu, 17 May 2007 15:18:39 +0200 (CEST)
Received: from wasted.dev.rtsoft.ru (unknown [10.150.0.9]) by imap.sh.mvista.com (Postfix) with ESMTP id 8E3CB3EC9; Thu, 17 May 2007 06:18:35 -0700 (PDT)
From: Sergei Shtylyov
Organization: MontaVista Software Inc.
To: mingo@elte.hu, tglx@linutronix.de
Subject: [PATCH 2.6.21-rt2] PowerPC: revert fix for threaded fasteoi IRQ handlers
Date: Thu, 17 May 2007 17:20:08 +0400
User-Agent: KMail/1.5
Cc: linux-kernel@vger.kernel.org, linuxppc-dev@ozlabs.org, dwalker@mvista.com
References: <200611192243.34850.sshtylyov@ru.mvista.com>
In-Reply-To: <200611192243.34850.sshtylyov@ru.mvista.com>
MIME-Version: 1.0
Content-Disposition: inline
Message-Id: <200705171719.34968.sshtylyov@ru.mvista.com>
Content-Type: text/plain; charset="us-ascii"
X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/
Content-Transfer-Encoding: 8bit

Revert the change to the "fasteoi" type chips: now that handle_fasteoi_irq()
has been fixed, that change has become meaningless (and even dangerous -- as
was the case with Celleb, which has been fixed earlier)...

Signed-off-by: Sergei Shtylyov

---
The patch in question wasn't even initially accepted but then was erroneously
restored along with the TOD patch. I've asked to revert it but to no avail,
so here's the formal patch to revert it at last...
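
For illustration only -- a minimal sketch, not part of Sergei's patch: after
the revert, a fasteoi-type PIC keeps its .eoi callback and simply carries no
.ack, and stays bound to the fasteoi flow handler. The diff below restores
exactly this shape for the iSeries, XICS and MPIC chips; all "example_*"
names here are hypothetical stand-ins.

#include <linux/irq.h>

/*
 * Hypothetical PIC callbacks -- stand-ins for the real register accesses.
 */
static void example_mask_irq(unsigned int irq)
{
	/* mask the source at the interrupt controller */
}

static void example_unmask_irq(unsigned int irq)
{
	/* unmask the source at the interrupt controller */
}

static void example_end_irq(unsigned int irq)
{
	/* send the EOI to the interrupt controller */
}

static struct irq_chip example_pic = {
	.name		= "EXAMPLE-PIC",
	.mask		= example_mask_irq,
	.unmask		= example_unmask_irq,
	.eoi		= example_end_irq,	/* no .ack needed for the fasteoi flow */
};

/* Bind the chip to the fasteoi flow handler, as the PICs touched below are. */
static void example_map_irq(unsigned int virq)
{
	set_irq_chip_and_handler(virq, &example_pic, handle_fasteoi_irq);
}
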
arch/powerpc/platforms/iseries/irq.c | 1 - arch/powerpc/platforms/pseries/xics.c | 2 -- arch/powerpc/sysdev/mpic.c | 1 - 3 files changed, 4 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/platforms/iseries/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/iseries/irq.c +++ linux-rt-rebase.q/arch/powerpc/platforms/iseries/irq.c @@ -278,7 +278,6 @@ static struct irq_chip iseries_pic = { .shutdown = iseries_shutdown_IRQ, .unmask = iseries_enable_IRQ, .mask = iseries_disable_IRQ, - .ack = iseries_end_IRQ, .eoi = iseries_end_IRQ }; Index: linux-rt-rebase.q/arch/powerpc/platforms/pseries/xics.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/pseries/xics.c +++ linux-rt-rebase.q/arch/powerpc/platforms/pseries/xics.c @@ -461,7 +461,6 @@ static struct irq_chip xics_pic_direct = .startup = xics_startup, .mask = xics_mask_irq, .unmask = xics_unmask_irq, - .ack = xics_eoi_direct, .eoi = xics_eoi_direct, .set_affinity = xics_set_affinity }; @@ -472,7 +471,6 @@ static struct irq_chip xics_pic_lpar = { .startup = xics_startup, .mask = xics_mask_irq, .unmask = xics_unmask_irq, - .ack = xics_eoi_lpar, .eoi = xics_eoi_lpar, .set_affinity = xics_set_affinity }; Index: linux-rt-rebase.q/arch/powerpc/sysdev/mpic.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/sysdev/mpic.c +++ linux-rt-rebase.q/arch/powerpc/sysdev/mpic.c @@ -835,7 +835,6 @@ int mpic_set_irq_type(unsigned int virq, static struct irq_chip mpic_irq_chip = { .mask = mpic_mask_irq, .unmask = mpic_unmask_irq, - .ack = mpic_end_irq, .eoi = mpic_end_irq, .set_type = mpic_set_irq_type, }; patches/preempt-realtime-console.patch0000664000077200007720000000352010655544575017476 0ustar mingomingo--- drivers/video/console/fbcon.c | 5 +++-- include/linux/console.h | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/drivers/video/console/fbcon.c =================================================================== --- linux-rt-rebase.q.orig/drivers/video/console/fbcon.c +++ linux-rt-rebase.q/drivers/video/console/fbcon.c @@ -1295,7 +1295,6 @@ static void fbcon_clear(struct vc_data * { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; - struct display *p = &fb_display[vc->vc_num]; u_int y_break; @@ -1324,10 +1323,11 @@ static void fbcon_putcs(struct vc_data * struct display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; - if (!fbcon_is_inactive(vc, info)) + if (!fbcon_is_inactive(vc, info)) { ops->putcs(vc, info, s, count, real_y(p, ypos), xpos, get_color(vc, info, scr_readw(s), 1), get_color(vc, info, scr_readw(s), 0)); + } } static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos) @@ -3311,6 +3311,7 @@ static const struct consw fb_con = { .con_screen_pos = fbcon_screen_pos, .con_getxy = fbcon_getxy, .con_resize = fbcon_resize, + .con_preemptible = 1, }; static struct notifier_block fbcon_event_notifier = { Index: linux-rt-rebase.q/include/linux/console.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/console.h +++ linux-rt-rebase.q/include/linux/console.h @@ -54,6 +54,7 @@ struct consw { void (*con_invert_region)(struct vc_data *, u16 *, int); u16 *(*con_screen_pos)(struct vc_data *, int); unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); + int 
con_preemptible; // can it reschedule from within printk? }; extern const struct consw *conswitchp; patches/kprobes-preempt-fix.patch0000664000077200007720000000257310655544571016470 0ustar mingomingo arch/i386/kernel/kprobes.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) Index: linux/arch/i386/kernel/kprobes.c =================================================================== --- linux.orig/arch/i386/kernel/kprobes.c +++ linux/arch/i386/kernel/kprobes.c @@ -326,7 +326,7 @@ ss_probe: /* Boost up -- we can execute copied instructions directly */ reset_current_kprobe(); regs->eip = (unsigned long)p->ainsn.insn; - preempt_enable_no_resched(); + preempt_enable(); return 1; } #endif @@ -335,7 +335,7 @@ ss_probe: return 1; no_kprobe: - preempt_enable_no_resched(); + preempt_enable(); return ret; } @@ -566,7 +566,7 @@ static int __kprobes post_kprobe_handler } reset_current_kprobe(); out: - preempt_enable_no_resched(); + preempt_enable(); /* * if somebody else is singlestepping across a probe point, eflags @@ -600,7 +600,7 @@ static int __kprobes kprobe_fault_handle restore_previous_kprobe(kcb); else reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: @@ -734,7 +734,7 @@ int __kprobes longjmp_break_handler(stru *regs = kcb->jprobe_saved_regs; memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, MIN_STACK_SIZE(stack_addr)); - preempt_enable_no_resched(); + preempt_enable(); return 1; } return 0; patches/rt-slab-new.patch0000664000077200007720000011314410655544573014717 0ustar mingomingo new slab port. Signed-off-by: Ingo Molnar --- mm/slab.c | 496 +++++++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 319 insertions(+), 177 deletions(-) Index: linux-rt-rebase.q/mm/slab.c =================================================================== --- linux-rt-rebase.q.orig/mm/slab.c +++ linux-rt-rebase.q/mm/slab.c @@ -116,6 +116,63 @@ #include /* + * On !PREEMPT_RT, raw irq flags are used as a per-CPU locking + * mechanism. + * + * On PREEMPT_RT, we use per-CPU locks for this. That's why the + * calling convention is changed slightly: a new 'flags' argument + * is passed to 'irq disable/enable' - the PREEMPT_RT code stores + * the CPU number of the lock there. + */ +#ifndef CONFIG_PREEMPT_RT +# define slab_irq_disable(cpu) \ + do { local_irq_disable(); (cpu) = smp_processor_id(); } while (0) +# define slab_irq_enable(cpu) local_irq_enable() +# define slab_irq_save(flags, cpu) \ + do { local_irq_save(flags); (cpu) = smp_processor_id(); } while (0) +# define slab_irq_restore(flags, cpu) local_irq_restore(flags) +/* + * In the __GFP_WAIT case we enable/disable interrupts on !PREEMPT_RT, + * which has no per-CPU locking effect since we are holding the cache + * lock in that case already. + * + * (On PREEMPT_RT, these are NOPs, but we have to drop/get the irq locks.) 
+ */ +# define slab_irq_disable_nort() local_irq_disable() +# define slab_irq_enable_nort() local_irq_enable() +# define slab_irq_disable_rt(flags) do { (void)(flags); } while (0) +# define slab_irq_enable_rt(flags) do { (void)(flags); } while (0) +# define slab_spin_lock_irq(lock, cpu) \ + do { spin_lock_irq(lock); (cpu) = smp_processor_id(); } while (0) +# define slab_spin_unlock_irq(lock, cpu) \ + spin_unlock_irq(lock) +# define slab_spin_lock_irqsave(lock, flags, cpu) \ + do { spin_lock_irqsave(lock, flags); (cpu) = smp_processor_id(); } while (0) +# define slab_spin_unlock_irqrestore(lock, flags, cpu) \ + do { spin_unlock_irqrestore(lock, flags); } while (0) +#else +DEFINE_PER_CPU_LOCKED(int, slab_irq_locks) = { 0, }; +# define slab_irq_disable(cpu) (void)get_cpu_var_locked(slab_irq_locks, &(cpu)) +# define slab_irq_enable(cpu) put_cpu_var_locked(slab_irq_locks, cpu) +# define slab_irq_save(flags, cpu) \ + do { slab_irq_disable(cpu); (void) (flags); } while (0) +# define slab_irq_restore(flags, cpu) \ + do { slab_irq_enable(cpu); (void) (flags); } while (0) +# define slab_irq_disable_rt(cpu) slab_irq_disable(cpu) +# define slab_irq_enable_rt(cpu) slab_irq_enable(cpu) +# define slab_irq_disable_nort() do { } while (0) +# define slab_irq_enable_nort() do { } while (0) +# define slab_spin_lock_irq(lock, cpu) \ + do { slab_irq_disable(cpu); spin_lock(lock); } while (0) +# define slab_spin_unlock_irq(lock, cpu) \ + do { spin_unlock(lock); slab_irq_enable(cpu); } while (0) +# define slab_spin_lock_irqsave(lock, flags, cpu) \ + do { slab_irq_disable(cpu); spin_lock_irqsave(lock, flags); } while (0) +# define slab_spin_unlock_irqrestore(lock, flags, cpu) \ + do { spin_unlock_irqrestore(lock, flags); slab_irq_enable(cpu); } while (0) +#endif + +/* * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. * 0 for faster, smaller code (especially in the critical paths). 
* @@ -314,7 +371,7 @@ struct kmem_list3 __initdata initkmem_li static int drain_freelist(struct kmem_cache *cache, struct kmem_list3 *l3, int tofree); static void free_block(struct kmem_cache *cachep, void **objpp, int len, - int node); + int node, int *this_cpu); static int enable_cpucache(struct kmem_cache *cachep); static void cache_reap(struct work_struct *unused); @@ -758,9 +815,10 @@ int slab_is_available(void) static DEFINE_PER_CPU(struct delayed_work, reap_work); -static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) +static inline struct array_cache * +cpu_cache_get(struct kmem_cache *cachep, int this_cpu) { - return cachep->array[smp_processor_id()]; + return cachep->array[this_cpu]; } static inline struct kmem_cache *__find_general_cachep(size_t size, @@ -993,7 +1051,7 @@ static int transfer_objects(struct array #ifndef CONFIG_NUMA #define drain_alien_cache(cachep, alien) do { } while (0) -#define reap_alien(cachep, l3) do { } while (0) +#define reap_alien(cachep, l3, this_cpu) do { } while (0) static inline struct array_cache **alloc_alien_cache(int node, int limit) { @@ -1004,7 +1062,8 @@ static inline void free_alien_cache(stru { } -static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +static inline int +cache_free_alien(struct kmem_cache *cachep, void *objp, int *this_cpu) { return 0; } @@ -1016,14 +1075,15 @@ static inline void *alternate_node_alloc } static inline void *____cache_alloc_node(struct kmem_cache *cachep, - gfp_t flags, int nodeid) + gfp_t flags, int nodeid, int *this_cpu) { return NULL; } #else /* CONFIG_NUMA */ -static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); +static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, + int nodeid, int *this_cpu); static void *alternate_node_alloc(struct kmem_cache *, gfp_t); static struct array_cache **alloc_alien_cache(int node, int limit) @@ -1065,7 +1125,8 @@ static void free_alien_cache(struct arra } static void __drain_alien_cache(struct kmem_cache *cachep, - struct array_cache *ac, int node) + struct array_cache *ac, int node, + int *this_cpu) { struct kmem_list3 *rl3 = cachep->nodelists[node]; @@ -1079,7 +1140,7 @@ static void __drain_alien_cache(struct k if (rl3->shared) transfer_objects(rl3->shared, ac, ac->limit); - free_block(cachep, ac->entry, ac->avail, node); + free_block(cachep, ac->entry, ac->avail, node, this_cpu); ac->avail = 0; spin_unlock(&rl3->list_lock); } @@ -1088,15 +1149,16 @@ static void __drain_alien_cache(struct k /* * Called from cache_reap() to regularly drain alien caches round robin. 
*/ -static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) +static void +reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3, int *this_cpu) { - int node = __get_cpu_var(reap_node); + int node = per_cpu(reap_node, *this_cpu); if (l3->alien) { struct array_cache *ac = l3->alien[node]; if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { - __drain_alien_cache(cachep, ac, node); + __drain_alien_cache(cachep, ac, node, this_cpu); spin_unlock_irq(&ac->lock); } } @@ -1105,21 +1167,22 @@ static void reap_alien(struct kmem_cache static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien) { - int i = 0; + int i = 0, this_cpu; struct array_cache *ac; unsigned long flags; for_each_online_node(i) { ac = alien[i]; if (ac) { - spin_lock_irqsave(&ac->lock, flags); - __drain_alien_cache(cachep, ac, i); - spin_unlock_irqrestore(&ac->lock, flags); + slab_spin_lock_irqsave(&ac->lock, flags, this_cpu); + __drain_alien_cache(cachep, ac, i, &this_cpu); + slab_spin_unlock_irqrestore(&ac->lock, flags, this_cpu); } } } -static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +static inline int +cache_free_alien(struct kmem_cache *cachep, void *objp, int *this_cpu) { struct slab *slabp = virt_to_slab(objp); int nodeid = slabp->nodeid; @@ -1143,17 +1206,18 @@ static inline int cache_free_alien(struc spin_lock(&alien->lock); if (unlikely(alien->avail == alien->limit)) { STATS_INC_ACOVERFLOW(cachep); - __drain_alien_cache(cachep, alien, nodeid); + __drain_alien_cache(cachep, alien, nodeid, this_cpu); } alien->entry[alien->avail++] = objp; spin_unlock(&alien->lock); } else { spin_lock(&(cachep->nodelists[nodeid])->list_lock); - free_block(cachep, &objp, 1, nodeid); + free_block(cachep, &objp, 1, nodeid, this_cpu); spin_unlock(&(cachep->nodelists[nodeid])->list_lock); } return 1; } + #endif static int __cpuinit cpuup_callback(struct notifier_block *nfb, @@ -1164,6 +1228,7 @@ static int __cpuinit cpuup_callback(stru struct kmem_list3 *l3 = NULL; int node = cpu_to_node(cpu); const int memsize = sizeof(struct kmem_list3); + int this_cpu; switch (action) { case CPU_LOCK_ACQUIRE: @@ -1200,11 +1265,11 @@ static int __cpuinit cpuup_callback(stru cachep->nodelists[node] = l3; } - spin_lock_irq(&cachep->nodelists[node]->list_lock); + slab_spin_lock_irq(&cachep->nodelists[node]->list_lock, this_cpu); cachep->nodelists[node]->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; - spin_unlock_irq(&cachep->nodelists[node]->list_lock); + slab_spin_unlock_irq(&cachep->nodelists[node]->list_lock, this_cpu); } /* @@ -1236,7 +1301,7 @@ static int __cpuinit cpuup_callback(stru l3 = cachep->nodelists[node]; BUG_ON(!l3); - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); if (!l3->shared) { /* * We are serialised from CPU_DEAD or @@ -1251,7 +1316,7 @@ static int __cpuinit cpuup_callback(stru alien = NULL; } #endif - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); kfree(shared); free_alien_cache(alien); } @@ -1295,6 +1360,7 @@ static int __cpuinit cpuup_callback(stru struct array_cache *nc; struct array_cache *shared; struct array_cache **alien; + int this_cpu; cpumask_t mask; mask = node_to_cpumask(node); @@ -1306,29 +1372,31 @@ static int __cpuinit cpuup_callback(stru if (!l3) goto free_array_cache; - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); /* Free limit for this kmem_list3 */ l3->free_limit -= cachep->batchcount; if (nc) - free_block(cachep, 
nc->entry, nc->avail, node); + free_block(cachep, nc->entry, nc->avail, node, + &this_cpu); if (!cpus_empty(mask)) { - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, + this_cpu); goto free_array_cache; } shared = l3->shared; if (shared) { free_block(cachep, shared->entry, - shared->avail, node); + shared->avail, node, &this_cpu); l3->shared = NULL; } alien = l3->alien; l3->alien = NULL; - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); kfree(shared); if (alien) { @@ -1370,11 +1438,13 @@ static void init_list(struct kmem_cache int nodeid) { struct kmem_list3 *ptr; + int this_cpu; ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); BUG_ON(!ptr); - local_irq_disable(); + WARN_ON(spin_is_locked(&list->list_lock)); + slab_irq_disable(this_cpu); memcpy(ptr, list, sizeof(struct kmem_list3)); /* * Do not assume that spinlocks can be initialized via memcpy: @@ -1383,7 +1453,7 @@ static void init_list(struct kmem_cache MAKE_ALL_LISTS(cachep, ptr, nodeid); cachep->nodelists[nodeid] = ptr; - local_irq_enable(); + slab_irq_enable(this_cpu); } /* @@ -1527,36 +1597,34 @@ void __init kmem_cache_init(void) /* 4) Replace the bootstrap head arrays */ { struct array_cache *ptr; + int this_cpu; ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); - local_irq_disable(); - BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); - memcpy(ptr, cpu_cache_get(&cache_cache), - sizeof(struct arraycache_init)); + slab_irq_disable(this_cpu); + BUG_ON(cpu_cache_get(&cache_cache, this_cpu) != &initarray_cache.cache); + memcpy(ptr, cpu_cache_get(&cache_cache, this_cpu), + sizeof(struct arraycache_init)); /* * Do not assume that spinlocks can be initialized via memcpy: */ spin_lock_init(&ptr->lock); - - cache_cache.array[smp_processor_id()] = ptr; - local_irq_enable(); + cache_cache.array[this_cpu] = ptr; + slab_irq_enable(this_cpu); ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); - local_irq_disable(); - BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) - != &initarray_generic.cache); - memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), - sizeof(struct arraycache_init)); + slab_irq_disable(this_cpu); + BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep, this_cpu) + != &initarray_generic.cache); + memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep, this_cpu), + sizeof(struct arraycache_init)); /* * Do not assume that spinlocks can be initialized via memcpy: */ spin_lock_init(&ptr->lock); - - malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = - ptr; - local_irq_enable(); + malloc_sizes[INDEX_AC].cs_cachep->array[this_cpu] = ptr; + slab_irq_enable(this_cpu); } /* 5) Replace the bootstrap kmem_list3's */ { @@ -1707,7 +1775,7 @@ static void store_stackinfo(struct kmem_ *addr++ = 0x12345678; *addr++ = caller; - *addr++ = smp_processor_id(); + *addr++ = raw_smp_processor_id(); size -= 3 * sizeof(unsigned long); { unsigned long *sptr = &caller; @@ -1862,7 +1930,11 @@ static void check_poison_obj(struct kmem } #endif +static void +__cache_free(struct kmem_cache *cachep, void *objp, int *this_cpu); + #if DEBUG + /** * slab_destroy_objs - destroy a slab and its objects * @cachep: cache pointer being destroyed @@ -1871,7 +1943,8 @@ static void check_poison_obj(struct kmem * Call the registered destructor for each object in a slab that is being * destroyed. 
*/ -static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) +static void +slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) { int i; for (i = 0; i < cachep->num; i++) { @@ -1914,7 +1987,8 @@ static void slab_destroy_objs(struct kme * Before calling the slab must have been unlinked from the cache. The * cache-lock is not held/needed. */ -static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) +static void +slab_destroy(struct kmem_cache *cachep, struct slab *slabp, int *this_cpu) { void *addr = slabp->s_mem - slabp->colouroff; @@ -1928,8 +2002,12 @@ static void slab_destroy(struct kmem_cac call_rcu(&slab_rcu->head, kmem_rcu_free); } else { kmem_freepages(cachep, addr); - if (OFF_SLAB(cachep)) - kmem_cache_free(cachep->slabp_cache, slabp); + if (OFF_SLAB(cachep)) { + if (this_cpu) + __cache_free(cachep->slabp_cache, slabp, this_cpu); + else + kmem_cache_free(cachep->slabp_cache, slabp); + } } } @@ -2042,6 +2120,8 @@ static size_t calculate_slab_order(struc static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) { + int this_cpu; + if (g_cpucache_up == FULL) return enable_cpucache(cachep); @@ -2085,10 +2165,12 @@ static int __init_refok setup_cpu_cache( jiffies + REAPTIMEOUT_LIST3 + ((unsigned long)cachep) % REAPTIMEOUT_LIST3; - cpu_cache_get(cachep)->avail = 0; - cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; - cpu_cache_get(cachep)->batchcount = 1; - cpu_cache_get(cachep)->touched = 0; + this_cpu = raw_smp_processor_id(); + + cpu_cache_get(cachep, this_cpu)->avail = 0; + cpu_cache_get(cachep, this_cpu)->limit = BOOT_CPUCACHE_ENTRIES; + cpu_cache_get(cachep, this_cpu)->batchcount = 1; + cpu_cache_get(cachep, this_cpu)->touched = 0; cachep->batchcount = 1; cachep->limit = BOOT_CPUCACHE_ENTRIES; return 0; @@ -2376,19 +2458,19 @@ EXPORT_SYMBOL(kmem_cache_create); #if DEBUG static void check_irq_off(void) { +/* + * On PREEMPT_RT we use locks to protect the per-CPU lists, + * and keep interrupts enabled. + */ +#ifndef CONFIG_PREEMPT_RT BUG_ON(!irqs_disabled()); +#endif } static void check_irq_on(void) { +#ifndef CONFIG_PREEMPT_RT BUG_ON(irqs_disabled()); -} - -static void check_spinlock_acquired(struct kmem_cache *cachep) -{ -#ifdef CONFIG_SMP - check_irq_off(); - assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); #endif } @@ -2403,7 +2485,6 @@ static void check_spinlock_acquired_node #else #define check_irq_off() do { } while(0) #define check_irq_on() do { } while(0) -#define check_spinlock_acquired(x) do { } while(0) #define check_spinlock_acquired_node(x, y) do { } while(0) #endif @@ -2411,26 +2492,60 @@ static void drain_array(struct kmem_cach struct array_cache *ac, int force, int node); -static void do_drain(void *arg) +static void __do_drain(void *arg, int this_cpu) { struct kmem_cache *cachep = arg; + int node = cpu_to_node(this_cpu); struct array_cache *ac; - int node = numa_node_id(); check_irq_off(); - ac = cpu_cache_get(cachep); + ac = cpu_cache_get(cachep, this_cpu); spin_lock(&cachep->nodelists[node]->list_lock); - free_block(cachep, ac->entry, ac->avail, node); + free_block(cachep, ac->entry, ac->avail, node, &this_cpu); spin_unlock(&cachep->nodelists[node]->list_lock); ac->avail = 0; } +#ifdef CONFIG_PREEMPT_RT +static void do_drain(void *arg, int this_cpu) +{ + __do_drain(arg, this_cpu); +} +#else +static void do_drain(void *arg) +{ + __do_drain(arg, smp_processor_id()); +} +#endif + +#ifdef CONFIG_PREEMPT_RT +/* + * execute func() for all CPUs. 
On PREEMPT_RT we dont actually have + * to run on the remote CPUs - we only have to take their CPU-locks. + * (This is a rare operation, so cacheline bouncing is not an issue.) + */ +static void +slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg) +{ + unsigned int i; + + check_irq_on(); + for_each_online_cpu(i) { + spin_lock(&__get_cpu_lock(slab_irq_locks, i)); + func(arg, i); + spin_unlock(&__get_cpu_lock(slab_irq_locks, i)); + } +} +#else +# define slab_on_each_cpu(func, cachep) on_each_cpu(func, cachep, 1, 1) +#endif + static void drain_cpu_caches(struct kmem_cache *cachep) { struct kmem_list3 *l3; int node; - on_each_cpu(do_drain, cachep, 1, 1); + slab_on_each_cpu(do_drain, cachep); check_irq_on(); for_each_online_node(node) { l3 = cachep->nodelists[node]; @@ -2455,16 +2570,16 @@ static int drain_freelist(struct kmem_ca struct kmem_list3 *l3, int tofree) { struct list_head *p; - int nr_freed; + int nr_freed, this_cpu; struct slab *slabp; nr_freed = 0; while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); p = l3->slabs_free.prev; if (p == &l3->slabs_free) { - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); goto out; } @@ -2473,13 +2588,9 @@ static int drain_freelist(struct kmem_ca BUG_ON(slabp->inuse); #endif list_del(&slabp->list); - /* - * Safe to drop the lock. The slab is no longer linked - * to the cache. - */ l3->free_objects -= cache->num; - spin_unlock_irq(&l3->list_lock); - slab_destroy(cache, slabp); + slab_destroy(cache, slabp, &this_cpu); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); nr_freed++; } out: @@ -2731,8 +2842,8 @@ static void slab_map_pages(struct kmem_c * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. 
*/ -static int cache_grow(struct kmem_cache *cachep, - gfp_t flags, int nodeid, void *objp) +static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid, + void *objp, int *this_cpu) { struct slab *slabp; size_t offset; @@ -2761,7 +2872,8 @@ static int cache_grow(struct kmem_cache offset *= cachep->colour_off; if (local_flags & __GFP_WAIT) - local_irq_enable(); + slab_irq_enable_nort(); + slab_irq_enable_rt(*this_cpu); /* * The test for missing atomic flag is performed here, rather than @@ -2791,8 +2903,10 @@ static int cache_grow(struct kmem_cache cache_init_objs(cachep, slabp); + slab_irq_disable_rt(*this_cpu); if (local_flags & __GFP_WAIT) - local_irq_disable(); + slab_irq_disable_nort(); + check_irq_off(); spin_lock(&l3->list_lock); @@ -2805,8 +2919,9 @@ static int cache_grow(struct kmem_cache opps1: kmem_freepages(cachep, objp); failed: + slab_irq_disable_rt(*this_cpu); if (local_flags & __GFP_WAIT) - local_irq_disable(); + slab_irq_disable_nort(); return 0; } @@ -2926,7 +3041,8 @@ bad: #define check_slabp(x,y) do { } while(0) #endif -static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) +static void * +cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, int *this_cpu) { int batchcount; struct kmem_list3 *l3; @@ -2936,7 +3052,7 @@ static void *cache_alloc_refill(struct k node = numa_node_id(); check_irq_off(); - ac = cpu_cache_get(cachep); + ac = cpu_cache_get(cachep, *this_cpu); retry: batchcount = ac->batchcount; if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { @@ -2947,7 +3063,7 @@ retry: */ batchcount = BATCHREFILL_LIMIT; } - l3 = cachep->nodelists[node]; + l3 = cachep->nodelists[cpu_to_node(*this_cpu)]; BUG_ON(ac->avail > 0 || !l3); spin_lock(&l3->list_lock); @@ -2970,7 +3086,7 @@ retry: slabp = list_entry(entry, struct slab, list); check_slabp(cachep, slabp); - check_spinlock_acquired(cachep); + check_spinlock_acquired_node(cachep, cpu_to_node(*this_cpu)); /* * The slab was either on partial or free list so @@ -2984,8 +3100,9 @@ retry: STATS_INC_ACTIVE(cachep); STATS_SET_HIGH(cachep); - ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, - node); + ac->entry[ac->avail++] = + slab_get_obj(cachep, slabp, + cpu_to_node(*this_cpu)); } check_slabp(cachep, slabp); @@ -3004,10 +3121,10 @@ alloc_done: if (unlikely(!ac->avail)) { int x; - x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); + x = cache_grow(cachep, flags | GFP_THISNODE, cpu_to_node(*this_cpu), NULL, this_cpu); /* cache_grow can reenable interrupts, then ac could change. */ - ac = cpu_cache_get(cachep); + ac = cpu_cache_get(cachep, *this_cpu); if (!x && ac->avail == 0) /* no objects in sight? 
abort */ return NULL; @@ -3159,21 +3276,22 @@ static inline int should_failslab(struct #endif /* CONFIG_FAILSLAB */ -static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) +static inline void * +____cache_alloc(struct kmem_cache *cachep, gfp_t flags, int *this_cpu) { void *objp; struct array_cache *ac; check_irq_off(); - ac = cpu_cache_get(cachep); + ac = cpu_cache_get(cachep, *this_cpu); if (likely(ac->avail)) { STATS_INC_ALLOCHIT(cachep); ac->touched = 1; objp = ac->entry[--ac->avail]; } else { STATS_INC_ALLOCMISS(cachep); - objp = cache_alloc_refill(cachep, flags); + objp = cache_alloc_refill(cachep, flags, this_cpu); } return objp; } @@ -3187,7 +3305,7 @@ static inline void *____cache_alloc(stru */ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) { - int nid_alloc, nid_here; + int nid_alloc, nid_here, this_cpu = raw_smp_processor_id(); if (in_interrupt() || (flags & __GFP_THISNODE)) return NULL; @@ -3197,7 +3315,7 @@ static void *alternate_node_alloc(struct else if (current->mempolicy) nid_alloc = slab_node(current->mempolicy); if (nid_alloc != nid_here) - return ____cache_alloc_node(cachep, flags, nid_alloc); + return ____cache_alloc_node(cachep, flags, nid_alloc, &this_cpu); return NULL; } @@ -3209,7 +3327,7 @@ static void *alternate_node_alloc(struct * allocator to do its reclaim / fallback magic. We then insert the * slab into the proper nodelist and then allocate from it. */ -static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) +static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags, int *this_cpu) { struct zonelist *zonelist; gfp_t local_flags; @@ -3235,8 +3353,10 @@ retry: if (cpuset_zone_allowed_hardwall(*z, flags) && cache->nodelists[nid] && cache->nodelists[nid]->free_objects) - obj = ____cache_alloc_node(cache, - flags | GFP_THISNODE, nid); + + obj = ____cache_alloc_node(cache, + flags | GFP_THISNODE, nid, + this_cpu); } if (!obj) { @@ -3247,19 +3367,24 @@ retry: * set and go into memory reserves if necessary. 
*/ if (local_flags & __GFP_WAIT) - local_irq_enable(); + slab_irq_enable_nort(); + slab_irq_enable_rt(*this_cpu); + kmem_flagcheck(cache, flags); obj = kmem_getpages(cache, flags, -1); + + slab_irq_disable_rt(*this_cpu); if (local_flags & __GFP_WAIT) - local_irq_disable(); + slab_irq_disable_nort(); + if (obj) { /* * Insert into the appropriate per node queues */ nid = page_to_nid(virt_to_page(obj)); - if (cache_grow(cache, flags, nid, obj)) { + if (cache_grow(cache, flags, nid, obj, this_cpu)) { obj = ____cache_alloc_node(cache, - flags | GFP_THISNODE, nid); + flags | GFP_THISNODE, nid, this_cpu); if (!obj) /* * Another processor may allocate the @@ -3280,7 +3405,7 @@ retry: * A interface to enable slab creation on nodeid */ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, - int nodeid) + int nodeid, int *this_cpu) { struct list_head *entry; struct slab *slabp; @@ -3328,11 +3453,11 @@ retry: must_grow: spin_unlock(&l3->list_lock); - x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); + x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL, this_cpu); if (x) goto retry; - return fallback_alloc(cachep, flags); + return fallback_alloc(cachep, flags, this_cpu); done: return obj; @@ -3354,39 +3479,41 @@ static __always_inline void * __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, void *caller) { - unsigned long save_flags; + unsigned long irqflags; + int this_cpu; void *ptr; if (should_failslab(cachep, flags)) return NULL; cache_alloc_debugcheck_before(cachep, flags); - local_irq_save(save_flags); + + slab_irq_save(irqflags, this_cpu); if (unlikely(nodeid == -1)) - nodeid = numa_node_id(); + nodeid = cpu_to_node(this_cpu); if (unlikely(!cachep->nodelists[nodeid])) { /* Node not bootstrapped yet */ - ptr = fallback_alloc(cachep, flags); + ptr = fallback_alloc(cachep, flags, &this_cpu); goto out; } - if (nodeid == numa_node_id()) { + if (nodeid == cpu_to_node(this_cpu)) { /* * Use the locally cached objects if possible. * However ____cache_alloc does not allow fallback * to other nodes. It may fail while we still have * objects on other nodes available. */ - ptr = ____cache_alloc(cachep, flags); + ptr = ____cache_alloc(cachep, flags, &this_cpu); if (ptr) goto out; } /* ___cache_alloc_node can fall back to other nodes */ - ptr = ____cache_alloc_node(cachep, flags, nodeid); + ptr = ____cache_alloc_node(cachep, flags, nodeid, &this_cpu); out: - local_irq_restore(save_flags); + slab_irq_restore(irqflags, this_cpu); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); if (unlikely((flags & __GFP_ZERO) && ptr)) @@ -3396,7 +3523,7 @@ __cache_alloc_node(struct kmem_cache *ca } static __always_inline void * -__do_cache_alloc(struct kmem_cache *cache, gfp_t flags) +__do_cache_alloc(struct kmem_cache *cache, gfp_t flags, int *this_cpu) { void *objp; @@ -3405,24 +3532,24 @@ __do_cache_alloc(struct kmem_cache *cach if (objp) goto out; } - objp = ____cache_alloc(cache, flags); + objp = ____cache_alloc(cache, flags, this_cpu); /* * We may just have run out of memory on the local node. 
* ____cache_alloc_node() knows how to locate memory on other nodes */ - if (!objp) - objp = ____cache_alloc_node(cache, flags, numa_node_id()); - + if (!objp) + objp = ____cache_alloc_node(cache, flags, + cpu_to_node(*this_cpu), this_cpu); out: return objp; } #else static __always_inline void * -__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) +__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int *this_cpu) { - return ____cache_alloc(cachep, flags); + return ____cache_alloc(cachep, flags, this_cpu); } #endif /* CONFIG_NUMA */ @@ -3431,15 +3558,16 @@ static __always_inline void * __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) { unsigned long save_flags; + int this_cpu; void *objp; if (should_failslab(cachep, flags)) return NULL; cache_alloc_debugcheck_before(cachep, flags); - local_irq_save(save_flags); - objp = __do_cache_alloc(cachep, flags); - local_irq_restore(save_flags); + slab_irq_save(save_flags, this_cpu); + objp = __do_cache_alloc(cachep, flags, &this_cpu); + slab_irq_restore(save_flags, this_cpu); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); prefetchw(objp); @@ -3453,7 +3581,7 @@ __cache_alloc(struct kmem_cache *cachep, * Caller needs to acquire correct kmem_list's list_lock */ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, - int node) + int node, int *this_cpu) { int i; struct kmem_list3 *l3; @@ -3482,7 +3610,7 @@ static void free_block(struct kmem_cache * a different cache, refer to comments before * alloc_slabmgmt. */ - slab_destroy(cachep, slabp); + slab_destroy(cachep, slabp, this_cpu); } else { list_add(&slabp->list, &l3->slabs_free); } @@ -3496,11 +3624,12 @@ static void free_block(struct kmem_cache } } -static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) +static void +cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac, int *this_cpu) { int batchcount; struct kmem_list3 *l3; - int node = numa_node_id(); + int node = cpu_to_node(*this_cpu); batchcount = ac->batchcount; #if DEBUG @@ -3522,7 +3651,7 @@ static void cache_flusharray(struct kmem } } - free_block(cachep, ac->entry, batchcount, node); + free_block(cachep, ac->entry, batchcount, node, this_cpu); free_done: #if STATS { @@ -3551,14 +3680,15 @@ free_done: * Release an obj back to its cache. If the obj has a constructed state, it must * be in this state _before_ it is released. Called with disabled ints. 
*/ -static inline void __cache_free(struct kmem_cache *cachep, void *objp) +static void +__cache_free(struct kmem_cache *cachep, void *objp, int *this_cpu) { - struct array_cache *ac = cpu_cache_get(cachep); + struct array_cache *ac = cpu_cache_get(cachep, *this_cpu); check_irq_off(); objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); - if (cache_free_alien(cachep, objp)) + if (cache_free_alien(cachep, objp, this_cpu)) return; if (likely(ac->avail < ac->limit)) { @@ -3567,7 +3697,7 @@ static inline void __cache_free(struct k return; } else { STATS_INC_FREEMISS(cachep); - cache_flusharray(cachep, ac); + cache_flusharray(cachep, ac, this_cpu); ac->entry[ac->avail++] = objp; } } @@ -3725,13 +3855,14 @@ EXPORT_SYMBOL(__kmalloc); void kmem_cache_free(struct kmem_cache *cachep, void *objp) { unsigned long flags; + int this_cpu; BUG_ON(virt_to_cache(objp) != cachep); - local_irq_save(flags); + slab_irq_save(flags, this_cpu); debug_check_no_locks_freed(objp, obj_size(cachep)); - __cache_free(cachep, objp); - local_irq_restore(flags); + __cache_free(cachep, objp, &this_cpu); + slab_irq_restore(flags, this_cpu); } EXPORT_SYMBOL(kmem_cache_free); @@ -3748,15 +3879,16 @@ void kfree(const void *objp) { struct kmem_cache *c; unsigned long flags; + int this_cpu; if (unlikely(ZERO_OR_NULL_PTR(objp))) return; - local_irq_save(flags); + slab_irq_save(flags, this_cpu); kfree_debugcheck(objp); c = virt_to_cache(objp); debug_check_no_locks_freed(objp, obj_size(c)); - __cache_free(c, (void *)objp); - local_irq_restore(flags); + __cache_free(c, (void *)objp, &this_cpu); + slab_irq_restore(flags, this_cpu); } EXPORT_SYMBOL(kfree); @@ -3777,7 +3909,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name); */ static int alloc_kmemlist(struct kmem_cache *cachep) { - int node; + int node, this_cpu; struct kmem_list3 *l3; struct array_cache *new_shared; struct array_cache **new_alien = NULL; @@ -3805,11 +3937,11 @@ static int alloc_kmemlist(struct kmem_ca if (l3) { struct array_cache *shared = l3->shared; - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); if (shared) free_block(cachep, shared->entry, - shared->avail, node); + shared->avail, node, &this_cpu); l3->shared = new_shared; if (!l3->alien) { @@ -3818,7 +3950,7 @@ static int alloc_kmemlist(struct kmem_ca } l3->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); kfree(shared); free_alien_cache(new_alien); continue; @@ -3865,42 +3997,50 @@ struct ccupdate_struct { struct array_cache *new[NR_CPUS]; }; -static void do_ccupdate_local(void *info) +static void __do_ccupdate_local(void *info, int this_cpu) { struct ccupdate_struct *new = info; struct array_cache *old; check_irq_off(); - old = cpu_cache_get(new->cachep); + old = cpu_cache_get(new->cachep, this_cpu); + + new->cachep->array[this_cpu] = new->new[this_cpu]; + new->new[this_cpu] = old; +} - new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; - new->new[smp_processor_id()] = old; +#ifdef CONFIG_PREEMPT_RT +static void do_ccupdate_local(void *arg, int this_cpu) +{ + __do_ccupdate_local(arg, this_cpu); } +#else +static void do_ccupdate_local(void *arg) +{ + __do_ccupdate_local(arg, smp_processor_id()); +} +#endif /* Always called with the cache_chain_mutex held */ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared) { - struct ccupdate_struct *new; - int i; - - new = kzalloc(sizeof(*new), GFP_KERNEL); - if 
(!new) - return -ENOMEM; + struct ccupdate_struct new; + int i, this_cpu; + memset(&new.new, 0, sizeof(new.new)); for_each_online_cpu(i) { - new->new[i] = alloc_arraycache(cpu_to_node(i), limit, + new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount); - if (!new->new[i]) { + if (!new.new[i]) { for (i--; i >= 0; i--) - kfree(new->new[i]); - kfree(new); + kfree(new.new[i]); return -ENOMEM; } } - new->cachep = cachep; + new.cachep = cachep; - on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); + slab_on_each_cpu(do_ccupdate_local, (void *)&new); check_irq_on(); cachep->batchcount = batchcount; @@ -3908,15 +4048,15 @@ static int do_tune_cpucache(struct kmem_ cachep->shared = shared; for_each_online_cpu(i) { - struct array_cache *ccold = new->new[i]; + struct array_cache *ccold = new.new[i]; if (!ccold) continue; - spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); - free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); - spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); + slab_spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock, this_cpu); + free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i), &this_cpu); + slab_spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock, this_cpu); kfree(ccold); } - kfree(new); + return alloc_kmemlist(cachep); } @@ -3980,26 +4120,26 @@ static int enable_cpucache(struct kmem_c * if drain_array() is used on the shared array. */ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, - struct array_cache *ac, int force, int node) + struct array_cache *ac, int force, int node) { - int tofree; + int tofree, this_cpu; if (!ac || !ac->avail) return; if (ac->touched && !force) { ac->touched = 0; } else { - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); if (ac->avail) { tofree = force ? ac->avail : (ac->limit + 4) / 5; if (tofree > ac->avail) tofree = (ac->avail + 1) / 2; - free_block(cachep, ac->entry, tofree, node); + free_block(cachep, ac->entry, tofree, node, &this_cpu); ac->avail -= tofree; memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); } - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); } } @@ -4017,11 +4157,12 @@ void drain_array(struct kmem_cache *cach */ static void cache_reap(struct work_struct *w) { + int this_cpu = raw_smp_processor_id(), node = cpu_to_node(this_cpu); struct kmem_cache *searchp; struct kmem_list3 *l3; - int node = numa_node_id(); struct delayed_work *work = container_of(w, struct delayed_work, work); + int work_done = 0; if (!mutex_trylock(&cache_chain_mutex)) /* Give up. Setup the next iteration. 
*/ @@ -4037,9 +4178,10 @@ static void cache_reap(struct work_struc */ l3 = searchp->nodelists[node]; - reap_alien(searchp, l3); + reap_alien(searchp, l3, &this_cpu); - drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); + drain_array(searchp, l3, cpu_cache_get(searchp, this_cpu), + 0, node); /* * These are racy checks but it does not matter @@ -4128,7 +4270,7 @@ static int s_show(struct seq_file *m, vo unsigned long num_slabs, free_objects = 0, shared_avail = 0; const char *name; char *error = NULL; - int node; + int this_cpu, node; struct kmem_list3 *l3; active_objs = 0; @@ -4139,7 +4281,7 @@ static int s_show(struct seq_file *m, vo continue; check_irq_on(); - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); list_for_each_entry(slabp, &l3->slabs_full, list) { if (slabp->inuse != cachep->num && !error) @@ -4164,7 +4306,7 @@ static int s_show(struct seq_file *m, vo if (l3->shared) shared_avail += l3->shared->avail; - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); } num_slabs += active_slabs; num_objs = num_slabs * cachep->num; @@ -4360,7 +4502,7 @@ static int leaks_show(struct seq_file *m struct kmem_list3 *l3; const char *name; unsigned long *n = m->private; - int node; + int node, this_cpu; int i; if (!(cachep->flags & SLAB_STORE_USER)) @@ -4378,13 +4520,13 @@ static int leaks_show(struct seq_file *m continue; check_irq_on(); - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); list_for_each_entry(slabp, &l3->slabs_full, list) handle_slab(n, cachep, slabp); list_for_each_entry(slabp, &l3->slabs_partial, list) handle_slab(n, cachep, slabp); - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); } name = cachep->name; if (n[0] == n[1]) { patches/dont-unmask-io_apic.patch0000664000077200007720000000131710655544571016421 0ustar mingomingo--- arch/x86_64/kernel/io_apic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) Index: linux/arch/x86_64/kernel/io_apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/io_apic.c +++ linux/arch/x86_64/kernel/io_apic.c @@ -1431,7 +1431,8 @@ static void ack_apic_level(unsigned int irq_complete_move(irq); #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) /* If we are moving the irq we need to mask it */ - if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { + if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING) && + !(irq_desc[irq].status & IRQ_INPROGRESS)) { do_unmask_irq = 1; mask_IO_APIC_irq(irq); } patches/hpet-force-enable-on-vt8235-37-chipsets.patch0000664000077200007720000001053510655544571021474 0ustar mingomingoFrom us15@os.inf.tu-dresden.de Tue Jun 12 14:31:48 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.2 required=5.0 tests=AWL,MAILTO_TO_SPAM_ADDR autolearn=no version=3.1.7-deb Received: from os.inf.tu-dresden.de (os.inf.tu-dresden.de [141.76.48.99]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (No client certificate requested) by mail.tglx.de (Postfix) with ESMTP id 13C2565C292 for ; Tue, 12 Jun 2007 14:31:48 +0200 (CEST) Received: from nova.inf.tu-dresden.de ([141.76.48.73] helo=laptop.hypervisor.org) by os.inf.tu-dresden.de with esmtpsa (TLSv1:AES256-SHA:256) (Exim 4.67) id 1Hy5XI-0008Nr-CO for tglx@linutronix.de; Tue, 12 Jun 2007 14:31:48 +0200 Date: Tue, 12 Jun 2007 14:31:47 +0200 From: "Udo A. 
Steinberg" To: Thomas Gleixner Subject: [PATCH]: Force enable HPET on VT8235/8237 chipsets Message-ID: <20070612143147.2a6199c2@laptop.hypervisor.org> X-Mailer: X-Mailer 5.0 Gold Mime-Version: 1.0 Content-Type: multipart/signed; boundary=Sig_48zpQdK28xw1yvtSEbZ9tfp; protocol="application/pgp-signature"; micalg=PGP-SHA1 X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ --Sig_48zpQdK28xw1yvtSEbZ9tfp Content-Type: text/plain; charset=US-ASCII Content-Transfer-Encoding: quoted-printable This patch adds quirks to force enable HPET on Via VT8235 and VT8237 chipse= ts. The datasheet for 8237 documents HPET functionality (although wrongly) wher= eas HPET is undocumented for 8235. Tested on A7V880 (8237) and K7VT4A+ (8235) boards. Signed-off-by: Udo A. Steinberg --- quirks.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++= +++- 1 file changed, 66 insertions(+), 1 deletion(-) --- arch/i386/kernel/quirks.c | 68 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) Index: linux/arch/i386/kernel/quirks.c =================================================================== --- linux.orig/arch/i386/kernel/quirks.c +++ linux/arch/i386/kernel/quirks.c @@ -56,7 +56,8 @@ unsigned long force_hpet_address; static enum { NONE_FORCE_HPET_RESUME, OLD_ICH_FORCE_HPET_RESUME, - ICH_FORCE_HPET_RESUME + ICH_FORCE_HPET_RESUME, + VT8237_FORCE_HPET_RESUME } force_hpet_resume_type; static void __iomem *rcba_base; @@ -245,6 +246,68 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, old_ich_force_enable_hpet); + +static void vt8237_force_hpet_resume(void) +{ + u32 val; + + if (!force_hpet_address || !cached_dev) + return; + + val = 0xfed00000 | 0x80; + pci_write_config_dword(cached_dev, 0x68, val); + + pci_read_config_dword(cached_dev, 0x68, &val); + if (val & 0x80) + printk(KERN_DEBUG "Force enabled HPET at resume\n"); + else + BUG(); +} + +static void vt8237_force_enable_hpet(struct pci_dev *dev) +{ + u32 val; + + if (hpet_address || force_hpet_address) + return; + + pci_read_config_dword(dev, 0x68, &val); + /* + * Bit 7 is HPET enable bit. + * Bit 31:10 is HPET base address (contrary to what datasheet claims) + */ + if (val & 0x80) { + force_hpet_address = (val & ~0x3ff); + printk(KERN_DEBUG "HPET at base address 0x%lx\n", + force_hpet_address); + return; + } + + /* + * HPET is disabled. 
Trying enabling at FED00000 and check + * whether it sticks + */ + val = 0xfed00000 | 0x80; + pci_write_config_dword(dev, 0x68, val); + + pci_read_config_dword(dev, 0x68, &val); + if (val & 0x80) { + force_hpet_address = (val & ~0x3ff); + printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", + force_hpet_address); + cached_dev = dev; + force_hpet_resume_type = VT8237_FORCE_HPET_RESUME; + return; + } + + printk(KERN_DEBUG "Failed to force enable HPET\n"); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, + vt8237_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, + vt8237_force_enable_hpet); + void force_hpet_resume(void) { switch (force_hpet_resume_type) { @@ -254,6 +317,9 @@ void force_hpet_resume(void) case OLD_ICH_FORCE_HPET_RESUME: return old_ich_force_hpet_resume(); + case VT8237_FORCE_HPET_RESUME: + return vt8237_force_hpet_resume(); + default: break; } patches/ich-force-hpet-late-initialization-of-hpet-after-quirk.patch0000664000077200007720000000467410655544570025125 0ustar mingomingoFrom: Venki Pallipadi Enable HPET later during boot, after the force detect in PCI quirks. Also add a call to repeat the force enabling at resume time. Signed-off-by: Venkatesh Pallipadi Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Andi Kleen Cc: john stultz Cc: Greg KH Signed-off-by: Andrew Morton --- arch/i386/kernel/hpet.c | 27 +++++++++++++++++++++++++-- include/asm-i386/hpet.h | 1 + 2 files changed, 26 insertions(+), 2 deletions(-) Index: linux/arch/i386/kernel/hpet.c =================================================================== --- linux.orig/arch/i386/kernel/hpet.c +++ linux/arch/i386/kernel/hpet.c @@ -164,6 +164,7 @@ static struct clock_event_device hpet_cl .set_next_event = hpet_legacy_next_event, .shift = 32, .irq = 0, + .rating = 50, }; static void hpet_start_counter(void) @@ -178,6 +179,17 @@ static void hpet_start_counter(void) hpet_writel(cfg, HPET_CFG); } +static void hpet_resume_device(void) +{ + ich_force_hpet_resume(); +} + +static void hpet_restart_counter(void) +{ + hpet_resume_device(); + hpet_start_counter(); +} + static void hpet_enable_legacy_int(void) { unsigned long cfg = hpet_readl(HPET_CFG); @@ -299,7 +311,7 @@ static struct clocksource clocksource_hp .mask = HPET_MASK, .shift = HPET_SHIFT, .flags = CLOCK_SOURCE_IS_CONTINUOUS, - .resume = hpet_start_counter, + .resume = hpet_restart_counter, #ifdef CONFIG_X86_64 .vread = vread_hpet, #endif @@ -412,10 +424,21 @@ out_nohpet: */ static __init int hpet_late_init(void) { - if (!is_hpet_capable()) + if (boot_hpet_disable) return -ENODEV; + if (!hpet_address) { + if (!force_hpet_address) + return -ENODEV; + + hpet_address = force_hpet_address; + hpet_enable(); + if (!hpet_virt_address) + return -ENODEV; + } + hpet_reserve_platform_timers(hpet_readl(HPET_ID)); + return 0; } fs_initcall(hpet_late_init); Index: linux/include/asm-i386/hpet.h =================================================================== --- linux.orig/include/asm-i386/hpet.h +++ linux/include/asm-i386/hpet.h @@ -64,6 +64,7 @@ /* hpet memory map physical address */ extern unsigned long hpet_address; +extern unsigned long force_hpet_address; extern int is_hpet_enabled(void); extern int hpet_enable(void); extern unsigned long hpet_readl(unsigned long a); patches/write-try-lock-irqsave.patch0000664000077200007720000000113510655544571017122 0ustar mingomingo--- include/linux/spinlock.h | 7 +++++++ 1 file changed, 7 insertions(+) Index: linux/include/linux/spinlock.h 
=================================================================== --- linux.orig/include/linux/spinlock.h +++ linux/include/linux/spinlock.h @@ -289,6 +289,13 @@ do { \ 1 : ({ local_irq_restore(flags); 0; }); \ }) +#define write_trylock_irqsave(lock, flags) \ +({ \ + local_irq_save(flags); \ + write_trylock(lock) ? \ + 1 : ({ local_irq_restore(flags); 0; }); \ +}) + /* * Locks two spinlocks l1 and l2. * l1_first indicates if spinlock l1 should be taken first. patches/x86_64-consolidate-tsc-calibration.patch0000664000077200007720000001527310655544570021101 0ustar mingomingoSubject: x86_64: Consolidate tsc calibration Move the TSC calibration code to tsc.c. Reimplement it so the pm timer can be used as a reference as well. Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/hpet.c | 49 ------------------------- arch/x86_64/kernel/time.c | 33 +--------------- arch/x86_64/kernel/tsc.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++ include/asm-i386/tsc.h | 4 ++ 4 files changed, 96 insertions(+), 80 deletions(-) Index: linux/arch/x86_64/kernel/hpet.c =================================================================== --- linux.orig/arch/x86_64/kernel/hpet.c +++ linux/arch/x86_64/kernel/hpet.c @@ -184,55 +184,6 @@ int hpet_reenable(void) return hpet_timer_stop_set_go(hpet_tick); } -/* - * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing - * it to the HPET timer of known frequency. - */ - -#define TICK_COUNT 100000000 -#define SMI_THRESHOLD 50000 -#define MAX_TRIES 5 - -/* - * Some platforms take periodic SMI interrupts with 5ms duration. Make sure none - * occurs between the reads of the hpet & TSC. - */ -static void __init read_hpet_tsc(int *hpet, int *tsc) -{ - int tsc1, tsc2, hpet1, i; - - for (i = 0; i < MAX_TRIES; i++) { - tsc1 = get_cycles_sync(); - hpet1 = hpet_readl(HPET_COUNTER); - tsc2 = get_cycles_sync(); - if ((tsc2 - tsc1) < SMI_THRESHOLD) - break; - } - *hpet = hpet1; - *tsc = tsc2; -} - -unsigned int __init hpet_calibrate_tsc(void) -{ - int tsc_start, hpet_start; - int tsc_now, hpet_now; - unsigned long flags; - - local_irq_save(flags); - - read_hpet_tsc(&hpet_start, &tsc_start); - - do { - local_irq_disable(); - read_hpet_tsc(&hpet_now, &tsc_now); - local_irq_restore(flags); - } while ((tsc_now - tsc_start) < TICK_COUNT && - (hpet_now - hpet_start) < TICK_COUNT); - - return (tsc_now - tsc_start) * 1000000000L - / ((hpet_now - hpet_start) * hpet_period / 1000); -} - #ifdef CONFIG_HPET_EMULATE_RTC /* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET * is enabled, we support RTC interrupt functionality in software. Index: linux/arch/x86_64/kernel/time.c =================================================================== --- linux.orig/arch/x86_64/kernel/time.c +++ linux/arch/x86_64/kernel/time.c @@ -292,35 +292,6 @@ static unsigned int __init tsc_calibrate return pmc_now * tsc_khz / (tsc_now - tsc_start); } -/* - * pit_calibrate_tsc() uses the speaker output (channel 2) of - * the PIT. This is better than using the timer interrupt output, - * because we can read the value of the speaker with just one inb(), - * where we need three i/o operations for the interrupt channel. - * We count how many ticks the TSC does in 50 ms. 
- */ - -static unsigned int __init pit_calibrate_tsc(void) -{ - unsigned long start, end; - unsigned long flags; - - spin_lock_irqsave(&i8253_lock, flags); - - outb((inb(0x61) & ~0x02) | 0x01, 0x61); - - outb(0xb0, 0x43); - outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42); - outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42); - start = get_cycles_sync(); - while ((inb(0x61) & 0x20) == 0); - end = get_cycles_sync(); - - spin_unlock_irqrestore(&i8253_lock, flags); - - return (end - start) / 50; -} - #define PIT_MODE 0x43 #define PIT_CH0 0x40 @@ -376,14 +347,14 @@ void __init time_init(void) if (hpet_use_timer) { /* set tick_nsec to use the proper rate for HPET */ tick_nsec = TICK_NSEC_HPET; - tsc_khz = hpet_calibrate_tsc(); timename = "HPET"; } else { pit_init(); - tsc_khz = pit_calibrate_tsc(); timename = "PIT"; } + tsc_calibrate(); + cpu_khz = tsc_khz; if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && boot_cpu_data.x86_vendor == X86_VENDOR_AMD && Index: linux/arch/x86_64/kernel/tsc.c =================================================================== --- linux.orig/arch/x86_64/kernel/tsc.c +++ linux/arch/x86_64/kernel/tsc.c @@ -6,7 +6,9 @@ #include #include #include +#include +#include #include static int notsc __initdata = 0; @@ -118,6 +120,94 @@ core_initcall(cpufreq_tsc); #endif +#define MAX_RETRIES 5 +#define SMI_TRESHOLD 50000 + +/* + * Read TSC and the reference counters. Take care of SMI disturbance + */ +static unsigned long __init tsc_read_refs(unsigned long *pm, + unsigned long *hpet) +{ + unsigned long t1, t2; + int i; + + for (i = 0; i < MAX_RETRIES; i++) { + t1 = get_cycles_sync(); + if (hpet) + *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; + else + *pm = acpi_pm_read_early(); + t2 = get_cycles_sync(); + if ((t2 - t1) < SMI_TRESHOLD) + return t2; + } + return ULONG_MAX; +} + +/** + * tsc_calibrate - calibrate the tsc on boot + */ +void __init tsc_calibrate(void) +{ + unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2; + int hpet = is_hpet_enabled(); + + local_irq_save(flags); + + tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); + + outb((inb(0x61) & ~0x02) | 0x01, 0x61); + + outb(0xb0, 0x43); + outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); + outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); + tr1 = get_cycles_sync(); + while ((inb(0x61) & 0x20) == 0); + tr2 = get_cycles_sync(); + + tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); + + local_irq_restore(flags); + + /* + * Preset the result with the raw and inaccurate PIT + * calibration value + */ + tsc_khz = (tr2 - tr1) / 50; + + /* hpet or pmtimer available ? */ + if (!hpet && !pm1 && !pm2) { + printk(KERN_INFO "TSC calibrated against PIT\n"); + return; + } + + /* Check, whether the sampling was disturbed by an SMI */ + if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) { + printk(KERN_WARNING "TSC calibration disturbed by SMI, " + "using PIT calibration result\n"); + return; + } + + tsc2 = (tsc2 - tsc1) * 1000000L; + + if (hpet) { + printk(KERN_INFO "TSC calibrated against HPET\n"); + if (hpet2 < hpet1) + hpet2 += 0x100000000; + hpet2 -= hpet1; + tsc1 = (hpet2 * hpet_readl(HPET_PERIOD)) / 1000000; + } else { + printk(KERN_INFO "TSC calibrated against PM_TIMER\n"); + if (pm2 < pm1) + pm2 += ACPI_PM_OVRRUN; + pm2 -= pm1; + tsc1 = (pm2 * 1000000000) / PMTMR_TICKS_PER_SEC; + } + + tsc_khz = tsc2 / tsc1; +} + /* * Make an educated guess if the TSC is trustworthy and synchronized * over all CPUs. 
Index: linux/include/asm-i386/tsc.h =================================================================== --- linux.orig/include/asm-i386/tsc.h +++ linux/include/asm-i386/tsc.h @@ -72,4 +72,8 @@ int check_tsc_unstable(void); extern void check_tsc_sync_source(int cpu); extern void check_tsc_sync_target(void); +#ifdef CONFIG_X86_64 +extern void tsc_calibrate(void); +#endif + #endif patches/preempt-irqs-softirq-in-hardirq.patch0000664000077200007720000000415610655544573020737 0ustar mingomingo--- include/linux/interrupt.h | 1 + kernel/softirq.c | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) Index: linux-rt-rebase.q/include/linux/interrupt.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/interrupt.h +++ linux-rt-rebase.q/include/linux/interrupt.h @@ -290,6 +290,7 @@ struct softirq_action asmlinkage void do_softirq(void); extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); extern void softirq_init(void); +extern void do_softirq_from_hardirq(void); #ifdef CONFIG_PREEMPT_HARDIRQS # define __raise_softirq_irqoff(nr) raise_softirq_irqoff(nr) Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -296,6 +296,8 @@ restart: asmlinkage void __do_softirq(void) { + unsigned long p_flags; + #ifdef CONFIG_PREEMPT_SOFTIRQS /* * 'preempt harder'. Push all softirq processing off to ksoftirqd. @@ -311,6 +313,38 @@ asmlinkage void __do_softirq(void) */ __local_bh_disable((unsigned long)__builtin_return_address(0)); trace_softirq_enter(); + p_flags = current->flags & PF_HARDIRQ; + current->flags &= ~PF_HARDIRQ; + + ___do_softirq(); + + trace_softirq_exit(); + + account_system_vtime(current); + _local_bh_enable(); + + current->flags |= p_flags; +} + +/* + * Process softirqs straight from hardirq context, + * without having to switch to a softirq thread. + * This can reduce the context-switch rate. + * + * NOTE: this is unused right now. 
+ */ +void do_softirq_from_hardirq(void) +{ + unsigned long p_flags; + + if (!local_softirq_pending()) + return; + /* + * 'immediate' softirq execution: + */ + __local_bh_disable((unsigned long)__builtin_return_address(0)); + p_flags = current->flags & PF_HARDIRQ; + current->flags &= ~PF_HARDIRQ; ___do_softirq(); @@ -319,6 +353,7 @@ asmlinkage void __do_softirq(void) account_system_vtime(current); _local_bh_enable(); + current->flags |= p_flags; } #ifndef __ARCH_HAS_DO_SOFTIRQ patches/nmi-profiling-base.patch0000664000077200007720000003026010655544571016241 0ustar mingomingoSubject: [patch] nmi-driven profiling for /proc/profile From: Ingo Molnar nmi-driven profiling for /proc/profile Signed-off-by: Ingo Molnar --- arch/i386/kernel/crash.c | 8 --- arch/i386/kernel/nmi.c | 91 +++++++++++++++++++++++++++++++++++++++++---- arch/x86_64/kernel/crash.c | 5 -- arch/x86_64/kernel/irq.c | 2 arch/x86_64/kernel/nmi.c | 67 +++++++++++++++++++++++++++++++-- include/asm-i386/apic.h | 2 include/asm-x86_64/apic.h | 2 include/linux/profile.h | 1 kernel/profile.c | 9 ++-- kernel/time/tick-common.c | 1 kernel/time/tick-sched.c | 2 11 files changed, 159 insertions(+), 31 deletions(-) Index: linux/arch/i386/kernel/crash.c =================================================================== --- linux.orig/arch/i386/kernel/crash.c +++ linux/arch/i386/kernel/crash.c @@ -70,14 +70,6 @@ static int crash_nmi_callback(struct not return 1; } -static void smp_send_nmi_allbutself(void) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(safe_smp_processor_id(), mask); - if (!cpus_empty(mask)) - send_IPI_mask(mask, NMI_VECTOR); -} - static struct notifier_block crash_nmi_nb = { .notifier_call = crash_nmi_callback, }; Index: linux/arch/i386/kernel/nmi.c =================================================================== --- linux.orig/arch/i386/kernel/nmi.c +++ linux/arch/i386/kernel/nmi.c @@ -28,6 +28,8 @@ #include #include +#include + #include "mach_traps.h" int unknown_nmi_panic; @@ -44,7 +46,7 @@ static cpumask_t backtrace_mask = CPU_MA atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ unsigned int nmi_watchdog = NMI_DEFAULT; -static unsigned int nmi_hz = HZ; +static unsigned int nmi_hz = 1000; static DEFINE_PER_CPU(short, wd_enabled); @@ -95,7 +97,7 @@ static int __init check_nmi_watchdog(voi for_each_possible_cpu(cpu) prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; local_irq_enable(); - mdelay((20*1000)/nmi_hz); // wait 20 ticks + mdelay((100*1000)/nmi_hz); /* wait 100 ticks */ for_each_possible_cpu(cpu) { #ifdef CONFIG_SMP @@ -319,9 +321,48 @@ EXPORT_SYMBOL(touch_nmi_watchdog); extern void die_nmi(struct pt_regs *, const char *msg); -__kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) +int nmi_show_regs[NR_CPUS]; + +void nmi_show_all_regs(void) { + int i; + + if (system_state == SYSTEM_BOOTING) + return; + + printk(KERN_WARNING "nmi_show_all_regs(): start on CPU#%d.\n", + raw_smp_processor_id()); + dump_stack(); + + for_each_online_cpu(i) + nmi_show_regs[i] = 1; + + smp_send_nmi_allbutself(); + + for_each_online_cpu(i) { + while (nmi_show_regs[i] == 1) + barrier(); + } +} + +static DEFINE_SPINLOCK(nmi_print_lock); + +void irq_show_regs_callback(int cpu, struct pt_regs *regs) +{ + if (!nmi_show_regs[cpu]) + return; + nmi_show_regs[cpu] = 0; + spin_lock(&nmi_print_lock); + printk(KERN_WARNING "NMI show regs on CPU#%d:\n", cpu); + printk(KERN_WARNING "apic_timer_irqs: %d\n", + per_cpu(irq_stat, cpu).apic_timer_irqs); + show_regs(regs); + spin_unlock(&nmi_print_lock); +} + 
+__kprobes int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) +{ /* * Since current_thread_info()-> is always on the stack, and we * always switch the stack NMI-atomically, it's safe to use @@ -332,6 +373,8 @@ __kprobes int nmi_watchdog_tick(struct p int cpu = smp_processor_id(); int rc=0; + __profile_tick(CPU_PROFILING, regs); + /* check for other users first */ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) { @@ -355,6 +398,9 @@ __kprobes int nmi_watchdog_tick(struct p */ sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_cpu(cpu).irqs[0]; + irq_show_regs_callback(cpu, regs); + + /* if the apic timer isn't firing, this cpu isn't doing much */ /* if the none of the timers isn't firing, this cpu isn't doing much */ if (!touched && last_irq_sums[cpu] == sum) { /* @@ -362,11 +408,30 @@ __kprobes int nmi_watchdog_tick(struct p * wait a few IRQs (5 seconds) before doing the oops ... */ alert_counter[cpu]++; - if (alert_counter[cpu] == 5*nmi_hz) - /* - * die_nmi will return ONLY if NOTIFY_STOP happens.. - */ - die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP"); + if (alert_counter[cpu] && !(alert_counter[cpu] % (5*nmi_hz))) { + int i; + + spin_lock(&nmi_print_lock); + printk(KERN_WARNING "NMI watchdog detected lockup on " + "CPU#%d (%d/%d)\n", cpu, alert_counter[cpu], + 5*nmi_hz); + show_regs(regs); + spin_unlock(&nmi_print_lock); + + for_each_online_cpu(i) { + if (i == cpu) + continue; + nmi_show_regs[i] = 1; + while (nmi_show_regs[i] == 1) + cpu_relax(); + } + printk(KERN_WARNING "NMI watchdog running again ...\n"); + for_each_online_cpu(i) + alert_counter[i] = 0; + + + } + } else { last_irq_sums[cpu] = sum; alert_counter[cpu] = 0; @@ -464,5 +529,15 @@ void __trigger_all_cpu_backtrace(void) } } +void smp_send_nmi_allbutself(void) +{ +#ifdef CONFIG_SMP + cpumask_t mask = cpu_online_map; + cpu_clear(safe_smp_processor_id(), mask); + if (!cpus_empty(mask)) + send_IPI_mask(mask, NMI_VECTOR); +#endif +} + EXPORT_SYMBOL(nmi_active); EXPORT_SYMBOL(nmi_watchdog); Index: linux/arch/x86_64/kernel/crash.c =================================================================== --- linux.orig/arch/x86_64/kernel/crash.c +++ linux/arch/x86_64/kernel/crash.c @@ -62,11 +62,6 @@ static int crash_nmi_callback(struct not return 1; } -static void smp_send_nmi_allbutself(void) -{ - send_IPI_allbutself(NMI_VECTOR); -} - /* * This code is a best effort heuristic to get the * other cpus to stop executing. 
So races with Index: linux/arch/x86_64/kernel/irq.c =================================================================== --- linux.orig/arch/x86_64/kernel/irq.c +++ linux/arch/x86_64/kernel/irq.c @@ -111,6 +111,8 @@ asmlinkage unsigned int do_IRQ(struct pt unsigned vector = ~regs->orig_rax; unsigned irq; + irq_show_regs_callback(smp_processor_id(), regs); + exit_idle(); irq_enter(); irq = __get_cpu_var(vector_irq)[vector]; Index: linux/arch/x86_64/kernel/nmi.c =================================================================== --- linux.orig/arch/x86_64/kernel/nmi.c +++ linux/arch/x86_64/kernel/nmi.c @@ -22,11 +22,13 @@ #include #include #include +#include #include #include #include #include +#include int unknown_nmi_panic; int nmi_watchdog_enabled; @@ -44,7 +46,7 @@ atomic_t nmi_active = ATOMIC_INIT(0); / int panic_on_timeout; unsigned int nmi_watchdog = NMI_DEFAULT; -static unsigned int nmi_hz = HZ; +static unsigned int nmi_hz = 1000; static DEFINE_PER_CPU(short, wd_enabled); @@ -302,7 +304,7 @@ void touch_nmi_watchdog(void) unsigned cpu; /* - * Tell other CPUs to reset their alert counters. We cannot + * Tell other CPUs to reset their alert counters. We cannot * do it ourselves because the alert count increase is not * atomic. */ @@ -312,7 +314,42 @@ void touch_nmi_watchdog(void) } } - touch_softlockup_watchdog(); + touch_softlockup_watchdog(); +} + +int nmi_show_regs[NR_CPUS]; + +void nmi_show_all_regs(void) +{ + int i; + + if (system_state == SYSTEM_BOOTING) + return; + + smp_send_nmi_allbutself(); + + for_each_online_cpu(i) + nmi_show_regs[i] = 1; + + for_each_online_cpu(i) { + while (nmi_show_regs[i] == 1) + barrier(); + } +} + +static DEFINE_SPINLOCK(nmi_print_lock); + +void irq_show_regs_callback(int cpu, struct pt_regs *regs) +{ + if (!nmi_show_regs[cpu]) + return; + + nmi_show_regs[cpu] = 0; + spin_lock(&nmi_print_lock); + printk(KERN_WARNING "NMI show regs on CPU#%d:\n", cpu); + printk(KERN_WARNING "apic_timer_irqs: %d\n", read_pda(apic_timer_irqs)); + show_regs(regs); + spin_unlock(&nmi_print_lock); } int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) @@ -322,6 +359,9 @@ int __kprobes nmi_watchdog_tick(struct p int cpu = smp_processor_id(); int rc = 0; + irq_show_regs_callback(cpu, regs); + __profile_tick(CPU_PROFILING, regs); + /* check for other users first */ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) { @@ -330,6 +370,7 @@ int __kprobes nmi_watchdog_tick(struct p } sum = read_pda(apic_timer_irqs); + if (__get_cpu_var(nmi_touch)) { __get_cpu_var(nmi_touch) = 0; touched = 1; @@ -358,9 +399,20 @@ int __kprobes nmi_watchdog_tick(struct p * wait a few IRQs (5 seconds) before doing the oops ... 
*/ local_inc(&__get_cpu_var(alert_counter)); - if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) + if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) { + int i; + + for_each_online_cpu(i) { + if (i == cpu) + continue; + nmi_show_regs[i] = 1; + while (nmi_show_regs[i] == 1) + cpu_relax(); + } + die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs, panic_on_timeout); + } } else { __get_cpu_var(last_irq_sum) = sum; local_set(&__get_cpu_var(alert_counter), 0); @@ -478,6 +530,13 @@ void __trigger_all_cpu_backtrace(void) } } +void smp_send_nmi_allbutself(void) +{ +#ifdef CONFIG_SMP + send_IPI_allbutself(NMI_VECTOR); +#endif +} + EXPORT_SYMBOL(nmi_active); EXPORT_SYMBOL(nmi_watchdog); EXPORT_SYMBOL(touch_nmi_watchdog); Index: linux/include/asm-i386/apic.h =================================================================== --- linux.orig/include/asm-i386/apic.h +++ linux/include/asm-i386/apic.h @@ -116,6 +116,8 @@ extern void enable_NMI_through_LVT0 (voi extern int timer_over_8254; extern int local_apic_timer_c2_ok; +extern void smp_send_nmi_allbutself(void); + #else /* !CONFIG_X86_LOCAL_APIC */ static inline void lapic_shutdown(void) { } Index: linux/include/asm-x86_64/apic.h =================================================================== --- linux.orig/include/asm-x86_64/apic.h +++ linux/include/asm-x86_64/apic.h @@ -85,6 +85,8 @@ extern void setup_APIC_extended_lvt(unsi extern int apic_is_clustered_box(void); +extern void smp_send_nmi_allbutself(void); + #define K8_APIC_EXT_LVT_BASE 0x500 #define K8_APIC_EXT_INT_MSG_FIX 0x0 #define K8_APIC_EXT_INT_MSG_SMI 0x2 Index: linux/include/linux/profile.h =================================================================== --- linux.orig/include/linux/profile.h +++ linux/include/linux/profile.h @@ -23,6 +23,7 @@ struct notifier_block; /* init basic kernel profiler */ void __init profile_init(void); +void __profile_tick(int type, struct pt_regs *regs); void profile_tick(int); /* Index: linux/kernel/profile.c =================================================================== --- linux.orig/kernel/profile.c +++ linux/kernel/profile.c @@ -407,16 +407,19 @@ void profile_hits(int type, void *__pc, EXPORT_SYMBOL_GPL(profile_hits); -void profile_tick(int type) +void __profile_tick(int type, struct pt_regs *regs) { - struct pt_regs *regs = get_irq_regs(); - if (type == CPU_PROFILING && timer_hook) timer_hook(regs); if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) profile_hit(type, (void *)profile_pc(regs)); } +void profile_tick(int type) +{ + return __profile_tick(type, get_irq_regs()); +} + #ifdef CONFIG_PROC_FS #include #include Index: linux/kernel/time/tick-common.c =================================================================== --- linux.orig/kernel/time/tick-common.c +++ linux/kernel/time/tick-common.c @@ -68,7 +68,6 @@ static void tick_periodic(int cpu) } update_process_times(user_mode(get_irq_regs())); - profile_tick(CPU_PROFILING); } /* Index: linux/kernel/time/tick-sched.c =================================================================== --- linux.orig/kernel/time/tick-sched.c +++ linux/kernel/time/tick-sched.c @@ -439,7 +439,6 @@ static void tick_nohz_handler(struct clo } update_process_times(user_mode(regs)); - profile_tick(CPU_PROFILING); /* Do not restart, when we are in the idle loop */ if (ts->tick_stopped) @@ -553,7 +552,6 @@ static enum hrtimer_restart tick_sched_t */ spin_unlock(&base->lock); update_process_times(user_mode(regs)); - profile_tick(CPU_PROFILING); spin_lock(&base->lock); } 
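The nmi-profiling patch above relies on a simple cross-CPU handshake: the requester sets a per-CPU flag for every other CPU, kicks them with an NMI IPI, and spins until each target has dumped its registers (under nmi_print_lock) and cleared its own flag as the acknowledgement. Below is a minimal userspace sketch of that handshake only, not part of the patch queue: threads stand in for CPUs, a polling loop stands in for the NMI path, and every name in it (show_regs_request, cpu_poll, show_all_regs, ...) is invented for illustration.

/*
 * Toy model of the nmi_show_all_regs()/irq_show_regs_callback()
 * handshake.  Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NR_CPUS 4

static atomic_int show_regs_request[NR_CPUS];	/* mirrors nmi_show_regs[] */
static atomic_int stop;
static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER; /* mirrors nmi_print_lock */

/* What each "CPU" does when it notices its flag: dump state, then ack. */
static void show_regs_callback(int cpu)
{
	if (!atomic_load(&show_regs_request[cpu]))
		return;
	pthread_mutex_lock(&print_lock);
	printf("state dump for cpu#%d\n", cpu);
	pthread_mutex_unlock(&print_lock);
	/* Clearing the flag is the acknowledgement the requester waits for. */
	atomic_store(&show_regs_request[cpu], 0);
}

/* Stand-in for the per-CPU NMI/interrupt entry path. */
static void *cpu_poll(void *arg)
{
	int cpu = (int)(long)arg;

	while (!atomic_load(&stop)) {
		show_regs_callback(cpu);
		usleep(1000);
	}
	return NULL;
}

/* Stand-in for nmi_show_all_regs(): request a dump from every other CPU
 * and busy-wait until each one has acknowledged. */
static void show_all_regs(int self)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != self)
			atomic_store(&show_regs_request[cpu], 1);

	/* The real code sends an NMI IPI here; polling replaces that kick. */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != self)
			while (atomic_load(&show_regs_request[cpu]))
				;	/* cpu_relax() in the kernel */
}

int main(void)
{
	pthread_t tid[NR_CPUS];

	for (long cpu = 1; cpu < NR_CPUS; cpu++)
		pthread_create(&tid[cpu], NULL, cpu_poll, (void *)cpu);

	show_all_regs(0);
	printf("all cpus acknowledged\n");

	atomic_store(&stop, 1);
	for (int cpu = 1; cpu < NR_CPUS; cpu++)
		pthread_join(tid[cpu], NULL);
	return 0;
}

Note the same structure as the patch: the requester never waits while holding the print lock, it only waits on the per-CPU flags, and the lock is taken purely to serialize the dumps themselves.
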
patches/arm-preempt-config.patch0000664000077200007720000000200710655544573016253 0ustar mingomingo arch/arm/Kconfig | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) Index: linux-rt-rebase.q/arch/arm/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/arm/Kconfig +++ linux-rt-rebase.q/arch/arm/Kconfig @@ -613,18 +613,7 @@ config LOCAL_TIMERS accounting to be spread across the timer interval, preventing a "thundering herd" at every timer tick. -config PREEMPT - bool "Preemptible Kernel (EXPERIMENTAL)" - depends on EXPERIMENTAL - help - This option reduces the latency of the kernel when reacting to - real-time or interactive events by allowing a low priority process to - be preempted even if it is in kernel mode executing a system call. - This allows applications to run more reliably even when the system is - under load. - - Say Y here if you are building a kernel for a desktop, embedded - or real-time system. Say N if you are unsure. +source kernel/Kconfig.preempt config NO_IDLE_HZ bool "Dynamic tick timer" patches/radix-tree-optimistic-hist.patch0000664000077200007720000001037610655544576017765 0ustar mingomingoSubject: debug: optimistic lock histogram A simple histogram measuring the efficiency of the optimistic locking Signed-off-by: Peter Zijlstra --- fs/proc/proc_misc.c | 22 +++++++++++ lib/radix-tree.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 124 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/fs/proc/proc_misc.c =================================================================== --- linux-rt-rebase.q.orig/fs/proc/proc_misc.c +++ linux-rt-rebase.q/fs/proc/proc_misc.c @@ -289,6 +289,25 @@ static const struct file_operations proc .release = seq_release, }; +#ifdef CONFIG_RADIX_TREE_OPTIMISTIC +extern struct seq_operations optimistic_op; +static int optimistic_open(struct inode *inode, struct file *file) +{ + (void)inode; + return seq_open(file, &optimistic_op); +} + +extern ssize_t optimistic_write(struct file *, const char __user *, size_t, loff_t *); + +static struct file_operations optimistic_file_operations = { + .open = optimistic_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = optimistic_write, +}; +#endif + static int devinfo_show(struct seq_file *f, void *v) { int i = *(loff_t *) v; @@ -785,6 +804,9 @@ void __init proc_misc_init(void) entry->proc_fops = &proc_kmsg_operations; } #endif +#ifdef CONFIG_RADIX_TREE_OPTIMISTIC + create_seq_entry("radix_optimistic", 0, &optimistic_file_operations); +#endif create_seq_entry("devices", 0, &proc_devinfo_operations); create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations); #ifdef CONFIG_BLOCK Index: linux-rt-rebase.q/lib/radix-tree.c =================================================================== --- linux-rt-rebase.q.orig/lib/radix-tree.c +++ linux-rt-rebase.q/lib/radix-tree.c @@ -75,6 +75,105 @@ static unsigned long height_to_maxindex[ static struct lock_class_key radix_node_class[RADIX_TREE_MAX_PATH]; #endif +#ifdef CONFIG_RADIX_TREE_OPTIMISTIC +static DEFINE_PER_CPU(unsigned long[RADIX_TREE_MAX_PATH+1], optimistic_histogram); + +static void optimistic_hit(unsigned long height) +{ + if (height > RADIX_TREE_MAX_PATH) + height = RADIX_TREE_MAX_PATH; + + __get_cpu_var(optimistic_histogram)[height]++; +} + +#ifdef CONFIG_PROC_FS + +#include +#include + +static void *frag_start(struct seq_file *m, loff_t *pos) +{ + if (*pos < 0 || *pos > RADIX_TREE_MAX_PATH) + return NULL; + + 
m->private = (void *)(unsigned long)*pos; + return pos; +} + +static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) +{ + if (*pos < RADIX_TREE_MAX_PATH) { + (*pos)++; + (*((unsigned long *)&m->private))++; + return pos; + } + return NULL; +} + +static void frag_stop(struct seq_file *m, void *arg) +{ +} + +unsigned long get_optimistic_stat(unsigned long index) +{ + unsigned long total = 0; + int cpu; + + for_each_possible_cpu(cpu) { + total += per_cpu(optimistic_histogram, cpu)[index]; + } + return total; +} + +static int frag_show(struct seq_file *m, void *arg) +{ + unsigned long index = (unsigned long)m->private; + unsigned long hits = get_optimistic_stat(index); + + if (index == 0) + seq_printf(m, "levels skipped\thits\n"); + + if (index < RADIX_TREE_MAX_PATH) + seq_printf(m, "%9lu\t%9lu\n", index, hits); + else + seq_printf(m, "failed\t%9lu\n", hits); + + return 0; +} + +struct seq_operations optimistic_op = { + .start = frag_start, + .next = frag_next, + .stop = frag_stop, + .show = frag_show, +}; + +static void optimistic_reset(void) +{ + int cpu; + int height; + for_each_possible_cpu(cpu) { + for (height = 0; height <= RADIX_TREE_MAX_PATH; height++) + per_cpu(optimistic_histogram, cpu)[height] = 0; + } +} + +ssize_t optimistic_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + if (count) { + char c; + if (get_user(c, buf)) + return -EFAULT; + if (c == '0') + optimistic_reset(); + } + return count; +} + +#endif // CONFIG_PROC_FS +#endif // CONFIG_RADIX_TREE_OPTIMISTIC + /* * Radix tree node cache. */ @@ -461,7 +560,9 @@ radix_optimistic_lock(struct radix_tree_ BUG_ON(context->locked); spin_lock(&context->root->lock); context->locked = &context->root->lock; - } + optimistic_hit(RADIX_TREE_MAX_PATH); + } else + optimistic_hit(context->root->height - node->height); } return node; } patches/latency-tracer-variable-threshold.patch0000664000077200007720000000741210655544572021255 0ustar mingomingoFrom ce@ceag.ch Sun Jun 3 17:30:11 2007 Return-Path: Received: from toro.web-alm.net (toro.web-alm.net [62.245.132.31]) by mail.tglx.de (Postfix) with ESMTP id DC0AF65C065 for ; Sun, 3 Jun 2007 17:30:11 +0200 (CEST) Received: from toro.web-alm.net (localhost.localdomain [127.0.0.1]) by toro.web-alm.net (8.12.11.20060308/8.12.11/Web-Alm-2003112001) with ESMTP id l53FU9Dp010764 for ; Sun, 3 Jun 2007 17:30:09 +0200 Received: from thllin.ceag.ch (uucp@localhost) by toro.web-alm.net (8.12.11.20060308/8.12.10/Submit/Web-Alm-2003112001) with bsmtp id l53FU8ol010731 for tglx@linutronix.de; Sun, 3 Jun 2007 17:30:08 +0200 Received: from [192.168.255.76] (thlblade.ceag.ch [192.168.255.76]) by thllin.ceag.ch (8.12.11.20060308/8.12.11/CE-2005091901) with ESMTP id l53FMsUX003540 for ; Sun, 3 Jun 2007 17:22:55 +0200 Message-ID: <4662DCCE.8070002@ceag.ch> Date: Sun, 03 Jun 2007 17:22:54 +0200 From: Carsten Emde Organization: CE Computer Experts AG User-Agent: Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:1.8.1.2) Gecko/20070301 SeaMonkey/1.1.1 MIME-Version: 1.0 To: Thomas Gleixner Subject: [PATCH] Make threshold to print '!' in latency trace variable Content-Type: multipart/mixed; boundary="------------020807010006040805040904" X-Virus-Scanned: ClamAV 0.90.1/3340/Sun Jun 3 00:40:38 2007 on thllin.ceag.ch X-Virus-Status: Clean X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ This is a multi-part message in MIME format. 
--------------020807010006040805040904 Content-Type: text/plain; charset=ISO-8859-1; format=flowed Content-Transfer-Encoding: 8bit Thomas, this patch introduces a variable threshold to print the exclamation mark in the latency_trace output instead of the constant 100 microseconds. --cbe --------------020807010006040805040904 Content-Type: text/plain; name="linux-2.6.21.3-rt9-mark_thresh.patch" Content-Disposition: inline; filename="linux-2.6.21.3-rt9-mark_thresh.patch" Content-Transfer-Encoding: 8bit --- include/linux/clocksource.h | 1 + kernel/latency_trace.c | 4 +++- kernel/sysctl.c | 8 ++++++++ 3 files changed, 12 insertions(+), 1 deletion(-) Index: linux/include/linux/clocksource.h =================================================================== --- linux.orig/include/linux/clocksource.h +++ linux/include/linux/clocksource.h @@ -23,6 +23,7 @@ struct clocksource; extern unsigned long preempt_max_latency; extern unsigned long preempt_thresh; +extern unsigned long preempt_mark_thresh; /** * struct clocksource - hardware abstraction for a free running counter Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -1294,11 +1294,13 @@ static void notrace l_stop(struct seq_fi up(&out_mutex); } +unsigned long preempt_mark_thresh = 100; + static void print_timestamp(struct seq_file *m, unsigned long abs_usecs, unsigned long rel_usecs) { seq_printf(m, " %4ldus", abs_usecs); - if (rel_usecs > 100) + if (rel_usecs > preempt_mark_thresh) seq_puts(m, "!: "); else if (rel_usecs > 1) seq_puts(m, "+: "); Index: linux/kernel/sysctl.c =================================================================== --- linux.orig/kernel/sysctl.c +++ linux/kernel/sysctl.c @@ -354,6 +354,14 @@ static ctl_table kern_table[] = { #ifdef CONFIG_EVENT_TRACE { .ctl_name = CTL_UNNUMBERED, + .procname = "preempt_mark_thresh", + .data = &preempt_mark_thresh, + .maxlen = sizeof(preempt_mark_thresh), + .mode = 0644, + .proc_handler = &proc_doulongvec_minmax, + }, + { + .ctl_name = CTL_UNNUMBERED, .procname = "trace_enabled", .data = &trace_enabled, .maxlen = sizeof(int), patches/rt-time-starvation-fix.patch0000664000077200007720000002232410655544576017123 0ustar mingomingoHey Ingo, Noticed -rt has been updated a few times and this is still missing so I figured I'd resend it just in case you missed it: We've worked around this before, but its cropped up again. Since update_wall_time is now called from a softirq, it can be preempted by a high priority process. If its preempted for long enough, the clocksource can wrap, causing time to stop incrementing, which if the preempting process is checking the time, can cause a hard lockup. This patch forces the clocksource to be read each tick, and accumulate only the cycle count. This allows the update_wall_time to be deferred w/o fear of hardware overflow. 
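To make the overflow argument concrete before the diffstat, here is a standalone toy model (not from the patch) of the accumulation step it introduces: a simulated 32-bit free-running counter is folded into an accumulator on every "tick", so a deferred consumer only ever works with small, unwrapped deltas. The counter width, the cycles-per-tick value and all names here are assumptions made purely for illustration.

/*
 * Toy model of clocksource cycle accumulation with a 32-bit counter.
 */
#include <stdint.h>
#include <stdio.h>

#define MASK 0xffffffffULL		/* simulated 32-bit clocksource */

struct toy_clocksource {
	uint64_t cycle_last;		/* raw counter at last accumulation */
	uint64_t cycle_accumulated;	/* cycles not yet folded into xtime */
};

/* Mirrors the accumulate step: fold the delta since cycle_last. */
static void toy_accumulate(struct toy_clocksource *cs, uint64_t now)
{
	uint64_t offset = (now - cs->cycle_last) & MASK;

	cs->cycle_last = now;
	cs->cycle_accumulated += offset;
}

int main(void)
{
	struct toy_clocksource cs = { .cycle_last = 0 };
	uint64_t raw = 0;		/* 64-bit "true" cycle count */
	uint64_t per_tick = 3000000;	/* cycles per tick (assumed) */

	/* 10000 ticks is several times more than a 32-bit counter can hold
	 * at once, yet each per-tick delta stays far below the wrap point. */
	for (int tick = 0; tick < 10000; tick++) {
		raw += per_tick;
		toy_accumulate(&cs, raw & MASK);
	}

	printf("true cycles:        %llu\n", (unsigned long long)raw);
	printf("accumulated cycles: %llu\n",
	       (unsigned long long)cs.cycle_accumulated);
	return 0;
}

The raw 32-bit value wraps many times over the run, but because every per-tick delta is well under 2^32 the accumulated total still matches the true cycle count, which is exactly the property that lets the heavier update work be deferred without losing time.
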
thanks -john arch/x86_64/kernel/vsyscall.c | 5 ++++- include/linux/clocksource.h | 40 ++++++++++++++++++++++++++++++++++++++-- include/linux/time.h | 1 + kernel/time/timekeeping.c | 34 ++++++++++++++++++---------------- kernel/timer.c | 1 + 5 files changed, 62 insertions(+), 19 deletions(-) linux-2.6.21-rc5_cycles-accumulated_C7.patch ============================================ Index: linux-rt-rebase.q/arch/x86_64/kernel/vsyscall.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/vsyscall.c +++ linux-rt-rebase.q/arch/x86_64/kernel/vsyscall.c @@ -77,6 +77,7 @@ void update_vsyscall(struct timespec *wa vsyscall_gtod_data.clock.mask = clock->mask; vsyscall_gtod_data.clock.mult = clock->mult; vsyscall_gtod_data.clock.shift = clock->shift; + vsyscall_gtod_data.clock.cycle_accumulated = clock->cycle_accumulated; vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; vsyscall_gtod_data.sys_tz = sys_tz; @@ -114,7 +115,7 @@ static __always_inline long time_syscall static __always_inline void do_vgettimeofday(struct timeval * tv) { - cycle_t now, base, mask, cycle_delta; + cycle_t now, base, accumulated, mask, cycle_delta; unsigned seq; unsigned long mult, shift, nsec; cycle_t (*vread)(void); @@ -147,6 +148,7 @@ static __always_inline void do_vgettimeo } now = vread(); base = __vsyscall_gtod_data.clock.cycle_last; + accumulated = __vsyscall_gtod_data.clock.cycle_accumulated; mask = __vsyscall_gtod_data.clock.mask; mult = __vsyscall_gtod_data.clock.mult; shift = __vsyscall_gtod_data.clock.shift; @@ -157,6 +159,7 @@ static __always_inline void do_vgettimeo /* calculate interval: */ cycle_delta = (now - base) & mask; + cycle_delta += accumulated; /* convert to nsecs: */ nsec += (cycle_delta * mult) >> shift; Index: linux-rt-rebase.q/include/linux/clocksource.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/clocksource.h +++ linux-rt-rebase.q/include/linux/clocksource.h @@ -54,8 +54,12 @@ extern unsigned long preempt_mark_thresh * @flags: flags describing special properties * @vread: vsyscall based read * @resume: resume function for the clocksource, if necessary + * @cycle_last: Used internally by timekeeping core, please ignore. + * @cycle_accumulated: Used internally by timekeeping core, please ignore. * @cycle_interval: Used internally by timekeeping core, please ignore. * @xtime_interval: Used internally by timekeeping core, please ignore. + * @xtime_nsec: Used internally by timekeeping core, please ignore. + * @error: Used internally by timekeeping core, please ignore. */ struct clocksource { /* @@ -79,7 +83,7 @@ struct clocksource { #endif /* timekeeping specific data, ignore */ - cycle_t cycle_interval; + cycle_t cycle_accumulated, cycle_interval; u64 xtime_interval; /* * Second part is written at each timer interrupt @@ -172,11 +176,43 @@ static inline cycle_t clocksource_read(s } /** + * clocksource_get_cycles: - Access the clocksource's accumulated cycle value + * @cs: pointer to clocksource being read + * @now: current cycle value + * + * Uses the clocksource to return the current cycle_t value. + * NOTE!!!: This is different from clocksource_read, because it + * returns the accumulated cycle value! Must hold xtime lock! 
+ */ +static inline cycle_t clocksource_get_cycles(struct clocksource *cs, cycle_t now) +{ + cycle_t offset = (now - cs->cycle_last) & cs->mask; + offset += cs->cycle_accumulated; + return offset; +} + +/** + * clocksource_accumulate: - Accumulates clocksource cycles + * @cs: pointer to clocksource being read + * @now: current cycle value + * + * Used to avoids clocksource hardware overflow by periodically + * accumulating the current cycle delta. Must hold xtime write lock! + */ +static inline void clocksource_accumulate(struct clocksource *cs, cycle_t now) +{ + cycle_t offset = (now - cs->cycle_last) & cs->mask; + cs->cycle_last = now; + cs->cycle_accumulated += offset; +} + +/** * cyc2ns - converts clocksource cycles to nanoseconds * @cs: Pointer to clocksource * @cycles: Cycles * * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds. + * Must hold xtime lock! * * XXX - This could use some mult_lxl_ll() asm optimization */ @@ -206,7 +242,7 @@ static inline cycle_t ns2cyc(struct cloc * @length_nsec: Desired interval length in nanoseconds. * * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment - * pair and interval request. + * pair and interval request. Must hold xtime_lock! * * Unless you're the timekeeping code, you should not be using this! */ Index: linux-rt-rebase.q/include/linux/time.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/time.h +++ linux-rt-rebase.q/include/linux/time.h @@ -98,6 +98,7 @@ extern unsigned long read_persistent_clo extern int update_persistent_clock(struct timespec now); extern int no_sync_cmos_clock __read_mostly; void timekeeping_init(void); +extern void timekeeping_accumulate(void); unsigned long get_seconds(void); struct timespec current_kernel_time(void); Index: linux-rt-rebase.q/kernel/time/timekeeping.c =================================================================== --- linux-rt-rebase.q.orig/kernel/time/timekeeping.c +++ linux-rt-rebase.q/kernel/time/timekeeping.c @@ -76,16 +76,10 @@ static struct clocksource *clock; /* poi */ static inline s64 __get_nsec_offset(void) { - cycle_t cycle_now, cycle_delta; + cycle_t cycle_delta; s64 ns_offset; - /* read clocksource: */ - cycle_now = clocksource_read(clock); - - /* calculate the delta since the last update_wall_time: */ - cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; - - /* convert to nanoseconds: */ + cycle_delta = clocksource_get_cycles(clock, clocksource_read(clock)); ns_offset = cyc2ns(clock, cycle_delta); return ns_offset; @@ -232,7 +226,7 @@ static void change_clocksource(void) clock = new; clock->cycle_last = now; - + clock->cycle_accumulated = 0; clock->error = 0; clock->xtime_nsec = 0; clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); @@ -244,8 +238,14 @@ static void change_clocksource(void) clock->name); #endif } + +void timekeeping_accumulate(void) +{ + clocksource_accumulate(clock, clocksource_read(clock)); +} #else static inline void change_clocksource(void) { } +void timekeeping_accumulate(void) { } #endif /** @@ -336,6 +336,7 @@ static int timekeeping_resume(struct sys } /* re-base the last cycle value */ clock->cycle_last = clocksource_read(clock); + clock->cycle_accumulated = 0; clock->error = 0; timekeeping_suspended = 0; write_sequnlock_irqrestore(&xtime_lock, flags); @@ -483,27 +484,28 @@ static void clocksource_adjust(s64 offse */ void update_wall_time(void) { - cycle_t offset; + cycle_t cycle_now; /* Make sure we're fully resumed: */ if 
(unlikely(timekeeping_suspended)) return; #ifdef CONFIG_GENERIC_TIME - offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; + cycle_now = clocksource_read(clock); #else - offset = clock->cycle_interval; + cycle_now = clock->cycle_last + clock->cycle_interval; #endif + clocksource_accumulate(clock, cycle_now); + clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; /* normally this loop will run just once, however in the * case of lost or late ticks, it will accumulate correctly. */ - while (offset >= clock->cycle_interval) { + while (clock->cycle_accumulated >= clock->cycle_interval) { /* accumulate one interval */ clock->xtime_nsec += clock->xtime_interval; - clock->cycle_last += clock->cycle_interval; - offset -= clock->cycle_interval; + clock->cycle_accumulated -= clock->cycle_interval; if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) { clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift; @@ -517,7 +519,7 @@ void update_wall_time(void) } /* correct the clock when NTP error is too big */ - clocksource_adjust(offset); + clocksource_adjust(clock->cycle_accumulated); /* store full nanoseconds into xtime */ xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift; Index: linux-rt-rebase.q/kernel/timer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/timer.c +++ linux-rt-rebase.q/kernel/timer.c @@ -1031,6 +1031,7 @@ static void run_timer_softirq(struct sof void do_timer(unsigned long ticks) { jiffies_64 += ticks; + timekeeping_accumulate(); } #ifdef __ARCH_WANT_SYS_ALARM patches/pagefault-disable-cleanup.patch0000664000077200007720000001365210655544576017570 0ustar mingomingoSubject: [patch] clean up the page fault disabling logic From: Ingo Molnar decouple the pagefault-disabled logic from the preempt count. Signed-off-by: Ingo Molnar --- arch/arm/mm/fault.c | 2 +- arch/i386/mm/fault.c | 2 +- arch/mips/mm/fault.c | 2 +- arch/powerpc/mm/fault.c | 2 +- arch/x86_64/mm/fault.c | 2 +- include/linux/sched.h | 1 + include/linux/uaccess.h | 33 +++------------------------------ kernel/fork.c | 1 + mm/memory.c | 22 ++++++++++++++++++++++ 9 files changed, 32 insertions(+), 35 deletions(-) Index: linux-rt-rebase.q/arch/arm/mm/fault.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mm/fault.c +++ linux-rt-rebase.q/arch/arm/mm/fault.c @@ -229,7 +229,7 @@ do_page_fault(unsigned long addr, unsign * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; /* Index: linux-rt-rebase.q/arch/i386/mm/fault.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mm/fault.c +++ linux-rt-rebase.q/arch/i386/mm/fault.c @@ -354,7 +354,7 @@ fastcall notrace void __kprobes do_page_ * If we're in an interrupt, have no user context or are running in an * atomic region then we must not take the fault.. 
*/ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto bad_area_nosemaphore; /* When running in the kernel we expect faults to occur only to Index: linux-rt-rebase.q/arch/mips/mm/fault.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/mm/fault.c +++ linux-rt-rebase.q/arch/mips/mm/fault.c @@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_ * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto bad_area_nosemaphore; down_read(&mm->mmap_sem); Index: linux-rt-rebase.q/arch/powerpc/mm/fault.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/mm/fault.c +++ linux-rt-rebase.q/arch/powerpc/mm/fault.c @@ -184,7 +184,7 @@ int __kprobes do_page_fault(struct pt_re } #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/ - if (in_atomic() || mm == NULL) { + if (in_atomic() || mm == NULL || current->pagefault_disabled) { if (!user_mode(regs)) return SIGSEGV; /* in_atomic() in user mode is really bad, Index: linux-rt-rebase.q/arch/x86_64/mm/fault.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/mm/fault.c +++ linux-rt-rebase.q/arch/x86_64/mm/fault.c @@ -371,7 +371,7 @@ asmlinkage void __kprobes do_page_fault( * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (unlikely(in_atomic() || !mm)) + if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) goto bad_area_nosemaphore; again: Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -1281,6 +1281,7 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif + int pagefault_disabled; #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; int hardirqs_enabled; Index: linux-rt-rebase.q/include/linux/uaccess.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/uaccess.h +++ linux-rt-rebase.q/include/linux/uaccess.h @@ -6,37 +6,10 @@ /* * These routines enable/disable the pagefault handler in that - * it will not take any locks and go straight to the fixup table. - * - * They have great resemblance to the preempt_disable/enable calls - * and in fact they are identical; this is because currently there is - * no other way to make the pagefault handlers do this. So we do - * disable preemption but we don't necessarily care about that. + * it will not take any MM locks and go straight to the fixup table. */ -static inline void pagefault_disable(void) -{ - inc_preempt_count(); - /* - * make sure to have issued the store before a pagefault - * can hit. - */ - barrier(); -} - -static inline void pagefault_enable(void) -{ - /* - * make sure to issue those last loads/stores before enabling - * the pagefault handler again. - */ - barrier(); - dec_preempt_count(); - /* - * make sure we do.. 
- */ - barrier(); - preempt_check_resched(); -} +extern void pagefault_disable(void); +extern void pagefault_enable(void); #ifndef ARCH_HAS_NOCACHE_UACCESS Index: linux-rt-rebase.q/kernel/fork.c =================================================================== --- linux-rt-rebase.q.orig/kernel/fork.c +++ linux-rt-rebase.q/kernel/fork.c @@ -1140,6 +1140,7 @@ static struct task_struct *copy_process( p->hardirq_context = 0; p->softirq_context = 0; #endif + p->pagefault_disabled = 0; #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; Index: linux-rt-rebase.q/mm/memory.c =================================================================== --- linux-rt-rebase.q.orig/mm/memory.c +++ linux-rt-rebase.q/mm/memory.c @@ -2666,6 +2666,28 @@ unlock: return 0; } +void pagefault_disable(void) +{ + current->pagefault_disabled++; + /* + * make sure to have issued the store before a pagefault + * can hit. + */ + barrier(); +} +EXPORT_SYMBOL(pagefault_disable); + +void pagefault_enable(void) +{ + /* + * make sure to issue those last loads/stores before enabling + * the pagefault handler again. + */ + barrier(); + current->pagefault_disabled--; +} +EXPORT_SYMBOL(pagefault_enable); + /* * By the time we get here, we already hold the mm semaphore */ patches/x86_64-apic-remove-bogus-pit-synchronization.patch0000664000077200007720000000267110655544570023074 0ustar mingomingoSubject: x86_64: remove pit synchronization The APIC timer setup code synchronizes the local APIC timer to the PIT/HPET. This is pointless as the PIT and the local APIC timer frequency are not correlated and the APIC timer calibration can never be accurate enough to avoid that the local APIC timer and the PIT/HPET drift apart. Simply remove it. Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 20 -------------------- 1 file changed, 20 deletions(-) Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -791,26 +791,6 @@ static void setup_APIC_timer(unsigned in local_irq_save(flags); - /* wait for irq slice */ - if (hpet_address && hpet_use_timer) { - int trigger = hpet_readl(HPET_T0_CMP); - while (hpet_readl(HPET_COUNTER) >= trigger) - /* do nothing */ ; - while (hpet_readl(HPET_COUNTER) < trigger) - /* do nothing */ ; - } else { - int c1, c2; - outb_p(0x00, 0x43); - c2 = inb_p(0x40); - c2 |= inb_p(0x40) << 8; - do { - c1 = c2; - outb_p(0x00, 0x43); - c2 = inb_p(0x40); - c2 |= inb_p(0x40) << 8; - } while (c2 - c1 < 300); - } - irqen = ! 
cpu_isset(smp_processor_id(), timer_interrupt_broadcast_ipi_mask); __setup_APIC_LVTT(clocks, 0, irqen); patches/smaller-trace.patch0000664000077200007720000000071510655544572015315 0ustar mingomingo--- kernel/latency_trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -191,7 +191,7 @@ static int report_latency(cycle_t delta) /* * Number of per-CPU trace entries: */ -#define MAX_TRACE (65536UL*16UL) +#define MAX_TRACE (65536UL) #define CMDLINE_BYTES 16 patches/i386-prepare-sharing-hpet-code.patch0000664000077200007720000000474210655544570020210 0ustar mingomingoSubject: i386: prepare sharing the hpet code with x86_64 The hpet implementations of i386 and x8664 has been mostly the same before the clock events conversion of i386. The clock events conversion of i386 hpet is already done. So it makes sense to share the code for the x86_64 clock events conversion. Abstract out the mapping functions. Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/i386/kernel/hpet.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) Index: linux/arch/i386/kernel/hpet.c =================================================================== --- linux.orig/arch/i386/kernel/hpet.c +++ linux/arch/i386/kernel/hpet.c @@ -8,10 +8,9 @@ #include #include +#include #include -extern struct clock_event_device *global_clock_event; - #define HPET_MASK CLOCKSOURCE_MASK(32) #define HPET_SHIFT 22 @@ -22,7 +21,7 @@ extern struct clock_event_device *global * HPET address is set in acpi/boot.c, when an ACPI entry exists */ unsigned long hpet_address; -static void __iomem * hpet_virt_address; +static void __iomem *hpet_virt_address; static inline unsigned long hpet_readl(unsigned long a) { @@ -34,6 +33,17 @@ static inline void hpet_writel(unsigned writel(d, hpet_virt_address + a); } +static inline void hpet_set_mapping(void) +{ + hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); +} + +static inline void hpet_clear_mapping(void) +{ + iounmap(hpet_virt_address); + hpet_virt_address = NULL; +} + /* * HPET command line enable / disable */ @@ -83,7 +93,7 @@ static void hpet_reserve_platform_timers memset(&hd, 0, sizeof (hd)); hd.hd_phys_address = hpet_address; - hd.hd_address = hpet_virt_address; + hd.hd_address = hpet; hd.hd_nirqs = nrtimers; hd.hd_flags = HPET_DATA_PLATFORM; hpet_reserve_timer(&hd, 0); @@ -238,7 +248,7 @@ int __init hpet_enable(void) if (!is_hpet_capable()) return 0; - hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); + hpet_set_mapping(); /* * Read the period and check for a sane value: @@ -334,13 +344,11 @@ int __init hpet_enable(void) return 0; out_nohpet: - iounmap(hpet_virt_address); - hpet_virt_address = NULL; + hpet_clear_mapping(); boot_hpet_disable = 1; return 0; } - #ifdef CONFIG_HPET_EMULATE_RTC /* HPET in LegacyReplacement Mode eats up RTC interrupt line. 
When, HPET patches/undo-latency-tracing-raw-spinlock-hack.patch0000664000077200007720000000101210655544572022113 0ustar mingomingo--- kernel/latency_trace.c | 6 ------ 1 file changed, 6 deletions(-) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -38,12 +38,6 @@ int trace_use_raw_cycles = 0; -#define __raw_spinlock_t raw_spinlock_t -#define need_resched_delayed() 0 - -#define __raw_spinlock_t raw_spinlock_t -#define need_resched_delayed() 0 - #ifdef CONFIG_EVENT_TRACE /* * Convert raw cycles to usecs. patches/x86-64-tscless-vgettimeofday.patch0000664000077200007720000000354210655544576017766 0ustar mingomingoSubject: [patch] x86_64 GTOD: offer scalable vgettimeofday From: Ingo Molnar offer scalable vgettimeofday independently of whether the TSC is synchronous or not. Off by default. this patch also fixes an SMP bug in sys_vtime(): we should read __vsyscall_gtod_data.wall_time_tv.tv_sec only once. Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/vsyscall.c | 19 +++++++++++++++++++ include/asm-x86_64/vgtod.h | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/x86_64/kernel/vsyscall.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/vsyscall.c +++ linux-rt-rebase.q/arch/x86_64/kernel/vsyscall.c @@ -118,6 +118,25 @@ static __always_inline void do_vgettimeo unsigned seq; unsigned long mult, shift, nsec; cycle_t (*vread)(void); + + if (likely(__vsyscall_gtod_data.sysctl_enabled == 2)) { + struct timeval tmp; + + do { + barrier(); + tv->tv_sec = __vsyscall_gtod_data.wall_time_sec; + tv->tv_usec = __vsyscall_gtod_data.wall_time_nsec; + barrier(); + tmp.tv_sec = __vsyscall_gtod_data.wall_time_sec; + tmp.tv_usec = __vsyscall_gtod_data.wall_time_nsec; + + } while (tmp.tv_usec != tv->tv_usec || + tmp.tv_sec != tv->tv_sec); + + tv->tv_usec /= NSEC_PER_USEC; + return; + } + do { seq = read_seqbegin(&__vsyscall_gtod_data.lock); Index: linux-rt-rebase.q/include/asm-x86_64/vgtod.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/vgtod.h +++ linux-rt-rebase.q/include/asm-x86_64/vgtod.h @@ -15,7 +15,7 @@ struct vsyscall_gtod_data { struct timezone sys_tz; struct { /* extract of a clocksource struct */ cycle_t (*vread)(void); - cycle_t cycle_last; + cycle_t cycle_last, cycle_accumulated; cycle_t mask; u32 mult; u32 shift; patches/preempt-realtime-arm-bagde4.patch0000664000077200007720000000227310655544574017742 0ustar mingomingo--- arch/arm/mach-sa1100/badge4.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/arch/arm/mach-sa1100/badge4.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mach-sa1100/badge4.c +++ linux-rt-rebase.q/arch/arm/mach-sa1100/badge4.c @@ -240,15 +240,22 @@ void badge4_set_5V(unsigned subsystem, i /* detect on->off and off->on transitions */ if ((!old_5V_bitmap) && (badge4_5V_bitmap)) { /* was off, now on */ - printk(KERN_INFO "%s: enabling 5V supply rail\n", __FUNCTION__); GPSR = BADGE4_GPIO_PCMEN5V; } else if ((old_5V_bitmap) && (!badge4_5V_bitmap)) { /* was on, now off */ - printk(KERN_INFO "%s: disabling 5V supply rail\n", __FUNCTION__); GPCR = BADGE4_GPIO_PCMEN5V; } local_irq_restore(flags); + + /* detect on->off and off->on transitions */ + if ((!old_5V_bitmap) && (badge4_5V_bitmap)) { + /* was 
off, now on */ + printk(KERN_INFO "%s: enabling 5V supply rail\n", __FUNCTION__); + } else if ((old_5V_bitmap) && (!badge4_5V_bitmap)) { + /* was on, now off */ + printk(KERN_INFO "%s: disabling 5V supply rail\n", __FUNCTION__); + } } EXPORT_SYMBOL(badge4_set_5V); patches/2.6.21-rc6-lockless2-radix-tree-use-indirect-bit.patch0000664000077200007720000002325310655544576023114 0ustar mingomingoFrom: Nick Piggin Subject: [patch 2/9] radix-tree: use indirect bit Rather than sign direct radix-tree pointers with a special bit, sign the indirect one that hangs off the root. This means that, given a lookup_slot operation, the invalid result will be differentiated from the valid (previously, valid results could have the bit either set or clear). This does not affect slot lookups which occur under lock -- they can never return an invalid result. Is needed in future for lockless pagecache. Signed-off-by: Nick Piggin --- include/linux/radix-tree.h | 40 ++++++++++++++------------ lib/radix-tree.c | 69 ++++++++++++++++++++++++++++----------------- 2 files changed, 65 insertions(+), 44 deletions(-) Index: linux-rt-rebase.q/include/linux/radix-tree.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/radix-tree.h +++ linux-rt-rebase.q/include/linux/radix-tree.h @@ -26,28 +26,31 @@ #include /* - * A direct pointer (root->rnode pointing directly to a data item, - * rather than another radix_tree_node) is signalled by the low bit - * set in the root->rnode pointer. - * - * In this case root->height is also NULL, but the direct pointer tests are - * needed for RCU lookups when root->height is unreliable. + * An indirect pointer (root->rnode pointing to a radix_tree_node, rather + * than a data item) is signalled by the low bit set in the root->rnode + * pointer. + * + * In this case root->height is > 0, but the indirect pointer tests are + * needed for RCU lookups (because root->height is unreliable). The only + * time callers need worry about this is when doing a lookup_slot under + * RCU. 
*/ -#define RADIX_TREE_DIRECT_PTR 1 +#define RADIX_TREE_INDIRECT_PTR 1 +#define RADIX_TREE_RETRY ((void *)-1UL) -static inline void *radix_tree_ptr_to_direct(void *ptr) +static inline void *radix_tree_ptr_to_indirect(void *ptr) { - return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR); + return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR); } -static inline void *radix_tree_direct_to_ptr(void *ptr) +static inline void *radix_tree_indirect_to_ptr(void *ptr) { - return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR); + return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); } -static inline int radix_tree_is_direct_ptr(void *ptr) +static inline int radix_tree_is_indirect_ptr(void *ptr) { - return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR); + return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR); } /*** radix-tree API starts here ***/ @@ -130,7 +133,10 @@ do { \ */ static inline void *radix_tree_deref_slot(void **pslot) { - return radix_tree_direct_to_ptr(*pslot); + void *ret = *pslot; + if (unlikely(radix_tree_is_indirect_ptr(ret))) + ret = RADIX_TREE_RETRY; + return ret; } /** * radix_tree_replace_slot - replace item in a slot @@ -142,10 +148,8 @@ static inline void *radix_tree_deref_slo */ static inline void radix_tree_replace_slot(void **pslot, void *item) { - BUG_ON(radix_tree_is_direct_ptr(item)); - rcu_assign_pointer(*pslot, - (void *)((unsigned long)item | - ((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR))); + BUG_ON(radix_tree_is_indirect_ptr(item)); + rcu_assign_pointer(*pslot, item); } int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); Index: linux-rt-rebase.q/lib/radix-tree.c =================================================================== --- linux-rt-rebase.q.orig/lib/radix-tree.c +++ linux-rt-rebase.q/lib/radix-tree.c @@ -105,7 +105,7 @@ radix_tree_node_alloc(struct radix_tree_ } put_cpu_var(radix_tree_preloads); } - BUG_ON(radix_tree_is_direct_ptr(ret)); + BUG_ON(radix_tree_is_indirect_ptr(ret)); return ret; } @@ -245,7 +245,7 @@ static int radix_tree_extend(struct radi return -ENOMEM; /* Increase the height. */ - node->slots[0] = radix_tree_direct_to_ptr(root->rnode); + node->slots[0] = radix_tree_indirect_to_ptr(root->rnode); /* Propagate the aggregated tag info into the new root */ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { @@ -256,6 +256,7 @@ static int radix_tree_extend(struct radi newheight = root->height+1; node->height = newheight; node->count = 1; + node = radix_tree_ptr_to_indirect(node); rcu_assign_pointer(root->rnode, node); root->height = newheight; } while (height > root->height); @@ -279,7 +280,7 @@ int radix_tree_insert(struct radix_tree_ int offset; int error; - BUG_ON(radix_tree_is_direct_ptr(item)); + BUG_ON(radix_tree_is_indirect_ptr(item)); /* Make sure the tree is high enough. 
*/ if (index > radix_tree_maxindex(root->height)) { @@ -288,7 +289,8 @@ int radix_tree_insert(struct radix_tree_ return error; } - slot = root->rnode; + slot = radix_tree_indirect_to_ptr(root->rnode); + height = root->height; shift = (height-1) * RADIX_TREE_MAP_SHIFT; @@ -303,7 +305,8 @@ int radix_tree_insert(struct radix_tree_ rcu_assign_pointer(node->slots[offset], slot); node->count++; } else - rcu_assign_pointer(root->rnode, slot); + rcu_assign_pointer(root->rnode, + radix_tree_ptr_to_indirect(slot)); } /* Go a level down */ @@ -323,7 +326,7 @@ int radix_tree_insert(struct radix_tree_ BUG_ON(tag_get(node, 0, offset)); BUG_ON(tag_get(node, 1, offset)); } else { - rcu_assign_pointer(root->rnode, radix_tree_ptr_to_direct(item)); + rcu_assign_pointer(root->rnode, item); BUG_ON(root_tag_get(root, 0)); BUG_ON(root_tag_get(root, 1)); } @@ -355,11 +358,12 @@ void **radix_tree_lookup_slot(struct rad if (node == NULL) return NULL; - if (radix_tree_is_direct_ptr(node)) { + if (!radix_tree_is_indirect_ptr(node)) { if (index > 0) return NULL; return (void **)&root->rnode; } + node = radix_tree_indirect_to_ptr(node); height = node->height; if (index > radix_tree_maxindex(height)) @@ -403,11 +407,12 @@ void *radix_tree_lookup(struct radix_tre if (node == NULL) return NULL; - if (radix_tree_is_direct_ptr(node)) { + if (!radix_tree_is_indirect_ptr(node)) { if (index > 0) return NULL; - return radix_tree_direct_to_ptr(node); + return node; } + node = radix_tree_indirect_to_ptr(node); height = node->height; if (index > radix_tree_maxindex(height)) @@ -452,7 +457,7 @@ void *radix_tree_tag_set(struct radix_tr height = root->height; BUG_ON(index > radix_tree_maxindex(height)); - slot = root->rnode; + slot = radix_tree_indirect_to_ptr(root->rnode); shift = (height - 1) * RADIX_TREE_MAP_SHIFT; while (height > 0) { @@ -502,7 +507,7 @@ void *radix_tree_tag_clear(struct radix_ shift = (height - 1) * RADIX_TREE_MAP_SHIFT; pathp->node = NULL; - slot = root->rnode; + slot = radix_tree_indirect_to_ptr(root->rnode); while (height > 0) { int offset; @@ -567,8 +572,9 @@ int radix_tree_tag_get(struct radix_tree if (node == NULL) return 0; - if (radix_tree_is_direct_ptr(node)) + if (!radix_tree_is_indirect_ptr(node)) return (index == 0); + node = radix_tree_indirect_to_ptr(node); height = node->height; if (index > radix_tree_maxindex(height)) @@ -685,13 +691,13 @@ radix_tree_gang_lookup(struct radix_tree if (!node) return 0; - if (radix_tree_is_direct_ptr(node)) { + if (!radix_tree_is_indirect_ptr(node)) { if (first_index > 0) return 0; - node = radix_tree_direct_to_ptr(node); - results[0] = rcu_dereference(node); + results[0] = node; return 1; } + node = radix_tree_indirect_to_ptr(node); max_index = radix_tree_maxindex(node->height); @@ -813,13 +819,13 @@ radix_tree_gang_lookup_tag(struct radix_ if (!node) return 0; - if (radix_tree_is_direct_ptr(node)) { + if (!radix_tree_is_indirect_ptr(node)) { if (first_index > 0) return 0; - node = radix_tree_direct_to_ptr(node); - results[0] = rcu_dereference(node); + results[0] = node; return 1; } + node = radix_tree_indirect_to_ptr(node); max_index = radix_tree_maxindex(node->height); @@ -849,12 +855,22 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag static inline void radix_tree_shrink(struct radix_tree_root *root) { /* try to shrink tree height */ - while (root->height > 0 && - root->rnode->count == 1 && - root->rnode->slots[0]) { + while (root->height > 0) { struct radix_tree_node *to_free = root->rnode; void *newptr; + BUG_ON(!radix_tree_is_indirect_ptr(to_free)); + to_free = 
radix_tree_indirect_to_ptr(to_free); + + /* + * The candidate node has more than one child, or its child + * is not at the leftmost slot, we cannot shrink. + */ + if (to_free->count != 1) + break; + if (!to_free->slots[0]) + break; + /* * We don't need rcu_assign_pointer(), since we are simply * moving the node from one part of the tree to another. If @@ -863,8 +879,8 @@ static inline void radix_tree_shrink(str * one (root->rnode). */ newptr = to_free->slots[0]; - if (root->height == 1) - newptr = radix_tree_ptr_to_direct(newptr); + if (root->height > 1) + newptr = radix_tree_ptr_to_indirect(newptr); root->rnode = newptr; root->height--; /* must only free zeroed nodes into the slab */ @@ -899,12 +915,12 @@ void *radix_tree_delete(struct radix_tre goto out; slot = root->rnode; - if (height == 0 && root->rnode) { - slot = radix_tree_direct_to_ptr(slot); + if (height == 0 /* XXX: bugfix? */) { root_tag_clear_all(root); root->rnode = NULL; goto out; } + slot = radix_tree_indirect_to_ptr(slot); shift = (height - 1) * RADIX_TREE_MAP_SHIFT; pathp->node = NULL; @@ -946,7 +962,8 @@ void *radix_tree_delete(struct radix_tre radix_tree_node_free(to_free); if (pathp->node->count) { - if (pathp->node == root->rnode) + if (pathp->node == + radix_tree_indirect_to_ptr(root->rnode)) radix_tree_shrink(root); goto out; } patches/preempt-realtime-sh.patch0000664000077200007720000010077410655544574016456 0ustar mingomingoFrom lethal@linux-sh.org Fri Apr 27 10:21:47 2007 Date: Fri, 27 Apr 2007 10:21:47 +0900 From: Paul Mundt To: Thomas Gleixner , Ingo Molnar Subject: [PATCH] preempt-rt: Preliminary SH support Hi Thomas, Ingo, Here's preliminary preempt-rt support for SH. It was written against 2.6.21-rc5, but still applies cleanly. I've kept the clock events stuff out of this patch, since I'm planning on overhauling the timer stuff on SH first, but this should trickle in through 2.6.22-rc. Feel free to either merge this in to preempt-rt or hold off until the timer stuff gets done. Patch from Matsubara-san. 
Signed-off-by: Katsuya MATSUBARA Signed-off-by: Paul Mundt -- arch/sh/kernel/cpu/clock.c | 2 - arch/sh/kernel/cpu/sh4/sq.c | 2 - arch/sh/kernel/entry-common.S | 8 ++-- arch/sh/kernel/irq.c | 2 - arch/sh/kernel/process.c | 10 +++--- arch/sh/kernel/semaphore.c | 14 ++++++-- arch/sh/kernel/sh_ksyms.c | 9 ++--- arch/sh/kernel/signal.c | 7 ++++ arch/sh/kernel/time.c | 2 - arch/sh/kernel/traps.c | 2 - arch/sh/mm/cache-sh4.c | 12 +++---- arch/sh/mm/init.c | 2 - arch/sh/mm/pg-sh4.c | 4 +- arch/sh/mm/tlb-flush.c | 20 ++++++------ arch/sh/mm/tlb-sh4.c | 4 +- include/asm-sh/atomic-irq.h | 24 +++++++------- include/asm-sh/atomic.h | 8 ++-- include/asm-sh/bitops.h | 24 +++++++------- include/asm-sh/pgalloc.h | 2 - include/asm-sh/rwsem.h | 46 ++++++++++++++-------------- include/asm-sh/semaphore-helper.h | 8 ++-- include/asm-sh/semaphore.h | 61 +++++++++++++++++++++++--------------- include/asm-sh/system.h | 12 +++---- include/asm-sh/thread_info.h | 2 + 24 files changed, 158 insertions(+), 129 deletions(-) Index: linux-rt-rebase.q/arch/sh/kernel/cpu/clock.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/cpu/clock.c +++ linux-rt-rebase.q/arch/sh/kernel/cpu/clock.c @@ -28,7 +28,7 @@ #include static LIST_HEAD(clock_list); -static DEFINE_SPINLOCK(clock_lock); +static DEFINE_RAW_SPINLOCK(clock_lock); static DEFINE_MUTEX(clock_list_sem); /* Index: linux-rt-rebase.q/arch/sh/kernel/cpu/sh4/sq.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/cpu/sh4/sq.c +++ linux-rt-rebase.q/arch/sh/kernel/cpu/sh4/sq.c @@ -37,7 +37,7 @@ struct sq_mapping { }; static struct sq_mapping *sq_mapping_list; -static DEFINE_SPINLOCK(sq_mapping_lock); +static DEFINE_RAW_SPINLOCK(sq_mapping_lock); static struct kmem_cache *sq_cache; static unsigned long *sq_bitmap; Index: linux-rt-rebase.q/arch/sh/kernel/entry-common.S =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/entry-common.S +++ linux-rt-rebase.q/arch/sh/kernel/entry-common.S @@ -157,7 +157,7 @@ ENTRY(resume_userspace) mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #_TIF_WORK_MASK, r0 bt/s __restore_all - tst #_TIF_NEED_RESCHED, r0 + tst #_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED, r0 .align 2 work_pending: @@ -209,10 +209,10 @@ work_resched: tst #_TIF_WORK_MASK, r0 bt __restore_all bra work_pending - tst #_TIF_NEED_RESCHED, r0 + tst #_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_DELAYED, r0 .align 2 -1: .long schedule +1: .long __schedule 2: .long do_notify_resume 3: .long restore_all #ifdef CONFIG_TRACE_IRQFLAGS @@ -226,7 +226,7 @@ syscall_exit_work: ! 
r8: current_thread_info tst #_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP, r0 bt/s work_pending - tst #_TIF_NEED_RESCHED, r0 + tst #_TIF_NEED_RESCHED| _TIF_NEED_RESCHED_DELAYED, r0 #ifdef CONFIG_TRACE_IRQFLAGS mov.l 5f, r0 jsr @r0 Index: linux-rt-rebase.q/arch/sh/kernel/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/irq.c +++ linux-rt-rebase.q/arch/sh/kernel/irq.c @@ -82,7 +82,7 @@ static union irq_ctx *hardirq_ctx[NR_CPU static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; #endif -asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs) +asmlinkage notrace int do_IRQ(unsigned int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); #ifdef CONFIG_4KSTACKS Index: linux-rt-rebase.q/arch/sh/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/process.c +++ linux-rt-rebase.q/arch/sh/kernel/process.c @@ -64,7 +64,7 @@ void default_idle(void) clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb__after_clear_bit(); set_bl_bit(); - while (!need_resched()) + while (!need_resched() && !need_resched_delayed()) cpu_sleep(); clear_bl_bit(); set_thread_flag(TIF_POLLING_NRFLAG); @@ -85,13 +85,15 @@ void cpu_idle(void) idle = default_idle; tick_nohz_stop_sched_tick(); - while (!need_resched()) + while (!need_resched() && !need_resched_delayed()) idle(); tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); - schedule(); + local_irq_disable(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); + local_irq_enable(); check_pgt_cache(); } } Index: linux-rt-rebase.q/arch/sh/kernel/semaphore.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/semaphore.c +++ linux-rt-rebase.q/arch/sh/kernel/semaphore.c @@ -46,7 +46,7 @@ DEFINE_SPINLOCK(semaphore_wake_lock); * critical part is the inline stuff in * where we want to avoid any extra jumps and calls. 
*/ -void __up(struct semaphore *sem) +void __attribute_used__ __compat_up(struct compat_semaphore *sem) { wake_one_more(sem); wake_up(&sem->wait); @@ -104,7 +104,7 @@ void __up(struct semaphore *sem) tsk->state = TASK_RUNNING; \ remove_wait_queue(&sem->wait, &wait); -void __sched __down(struct semaphore * sem) +void __attribute_used__ __sched __compat_down(struct compat_semaphore * sem) { DOWN_VAR DOWN_HEAD(TASK_UNINTERRUPTIBLE) @@ -114,7 +114,7 @@ void __sched __down(struct semaphore * s DOWN_TAIL(TASK_UNINTERRUPTIBLE) } -int __sched __down_interruptible(struct semaphore * sem) +int __attribute_used__ __sched __compat_down_interruptible(struct compat_semaphore * sem) { int ret = 0; DOWN_VAR @@ -133,7 +133,13 @@ int __sched __down_interruptible(struct return ret; } -int __down_trylock(struct semaphore * sem) +int __attribute_used__ __compat_down_trylock(struct compat_semaphore * sem) { return waking_non_zero_trylock(sem); } + +fastcall int __sched compat_sem_is_locked(struct compat_semaphore *sem) +{ + return (int) atomic_read(&sem->count) < 0; +} + Index: linux-rt-rebase.q/arch/sh/kernel/sh_ksyms.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/sh_ksyms.c +++ linux-rt-rebase.q/arch/sh/kernel/sh_ksyms.c @@ -26,7 +26,6 @@ EXPORT_SYMBOL(sh_mv); /* platform dependent support */ EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(irq_desc); EXPORT_SYMBOL(no_irq_type); EXPORT_SYMBOL(strlen); @@ -50,9 +49,9 @@ EXPORT_SYMBOL(get_vm_area); #endif /* semaphore exports */ -EXPORT_SYMBOL(__up); -EXPORT_SYMBOL(__down); -EXPORT_SYMBOL(__down_interruptible); +EXPORT_SYMBOL(__compat_up); +EXPORT_SYMBOL(__compat_down); +EXPORT_SYMBOL(__compat_down_interruptible); EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__ndelay); @@ -141,7 +140,7 @@ EXPORT_SYMBOL(__flush_purge_region); EXPORT_SYMBOL(clear_user_page); #endif -EXPORT_SYMBOL(__down_trylock); +EXPORT_SYMBOL(__compat_down_trylock); #ifdef CONFIG_SMP EXPORT_SYMBOL(synchronize_irq); Index: linux-rt-rebase.q/arch/sh/kernel/signal.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/signal.c +++ linux-rt-rebase.q/arch/sh/kernel/signal.c @@ -566,6 +566,13 @@ static void do_signal(struct pt_regs *re struct k_sigaction ka; sigset_t *oldset; +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + raw_local_irq_enable(); + preempt_check_resched(); +#endif /* * We want the common case to go fast, which * is why we may in certain cases get here from Index: linux-rt-rebase.q/arch/sh/kernel/time.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/time.c +++ linux-rt-rebase.q/arch/sh/kernel/time.c @@ -24,7 +24,7 @@ struct sys_timer *sys_timer; /* Move this somewhere more sensible.. 
*/ -DEFINE_SPINLOCK(rtc_lock); +DEFINE_RAW_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); /* Dummy RTC ops */ Index: linux-rt-rebase.q/arch/sh/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/kernel/traps.c +++ linux-rt-rebase.q/arch/sh/kernel/traps.c @@ -77,7 +77,7 @@ static void dump_mem(const char *str, un } } -static DEFINE_SPINLOCK(die_lock); +static DEFINE_RAW_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) { Index: linux-rt-rebase.q/arch/sh/mm/cache-sh4.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/mm/cache-sh4.c +++ linux-rt-rebase.q/arch/sh/mm/cache-sh4.c @@ -189,7 +189,7 @@ void flush_cache_sigtramp(unsigned long index = CACHE_IC_ADDRESS_ARRAY | (v & current_cpu_data.icache.entry_mask); - local_irq_save(flags); + raw_local_irq_save(flags); jump_to_P2(); for (i = 0; i < current_cpu_data.icache.ways; @@ -198,7 +198,7 @@ void flush_cache_sigtramp(unsigned long back_to_P1(); wmb(); - local_irq_restore(flags); + raw_local_irq_restore(flags); } static inline void flush_cache_4096(unsigned long start, @@ -214,10 +214,10 @@ static inline void flush_cache_4096(unsi (start < CACHE_OC_ADDRESS_ARRAY)) exec_offset = 0x20000000; - local_irq_save(flags); + raw_local_irq_save(flags); __flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset); - local_irq_restore(flags); + raw_local_irq_restore(flags); } /* @@ -245,7 +245,7 @@ static inline void flush_icache_all(void { unsigned long flags, ccr; - local_irq_save(flags); + raw_local_irq_save(flags); jump_to_P2(); /* Flush I-cache */ @@ -259,7 +259,7 @@ static inline void flush_icache_all(void */ back_to_P1(); - local_irq_restore(flags); + raw_local_irq_restore(flags); } void flush_dcache_all(void) Index: linux-rt-rebase.q/arch/sh/mm/init.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/mm/init.c +++ linux-rt-rebase.q/arch/sh/mm/init.c @@ -21,7 +21,7 @@ #include #include -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); +DEFINE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); pgd_t swapper_pg_dir[PTRS_PER_PGD]; void (*copy_page)(void *from, void *to); Index: linux-rt-rebase.q/arch/sh/mm/pg-sh4.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/mm/pg-sh4.c +++ linux-rt-rebase.q/arch/sh/mm/pg-sh4.c @@ -26,9 +26,9 @@ static inline void *kmap_coherent(struct vaddr = __fix_to_virt(FIX_CMAP_END - idx); pte = mk_pte(page, PAGE_KERNEL); - local_irq_save(flags); + raw_local_irq_save(flags); flush_tlb_one(get_asid(), vaddr); - local_irq_restore(flags); + raw_local_irq_restore(flags); update_mmu_cache(NULL, vaddr, pte); Index: linux-rt-rebase.q/arch/sh/mm/tlb-flush.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/mm/tlb-flush.c +++ linux-rt-rebase.q/arch/sh/mm/tlb-flush.c @@ -24,7 +24,7 @@ void local_flush_tlb_page(struct vm_area asid = cpu_asid(cpu, vma->vm_mm); page &= PAGE_MASK; - local_irq_save(flags); + raw_local_irq_save(flags); if (vma->vm_mm != current->mm) { saved_asid = get_asid(); set_asid(asid); @@ -32,7 +32,7 @@ void local_flush_tlb_page(struct vm_area local_flush_tlb_one(asid, page); if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); - local_irq_restore(flags); + raw_local_irq_restore(flags); } } @@ -46,7 +46,7 @@ void local_flush_tlb_range(struct vm_are unsigned long flags; int size; - local_irq_save(flags); + 
raw_local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ cpu_context(cpu, mm) = NO_CONTEXT; @@ -71,7 +71,7 @@ void local_flush_tlb_range(struct vm_are if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); } - local_irq_restore(flags); + raw_local_irq_restore(flags); } } @@ -81,7 +81,7 @@ void local_flush_tlb_kernel_range(unsign unsigned long flags; int size; - local_irq_save(flags); + raw_local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ local_flush_tlb_all(); @@ -100,7 +100,7 @@ void local_flush_tlb_kernel_range(unsign } set_asid(saved_asid); } - local_irq_restore(flags); + raw_local_irq_restore(flags); } void local_flush_tlb_mm(struct mm_struct *mm) @@ -112,11 +112,11 @@ void local_flush_tlb_mm(struct mm_struct if (cpu_context(cpu, mm) != NO_CONTEXT) { unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); cpu_context(cpu, mm) = NO_CONTEXT; if (mm == current->mm) activate_context(mm, cpu); - local_irq_restore(flags); + raw_local_irq_restore(flags); } } @@ -131,10 +131,10 @@ void local_flush_tlb_all(void) * TF-bit for SH-3, TI-bit for SH-4. * It's same position, bit #2. */ - local_irq_save(flags); + raw_local_irq_save(flags); status = ctrl_inl(MMUCR); status |= 0x04; ctrl_outl(status, MMUCR); ctrl_barrier(); - local_irq_restore(flags); + raw_local_irq_restore(flags); } Index: linux-rt-rebase.q/arch/sh/mm/tlb-sh4.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh/mm/tlb-sh4.c +++ linux-rt-rebase.q/arch/sh/mm/tlb-sh4.c @@ -51,7 +51,7 @@ void update_mmu_cache(struct vm_area_str } } - local_irq_save(flags); + raw_local_irq_save(flags); /* Set PTEH register */ vpn = (address & MMU_VPN_MASK) | get_asid(); @@ -74,7 +74,7 @@ void update_mmu_cache(struct vm_area_str /* Load the TLB */ asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); - local_irq_restore(flags); + raw_local_irq_restore(flags); } void local_flush_tlb_one(unsigned long asid, unsigned long page) Index: linux-rt-rebase.q/include/asm-sh/atomic-irq.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-sh/atomic-irq.h +++ linux-rt-rebase.q/include/asm-sh/atomic-irq.h @@ -10,29 +10,29 @@ static inline void atomic_add(int i, ato { unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); *(long *)v += i; - local_irq_restore(flags); + raw_local_irq_restore(flags); } static inline void atomic_sub(int i, atomic_t *v) { unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); *(long *)v -= i; - local_irq_restore(flags); + raw_local_irq_restore(flags); } static inline int atomic_add_return(int i, atomic_t *v) { unsigned long temp, flags; - local_irq_save(flags); + raw_local_irq_save(flags); temp = *(long *)v; temp += i; *(long *)v = temp; - local_irq_restore(flags); + raw_local_irq_restore(flags); return temp; } @@ -41,11 +41,11 @@ static inline int atomic_sub_return(int { unsigned long temp, flags; - local_irq_save(flags); + raw_local_irq_save(flags); temp = *(long *)v; temp -= i; *(long *)v = temp; - local_irq_restore(flags); + raw_local_irq_restore(flags); return temp; } @@ -54,18 +54,18 @@ static inline void atomic_clear_mask(uns { unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); *(long *)v &= ~mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); } 
static inline void atomic_set_mask(unsigned int mask, atomic_t *v) { unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); *(long *)v |= mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); } #endif /* __ASM_SH_ATOMIC_IRQ_H */ Index: linux-rt-rebase.q/include/asm-sh/atomic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-sh/atomic.h +++ linux-rt-rebase.q/include/asm-sh/atomic.h @@ -49,11 +49,11 @@ static inline int atomic_cmpxchg(atomic_ int ret; unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); ret = v->counter; if (likely(ret == old)) v->counter = new; - local_irq_restore(flags); + raw_local_irq_restore(flags); return ret; } @@ -65,11 +65,11 @@ static inline int atomic_add_unless(atom int ret; unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); ret = v->counter; if (ret != u) v->counter += a; - local_irq_restore(flags); + raw_local_irq_restore(flags); return ret != u; } Index: linux-rt-rebase.q/include/asm-sh/bitops.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-sh/bitops.h +++ linux-rt-rebase.q/include/asm-sh/bitops.h @@ -14,9 +14,9 @@ static inline void set_bit(int nr, volat a += nr >> 5; mask = 1 << (nr & 0x1f); - local_irq_save(flags); + raw_local_irq_save(flags); *a |= mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); } /* @@ -32,9 +32,9 @@ static inline void clear_bit(int nr, vol a += nr >> 5; mask = 1 << (nr & 0x1f); - local_irq_save(flags); + raw_local_irq_save(flags); *a &= ~mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); } static inline void change_bit(int nr, volatile void * addr) @@ -45,9 +45,9 @@ static inline void change_bit(int nr, vo a += nr >> 5; mask = 1 << (nr & 0x1f); - local_irq_save(flags); + raw_local_irq_save(flags); *a ^= mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); } static inline int test_and_set_bit(int nr, volatile void * addr) @@ -58,10 +58,10 @@ static inline int test_and_set_bit(int n a += nr >> 5; mask = 1 << (nr & 0x1f); - local_irq_save(flags); + raw_local_irq_save(flags); retval = (mask & *a) != 0; *a |= mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); return retval; } @@ -74,10 +74,10 @@ static inline int test_and_clear_bit(int a += nr >> 5; mask = 1 << (nr & 0x1f); - local_irq_save(flags); + raw_local_irq_save(flags); retval = (mask & *a) != 0; *a &= ~mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); return retval; } @@ -90,10 +90,10 @@ static inline int test_and_change_bit(in a += nr >> 5; mask = 1 << (nr & 0x1f); - local_irq_save(flags); + raw_local_irq_save(flags); retval = (mask & *a) != 0; *a ^= mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); return retval; } Index: linux-rt-rebase.q/include/asm-sh/pgalloc.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-sh/pgalloc.h +++ linux-rt-rebase.q/include/asm-sh/pgalloc.h @@ -13,7 +13,7 @@ static inline void pmd_populate_kernel(s set_pmd(pmd, __pmd((unsigned long)pte)); } -static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, +static inline void notrace pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) { set_pmd(pmd, __pmd((unsigned long)page_address(pte))); Index: linux-rt-rebase.q/include/asm-sh/rwsem.h =================================================================== --- 
linux-rt-rebase.q.orig/include/asm-sh/rwsem.h +++ linux-rt-rebase.q/include/asm-sh/rwsem.h @@ -19,7 +19,7 @@ /* * the semaphore definition */ -struct rw_semaphore { +struct compat_rw_semaphore { long count; #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 @@ -27,7 +27,7 @@ struct rw_semaphore { #define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; + raw_spinlock_t wait_lock; struct list_head wait_list; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -45,25 +45,25 @@ struct rw_semaphore { LIST_HEAD_INIT((name).wait_list) \ __RWSEM_DEP_MAP_INIT(name) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define COMPAT_DECLARE_RWSEM(name) \ + struct compat_rw_semaphore name = __RWSEM_INITIALIZER(name) -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_down_read_failed(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_down_write_failed(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_wake(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_downgrade_wake(struct compat_rw_semaphore *sem); -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, +extern void __compat_init_rwsem(struct rw_semaphore *sem, const char *name, struct lock_class_key *key); -#define init_rwsem(sem) \ +#define compat_init_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __compat_init_rwsem((sem), #sem, &__key); \ } while (0) -static inline void init_rwsem(struct rw_semaphore *sem) +static inline void compat_init_rwsem(struct rw_semaphore *sem) { sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); @@ -73,7 +73,7 @@ static inline void init_rwsem(struct rw_ /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct compat_rw_semaphore *sem) { if (atomic_inc_return((atomic_t *)(&sem->count)) > 0) smp_wmb(); @@ -81,7 +81,7 @@ static inline void __down_read(struct rw rwsem_down_read_failed(sem); } -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct compat_rw_semaphore *sem) { int tmp; @@ -98,7 +98,7 @@ static inline int __down_read_trylock(st /* * lock for writing */ -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write(struct compat_rw_semaphore *sem) { int tmp; @@ -110,7 +110,7 @@ static inline void __down_write(struct r rwsem_down_write_failed(sem); } -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct compat_rw_semaphore *sem) { int tmp; @@ -123,7 +123,7 @@ static inline int __down_write_trylock(s /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct compat_rw_semaphore *sem) { int tmp; @@ -136,7 +136,7 @@ static inline void __up_read(struct rw_s /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct compat_rw_semaphore 
*sem) { smp_wmb(); if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, @@ -147,7 +147,7 @@ static inline void __up_write(struct rw_ /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct compat_rw_semaphore *sem) { atomic_add(delta, (atomic_t *)(&sem->count)); } @@ -155,7 +155,7 @@ static inline void rwsem_atomic_add(int /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct compat_rw_semaphore *sem) { int tmp; @@ -165,7 +165,7 @@ static inline void __downgrade_write(str rwsem_downgrade_wake(sem); } -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +static inline void __down_write_nested(struct compat_rw_semaphore *sem, int subclass) { __down_write(sem); } @@ -173,13 +173,13 @@ static inline void __down_write_nested(s /* * implement exchange and add functionality */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct compat_rw_semaphore *sem) { smp_mb(); return atomic_add_return(delta, (atomic_t *)(&sem->count)); } -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline int rwsem_is_locked(struct compat_rw_semaphore *sem) { return (sem->count != 0); } Index: linux-rt-rebase.q/include/asm-sh/semaphore-helper.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-sh/semaphore-helper.h +++ linux-rt-rebase.q/include/asm-sh/semaphore-helper.h @@ -14,12 +14,12 @@ * This is trivially done with load_locked/store_cond, * which we have. Let the rest of the losers suck eggs. */ -static __inline__ void wake_one_more(struct semaphore * sem) +static __inline__ void wake_one_more(struct compat_semaphore * sem) { atomic_inc((atomic_t *)&sem->sleepers); } -static __inline__ int waking_non_zero(struct semaphore *sem) +static __inline__ int waking_non_zero(struct compat_semaphore *sem) { unsigned long flags; int ret = 0; @@ -43,7 +43,7 @@ static __inline__ int waking_non_zero(st * protected by the spinlock in order to make atomic this atomic_inc() with the * atomic_read() in wake_one_more(), otherwise we can race. -arca */ -static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, +static __inline__ int waking_non_zero_interruptible(struct compat_semaphore *sem, struct task_struct *tsk) { unsigned long flags; @@ -70,7 +70,7 @@ static __inline__ int waking_non_zero_in * protected by the spinlock in order to make atomic this atomic_inc() with the * atomic_read() in wake_one_more(), otherwise we can race. 
-arca */ -static __inline__ int waking_non_zero_trylock(struct semaphore *sem) +static __inline__ int waking_non_zero_trylock(struct compat_semaphore *sem) { unsigned long flags; int ret = 1; Index: linux-rt-rebase.q/include/asm-sh/semaphore.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-sh/semaphore.h +++ linux-rt-rebase.q/include/asm-sh/semaphore.h @@ -20,29 +20,36 @@ #include #include -struct semaphore { +/* + * On !PREEMPT_RT all semaphores are compat: + */ +#ifndef CONFIG_PREEMPT_RT +# define compat_semaphore semaphore +#endif + +struct compat_semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; }; -#define __SEMAPHORE_INITIALIZER(name, n) \ +#define __COMPAT_SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .sleepers = 0, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) +#define __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct compat_semaphore name = __COMPAT_SEMAPHORE_INITIALIZER(name,count) -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) -#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) +#define COMPAT_DECLARE_MUTEX(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,1) +#define COMPAT_DECLARE_MUTEX_LOCKED(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,0) -static inline void sema_init (struct semaphore *sem, int val) +static inline void compat_sema_init (struct compat_semaphore *sem, int val) { /* - * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); + * *sem = (struct compat_semaphore)__SEMAPHORE_INITIALIZER((*sem),val); * * i'd rather use the more flexible initialization above, but sadly * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. 
@@ -52,14 +59,14 @@ static inline void sema_init (struct sem init_waitqueue_head(&sem->wait); } -static inline void init_MUTEX (struct semaphore *sem) +static inline void compat_init_MUTEX (struct compat_semaphore *sem) { - sema_init(sem, 1); + compat_sema_init(sem, 1); } -static inline void init_MUTEX_LOCKED (struct semaphore *sem) +static inline void compat_init_MUTEX_LOCKED (struct compat_semaphore *sem) { - sema_init(sem, 0); + compat_sema_init(sem, 0); } #if 0 @@ -69,36 +76,36 @@ asmlinkage int __down_failed_trylock(vo asmlinkage void __up_wakeup(void /* special register calling convention */); #endif -asmlinkage void __down(struct semaphore * sem); -asmlinkage int __down_interruptible(struct semaphore * sem); -asmlinkage int __down_trylock(struct semaphore * sem); -asmlinkage void __up(struct semaphore * sem); +asmlinkage void __compat_down(struct compat_semaphore * sem); +asmlinkage int __compat_down_interruptible(struct compat_semaphore * sem); +asmlinkage int __compat_down_trylock(struct compat_semaphore * sem); +asmlinkage void __compat_up(struct compat_semaphore * sem); extern spinlock_t semaphore_wake_lock; -static inline void down(struct semaphore * sem) +static inline void compat_down(struct compat_semaphore * sem) { might_sleep(); if (atomic_dec_return(&sem->count) < 0) - __down(sem); + __compat_down(sem); } -static inline int down_interruptible(struct semaphore * sem) +static inline int compat_down_interruptible(struct compat_semaphore * sem) { int ret = 0; might_sleep(); if (atomic_dec_return(&sem->count) < 0) - ret = __down_interruptible(sem); + ret = __compat_down_interruptible(sem); return ret; } -static inline int down_trylock(struct semaphore * sem) +static inline int compat_down_trylock(struct compat_semaphore * sem) { int ret = 0; if (atomic_dec_return(&sem->count) < 0) - ret = __down_trylock(sem); + ret = __compat_down_trylock(sem); return ret; } @@ -106,11 +113,17 @@ static inline int down_trylock(struct se * Note! This is subtle. We jump to wake people up only if * the semaphore was negative (== somebody was waiting on it). 
*/ -static inline void up(struct semaphore * sem) +static inline void compat_up(struct compat_semaphore * sem) { if (atomic_inc_return(&sem->count) <= 0) - __up(sem); + __compat_up(sem); } +extern int compat_sem_is_locked(struct compat_semaphore *sem); + +#define compat_sema_count(sem) atomic_read(&(sem)->count) + +#include + #endif #endif /* __ASM_SH_SEMAPHORE_H */ Index: linux-rt-rebase.q/include/asm-sh/system.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-sh/system.h +++ linux-rt-rebase.q/include/asm-sh/system.h @@ -158,10 +158,10 @@ static inline unsigned long xchg_u32(vol { unsigned long flags, retval; - local_irq_save(flags); + raw_local_irq_save(flags); retval = *m; *m = val; - local_irq_restore(flags); + raw_local_irq_restore(flags); return retval; } @@ -169,10 +169,10 @@ static inline unsigned long xchg_u8(vola { unsigned long flags, retval; - local_irq_save(flags); + raw_local_irq_save(flags); retval = *m; *m = val & 0xff; - local_irq_restore(flags); + raw_local_irq_restore(flags); return retval; } @@ -207,11 +207,11 @@ static inline unsigned long __cmpxchg_u3 __u32 retval; unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); retval = *m; if (retval == old) *m = new; - local_irq_restore(flags); /* implies memory barrier */ + raw_local_irq_restore(flags); /* implies memory barrier */ return retval; } Index: linux-rt-rebase.q/include/asm-sh/thread_info.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-sh/thread_info.h +++ linux-rt-rebase.q/include/asm-sh/thread_info.h @@ -111,6 +111,7 @@ static inline struct thread_info *curren #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */ #define TIF_SINGLESTEP 4 /* singlestepping active */ +#define TIF_NEED_RESCHED_DELAYED 6 /* reschedule on return to userspace */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 @@ -121,6 +122,7 @@ static inline struct thread_info *curren #define _TIF_NEED_RESCHED (1< To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [patch 1/4] powerpc 2.6.21-rt1: fix a build breakage by adding __raw_*_relax() macros Add missing macros to fix a build breakage for PREEMPT_DESKTOP. 
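The __raw_*_relax() hooks are what the generic lock-spinning loops call while busy-waiting on a contended raw lock, so leaving them undefined breaks the build as soon as those loops are compiled in. A simplified sketch of the kind of spin-wait that ends up calling them (hand-written illustration of the pattern, not code taken from this tree; the function name is made up and the lock layout is assumed to match the generic raw spinlock structure):

static void example_spin_wait(raw_spinlock_t *lock)
{
	/* poll until the owner releases the lock, relaxing the CPU in between */
	while (!__raw_spin_trylock(&lock->raw_lock))
		__raw_spin_relax(&lock->raw_lock);	/* now cpu_relax() on powerpc */
}

The header's existing _raw_read_relax()/_raw_write_relax() definitions yield to the lock holder via __rw_yield(); the newly added __raw_*_relax() variants simply fall back to cpu_relax().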
Signed-off-by: Tsutomu OWA -- owa --- include/asm-powerpc/spinlock.h | 4 ++++ 1 file changed, 4 insertions(+) Index: linux-rt-rebase.q/include/asm-powerpc/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/spinlock.h +++ linux-rt-rebase.q/include/asm-powerpc/spinlock.h @@ -289,5 +289,9 @@ static __inline__ void __raw_write_unloc #define _raw_read_relax(lock) __rw_yield(lock) #define _raw_write_relax(lock) __rw_yield(lock) +#define __raw_spin_relax(lock) cpu_relax() +#define __raw_read_relax(lock) cpu_relax() +#define __raw_write_relax(lock) cpu_relax() + #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ patches/ppc-add-mcount.patch0000664000077200007720000001011610655544571015370 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Mon May 14 10:15:30 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=UNPARSEABLE_RELAY autolearn=ham version=3.1.7-deb Received: from imx12.toshiba.co.jp (imx12.toshiba.co.jp [61.202.160.132]) by mail.tglx.de (Postfix) with ESMTP id 7006365C065 for ; Mon, 14 May 2007 10:15:30 +0200 (CEST) Received: from wall11.toshiba.co.jp (wall11 [133.199.90.149]) by imx12.toshiba.co.jp with ESMTP id l4E8FKmi007480; Mon, 14 May 2007 17:15:20 +0900 (JST) Received: (from root@localhost) by wall11.toshiba.co.jp id l4E8FKaH003434; Mon, 14 May 2007 17:15:20 +0900 (JST) Received: from ovp11.toshiba.co.jp [133.199.90.148] by wall11.toshiba.co.jp with ESMTP id TAA03430; Mon, 14 May 2007 17:15:20 +0900 Received: from mx2.toshiba.co.jp (localhost [127.0.0.1]) by ovp11.toshiba.co.jp with ESMTP id l4E8FJCq025717; Mon, 14 May 2007 17:15:19 +0900 (JST) Received: from rdcgw.rdc.toshiba.co.jp by toshiba.co.jp id l4E8FJ3Y013473; Mon, 14 May 2007 17:15:19 +0900 (JST) Received: from island.swc.toshiba.co.jp by rdcgw.rdc.toshiba.co.jp (8.8.8p2+Sun/3.7W) with ESMTP id RAA01521; Mon, 14 May 2007 17:15:18 +0900 (JST) Received: from forest.toshiba.co.jp (forest [133.196.122.2]) by island.swc.toshiba.co.jp (Postfix) with ESMTP id 87FCB40002; Mon, 14 May 2007 17:15:10 +0900 (JST) Date: Mon, 14 May 2007 17:15:10 +0900 Message-ID: From: Tsutomu OWA To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [patch 1/5] powerpc 2.6.21-rt1: add mcount() and _mcount() In-Reply-To: References: User-Agent: Wanderlust/2.8.1 (Something) Emacs/20.7 Mule/4.0 (HANANOEN) Organization: Software Engineering Center, TOSHIBA. MIME-Version: 1.0 (generated by SEMI 1.14.4 - "Hosorogi") Content-Type: text/plain; charset=US-ASCII X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit add mcount() and _mcount() for latency trace support. 
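In C terms the new assembly below implements the usual -pg entry hook: every function built with -pg calls _mcount() on entry, and the hook hands the current and parent return addresses to the tracer only while mcount_enabled is set. A rough C equivalent, for illustration only (the real hook must stay in assembly so it does not clobber the parameter-passing registers; the helper name here is hypothetical):

extern int mcount_enabled;
extern void __trace(unsigned long eip, unsigned long parent_eip);

static void notrace mcount_c_sketch(void)
{
	if (!mcount_enabled)
		return;

	/* eip = caller of mcount, parent_eip = that caller's own caller */
	__trace((unsigned long)__builtin_return_address(0),
		(unsigned long)__builtin_return_address(1));
}
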
Signed-off-by: Tsutomu OWA -- owa --- arch/powerpc/kernel/entry_64.S | 60 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) Index: linux/arch/powerpc/kernel/entry_64.S =================================================================== --- linux.orig/arch/powerpc/kernel/entry_64.S +++ linux/arch/powerpc/kernel/entry_64.S @@ -828,3 +828,63 @@ _GLOBAL(enter_prom) ld r0,16(r1) mtlr r0 blr + +#ifdef CONFIG_MCOUNT +/* + * code almost taken from entry_32.S + */ +#define MCOUNT_FRAME_SIZE 32 +_GLOBAL(mcount) + stdu r1,-MCOUNT_FRAME_SIZE(r1) + mflr r3 + + LOAD_REG_ADDR(r5,mcount_enabled) + lwz r5,0(r5) + std r3,MCOUNT_FRAME_SIZE+16(r1) + cmpwi r5,0 + beq 1f + + /* r3 contains lr (eip), put parent lr (parent_eip) in r4 */ + ld r4,MCOUNT_FRAME_SIZE(r1) + ld r4,16(r4) + bl .__trace + nop +1: + ld r0,MCOUNT_FRAME_SIZE+16(r1) + mtlr r0 + addi r1,r1,MCOUNT_FRAME_SIZE + blr + +/* + * Based on glibc-2.4/sysdeps/powerpc/powerpc64/ppc-mcount.S + * + * We don't need to save the parameter-passing registers as gcc takes + * care of that for us. Thus this function looks fairly normal. + * In fact, the generic code would work for us. + */ +_GLOBAL(_mcount) + /* return if we're in real mode. */ + mfmsr r3 + andi. r0,r3,MSR_IR|MSR_DR /* see if relocation is on? */ + beqlr /* if not, do nothing. */ + /* we're in translation mode. keep going. */ + mflr r3 + ld r11,0(r1) /* load back chain ptr */ + stdu r1,-STACK_FRAME_OVERHEAD(r1) + std r3,STACK_FRAME_OVERHEAD+16(r1) + ld r4,16(r11) /* LR in back chain */ + LOAD_REG_ADDR(r5,mcount_enabled) + lwz r5,0(r5) + cmpwi r5,0 /* see if mcount_enabled? */ + beq 1f /* if disabled, then skip */ + + /* r3 contains lr (eip), put parent lr (parent_eip) in r4 */ + bl .__trace + nop +1: + ld r0,STACK_FRAME_OVERHEAD+16(r1) /* restore saved LR */ + mtlr r0 + addi r1,r1,STACK_FRAME_OVERHEAD + blr + +#endif /* CONFIG_MCOUNT */ patches/add-notrace.patch0000664000077200007720000000064110655544571014740 0ustar mingomingo--- include/linux/linkage.h | 2 ++ 1 file changed, 2 insertions(+) Index: linux/include/linux/linkage.h =================================================================== --- linux.orig/include/linux/linkage.h +++ linux/include/linux/linkage.h @@ -3,6 +3,8 @@ #include +#define notrace __attribute ((no_instrument_function)) + #ifdef __cplusplus #define CPP_ASMLINKAGE extern "C" #else patches/quicklist-release-before-free-page.patch0000664000077200007720000001527210655544577021312 0ustar mingomingoFrom peterz@infradead.org Mon Jul 23 21:40:44 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=none autolearn=ham version=3.1.7-deb Received: from mx2.mail.elte.hu (mx2.mail.elte.hu [157.181.151.9]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (No client certificate requested) by mail.tglx.de (Postfix) with ESMTP id CAC4B65C003 for ; Mon, 23 Jul 2007 21:40:44 +0200 (CEST) Received: from elvis.elte.hu ([157.181.1.14]) by mx2.mail.elte.hu with esmtp (Exim) id 1ID3lr-0000tI-MW from for ; Mon, 23 Jul 2007 21:40:43 +0200 Received: by elvis.elte.hu (Postfix, from userid 1004) id 1D9593E2153; Mon, 23 Jul 2007 21:40:43 +0200 (CEST) Resent-From: Ingo Molnar Resent-Date: Mon, 23 Jul 2007 21:40:40 +0200 Resent-Message-ID: <20070723194040.GA7831@elte.hu> Resent-To: Thomas Gleixner X-Original-To: mingo@elvis.elte.hu Delivered-To: mingo@elvis.elte.hu Received: from mx3.mail.elte.hu (mx3.mail.elte.hu [157.181.1.138]) by elvis.elte.hu (Postfix) with ESMTP id 
03EA13E214E for ; Mon, 23 Jul 2007 18:33:06 +0200 (CEST) Received: from pentafluge.infradead.org ([213.146.154.40]) by mx3.mail.elte.hu with esmtp (Exim) id 1ID0qK-0003mK-9A from for ; Mon, 23 Jul 2007 18:33:08 +0200 Received: from i55087.upc-i.chello.nl ([62.195.55.87] helo=[192.168.0.111]) by pentafluge.infradead.org with esmtpsa (Exim 4.63 #1 (Red Hat Linux)) id 1ID0qB-0003Kf-Tf; Mon, 23 Jul 2007 17:33:00 +0100 Subject: Re: [PATCH] release quicklist before free_page From: Peter Zijlstra To: Daniel Walker Cc: mingo@elte.hu, paulmck@linux.vnet.ibm.com, linux-kernel@vger.kernel.org, linux-rt-users@vger.kernel.org In-Reply-To: <20070723152129.036573829@mvista.com> References: <20070723152129.036573829@mvista.com> Content-Type: text/plain Date: Mon, 23 Jul 2007 18:32:58 +0200 Message-Id: <1185208378.8197.20.camel@twins> Mime-Version: 1.0 X-Mailer: Evolution 2.10.1 X-ELTE-VirusStatus: clean X-ELTE-SpamScore: -1.0 X-ELTE-SpamLevel: X-ELTE-SpamCheck: no X-ELTE-SpamVersion: ELTE 2.0 X-ELTE-SpamCheck-Details: score=-1.0 required=5.9 tests=BAYES_00 autolearn=no SpamAssassin version=3.0.3 -1.0 BAYES_00 BODY: Bayesian spam probability is 0 to 1% [score: 0.0000] Received-SPF: softfail (mx2: transitioning domain of elte.hu does not designate 157.181.1.14 as permitted sender) client-ip=157.181.1.14; envelope-from=mingo@elte.hu; helo=elvis.elte.hu; X-ELTE-VirusStatus: clean X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit On Mon, 2007-07-23 at 08:21 -0700, Daniel Walker wrote: > Resolves, > > BUG: sleeping function called from invalid context cc1(29651) at kernel/rtmutex.c:636 > in_atomic():1 [00000001], irqs_disabled():0 > [] __might_sleep+0xf3/0xf9 > [] __rt_spin_lock+0x21/0x3c > [] get_zone_pcp+0x20/0x29 > [] free_hot_cold_page+0xdc/0x167 > [] add_preempt_count+0x12/0xcc > [] pgd_dtor+0x0/0x1 > [] quicklist_trim+0xb7/0xe3 > [] check_pgt_cache+0x19/0x1c > [] free_pgtables+0x54/0x12c > [] add_preempt_count+0x12/0xcc > [] unmap_region+0xeb/0x13b > > > It looks like the quicklist isn't used after a few variables are evaluated. > So no need to keep preemption disabled over the whole function. Not quite, it uses preempt_disable() to avoid migration and stick to a cpu. Without that it might end up freeing pages from another quicklist. How about this - compile tested only --- We cannot call the page allocator with preemption-disabled, use the per_cpu_locked construct to allow preemption while guarding the per cpu data. Signed-off-by: Peter Zijlstra --- include/linux/quicklist.h | 19 +++++++++++++++---- mm/quicklist.c | 9 +++++---- 2 files changed, 20 insertions(+), 8 deletions(-) Index: linux-rt-rebase.q/include/linux/quicklist.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/quicklist.h +++ linux-rt-rebase.q/include/linux/quicklist.h @@ -18,7 +18,7 @@ struct quicklist { int nr_pages; }; -DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; +DECLARE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK]; /* * The two key functions quicklist_alloc and quicklist_free are inline so @@ -30,19 +30,30 @@ DECLARE_PER_CPU(struct quicklist, quickl * The fast patch in quicklist_alloc touched only a per cpu cacheline and * the first cacheline of the page itself. There is minmal overhead involved. 
*/ -static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) +static inline void *__quicklist_alloc(int cpu, int nr, gfp_t flags, void (*ctor)(void *)) { struct quicklist *q; void **p = NULL; - q =&get_cpu_var(quicklist)[nr]; + q = &__get_cpu_var_locked(quicklist, cpu)[nr]; p = q->page; if (likely(p)) { q->page = p[0]; p[0] = NULL; q->nr_pages--; } - put_cpu_var(quicklist); + return p; +} + +static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) +{ + struct quicklist *q; + void **p = NULL; + int cpu; + + (void)get_cpu_var_locked(quicklist, &cpu)[nr]; + p = __quicklist_alloc(cpu, nr, flags, ctor); + put_cpu_var_locked(quicklist, cpu); if (likely(p)) return p; Index: linux-rt-rebase.q/mm/quicklist.c =================================================================== --- linux-rt-rebase.q.orig/mm/quicklist.c +++ linux-rt-rebase.q/mm/quicklist.c @@ -19,7 +19,7 @@ #include #include -DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; +DEFINE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK]; #define FRACTION_OF_NODE_MEM 16 @@ -51,8 +51,9 @@ void quicklist_trim(int nr, void (*dtor) { long pages_to_free; struct quicklist *q; + int cpu; - q = &get_cpu_var(quicklist)[nr]; + q = &get_cpu_var_locked(quicklist, &cpu)[nr]; if (q->nr_pages > min_pages) { pages_to_free = min_pages_to_free(q, min_pages, max_free); @@ -61,7 +62,7 @@ void quicklist_trim(int nr, void (*dtor) * We pass a gfp_t of 0 to quicklist_alloc here * because we will never call into the page allocator. */ - void *p = quicklist_alloc(nr, 0, NULL); + void *p = __quicklist_alloc(cpu, nr, 0, NULL); if (dtor) dtor(p); @@ -69,7 +70,7 @@ void quicklist_trim(int nr, void (*dtor) pages_to_free--; } } - put_cpu_var(quicklist); + put_cpu_var_locked(quicklist, cpu); } unsigned long quicklist_total_size(void) patches/preempt-realtime-powerpc.patch0000664000077200007720000004047110655544574017520 0ustar mingomingo--- arch/powerpc/kernel/smp.c | 12 ++++++++- arch/powerpc/kernel/traps.c | 9 +++++- arch/powerpc/platforms/cell/smp.c | 2 - arch/powerpc/platforms/chrp/smp.c | 2 - arch/powerpc/platforms/chrp/time.c | 2 - arch/powerpc/platforms/powermac/feature.c | 2 - arch/powerpc/platforms/powermac/nvram.c | 2 - arch/powerpc/platforms/powermac/pic.c | 2 - arch/powerpc/platforms/pseries/smp.c | 2 - arch/ppc/8260_io/enet.c | 2 - arch/ppc/8260_io/fcc_enet.c | 2 - arch/ppc/8xx_io/commproc.c | 2 - arch/ppc/8xx_io/enet.c | 2 - arch/ppc/8xx_io/fec.c | 2 - arch/ppc/kernel/smp.c | 12 ++++++++- arch/ppc/kernel/traps.c | 6 +++- arch/ppc/platforms/hdpu.c | 2 - arch/ppc/platforms/sbc82xx.c | 2 - arch/ppc/syslib/cpm2_common.c | 2 - arch/ppc/syslib/open_pic.c | 2 - arch/ppc/syslib/open_pic2.c | 2 - include/asm-powerpc/hw_irq.h | 40 ++++++++++++++++++------------ 22 files changed, 76 insertions(+), 37 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/smp.c +++ linux-rt-rebase.q/arch/powerpc/kernel/smp.c @@ -126,6 +126,16 @@ void smp_send_reschedule(int cpu) smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); } +/* + * this function sends a 'reschedule' IPI to all other CPUs. 
+ * This is used when RT tasks are starving and other CPUs + * might be able to run them: + */ +void smp_send_reschedule_allbutself(void) +{ + smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_RESCHEDULE); +} + #ifdef CONFIG_DEBUGGER void smp_send_debugger_break(int cpu) { @@ -162,7 +172,7 @@ void smp_send_stop(void) * static memory requirements. It also looks cleaner. * Stolen from the i386 version. */ -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); +static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(call_lock); static struct call_data_struct { void (*func) (void *info); Index: linux-rt-rebase.q/arch/powerpc/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/traps.c +++ linux-rt-rebase.q/arch/powerpc/kernel/traps.c @@ -97,11 +97,11 @@ static inline void pmac_backlight_unblan int die(const char *str, struct pt_regs *regs, long err) { static struct { - spinlock_t lock; + raw_spinlock_t lock; u32 lock_owner; int lock_owner_depth; } die = { - .lock = __SPIN_LOCK_UNLOCKED(die.lock), + .lock = _RAW_SPIN_LOCK_UNLOCKED(die.lock), .lock_owner = -1, .lock_owner_depth = 0 }; @@ -178,6 +178,11 @@ void _exception(int signr, struct pt_reg return; } +#ifdef CONFIG_PREEMPT_RT + local_irq_enable(); + preempt_check_resched(); +#endif + memset(&info, 0, sizeof(info)); info.si_signo = signr; info.si_code = code; Index: linux-rt-rebase.q/arch/powerpc/platforms/cell/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/cell/smp.c +++ linux-rt-rebase.q/arch/powerpc/platforms/cell/smp.c @@ -133,7 +133,7 @@ static void __devinit smp_iic_setup_cpu( iic_setup_cpu(); } -static DEFINE_SPINLOCK(timebase_lock); +static DEFINE_RAW_SPINLOCK(timebase_lock); static unsigned long timebase = 0; static void __devinit cell_give_timebase(void) Index: linux-rt-rebase.q/arch/powerpc/platforms/chrp/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/chrp/smp.c +++ linux-rt-rebase.q/arch/powerpc/platforms/chrp/smp.c @@ -44,7 +44,7 @@ static void __devinit smp_chrp_setup_cpu mpic_setup_this_cpu(); } -static DEFINE_SPINLOCK(timebase_lock); +static DEFINE_RAW_SPINLOCK(timebase_lock); static unsigned int timebase_upper = 0, timebase_lower = 0; void __devinit smp_chrp_give_timebase(void) Index: linux-rt-rebase.q/arch/powerpc/platforms/chrp/time.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/chrp/time.c +++ linux-rt-rebase.q/arch/powerpc/platforms/chrp/time.c @@ -27,7 +27,7 @@ #include #include -extern spinlock_t rtc_lock; +extern raw_spinlock_t rtc_lock; static int nvram_as1 = NVRAM_AS1; static int nvram_as0 = NVRAM_AS0; Index: linux-rt-rebase.q/arch/powerpc/platforms/powermac/feature.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/powermac/feature.c +++ linux-rt-rebase.q/arch/powerpc/platforms/powermac/feature.c @@ -59,7 +59,7 @@ extern struct device_node *k2_skiplist[2 * We use a single global lock to protect accesses. 
Each driver has * to take care of its own locking */ -DEFINE_SPINLOCK(feature_lock); +DEFINE_RAW_SPINLOCK(feature_lock); #define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); #define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); Index: linux-rt-rebase.q/arch/powerpc/platforms/powermac/nvram.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/powermac/nvram.c +++ linux-rt-rebase.q/arch/powerpc/platforms/powermac/nvram.c @@ -80,7 +80,7 @@ static int is_core_99; static int core99_bank = 0; static int nvram_partitions[3]; // XXX Turn that into a sem -static DEFINE_SPINLOCK(nv_lock); +static DEFINE_RAW_SPINLOCK(nv_lock); static int (*core99_write_bank)(int bank, u8* datas); static int (*core99_erase_bank)(int bank); Index: linux-rt-rebase.q/arch/powerpc/platforms/powermac/pic.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/powermac/pic.c +++ linux-rt-rebase.q/arch/powerpc/platforms/powermac/pic.c @@ -63,7 +63,7 @@ static int max_irqs; static int max_real_irqs; static u32 level_mask[4]; -static DEFINE_SPINLOCK(pmac_pic_lock); +static DEFINE_RAW_SPINLOCK(pmac_pic_lock); #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; Index: linux-rt-rebase.q/arch/powerpc/platforms/pseries/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/pseries/smp.c +++ linux-rt-rebase.q/arch/powerpc/platforms/pseries/smp.c @@ -154,7 +154,7 @@ static void __devinit smp_xics_setup_cpu } #endif /* CONFIG_XICS */ -static DEFINE_SPINLOCK(timebase_lock); +static DEFINE_RAW_SPINLOCK(timebase_lock); static unsigned long timebase = 0; static void __devinit pSeries_give_timebase(void) Index: linux-rt-rebase.q/arch/ppc/8260_io/enet.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/8260_io/enet.c +++ linux-rt-rebase.q/arch/ppc/8260_io/enet.c @@ -115,7 +115,7 @@ struct scc_enet_private { scc_t *sccp; struct net_device_stats stats; uint tx_full; - spinlock_t lock; + raw_spinlock_t lock; }; static int scc_enet_open(struct net_device *dev); Index: linux-rt-rebase.q/arch/ppc/8260_io/fcc_enet.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/8260_io/fcc_enet.c +++ linux-rt-rebase.q/arch/ppc/8260_io/fcc_enet.c @@ -375,7 +375,7 @@ struct fcc_enet_private { volatile fcc_enet_t *ep; struct net_device_stats stats; uint tx_free; - spinlock_t lock; + raw_spinlock_t lock; #ifdef CONFIG_USE_MDIO uint phy_id; Index: linux-rt-rebase.q/arch/ppc/8xx_io/commproc.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/8xx_io/commproc.c +++ linux-rt-rebase.q/arch/ppc/8xx_io/commproc.c @@ -370,7 +370,7 @@ cpm_setbrg(uint brg, uint rate) /* * dpalloc / dpfree bits. */ -static spinlock_t cpm_dpmem_lock; +static raw_spinlock_t cpm_dpmem_lock; /* * 16 blocks should be enough to satisfy all requests * until the memory subsystem goes up... 
Index: linux-rt-rebase.q/arch/ppc/8xx_io/enet.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/8xx_io/enet.c +++ linux-rt-rebase.q/arch/ppc/8xx_io/enet.c @@ -142,7 +142,7 @@ struct scc_enet_private { unsigned char *rx_vaddr[RX_RING_SIZE]; struct net_device_stats stats; uint tx_full; - spinlock_t lock; + raw_spinlock_t lock; }; static int scc_enet_open(struct net_device *dev); Index: linux-rt-rebase.q/arch/ppc/8xx_io/fec.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/8xx_io/fec.c +++ linux-rt-rebase.q/arch/ppc/8xx_io/fec.c @@ -164,7 +164,7 @@ struct fec_enet_private { struct net_device_stats stats; uint tx_full; - spinlock_t lock; + raw_spinlock_t lock; #ifdef CONFIG_USE_MDIO uint phy_id; Index: linux-rt-rebase.q/arch/ppc/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/kernel/smp.c +++ linux-rt-rebase.q/arch/ppc/kernel/smp.c @@ -136,6 +136,16 @@ void smp_send_reschedule(int cpu) smp_message_pass(cpu, PPC_MSG_RESCHEDULE); } +/* + * this function sends a 'reschedule' IPI to all other CPUs. + * This is used when RT tasks are starving and other CPUs + * might be able to run them: + */ +void smp_send_reschedule_allbutself(void) +{ + smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_RESCHEDULE, 0, 0); +} + #ifdef CONFIG_XMON void smp_send_xmon_break(int cpu) { @@ -160,7 +170,7 @@ void smp_send_stop(void) * static memory requirements. It also looks cleaner. * Stolen from the i386 version. */ -static DEFINE_SPINLOCK(call_lock); +static DEFINE_RAW_SPINLOCK(call_lock); static struct call_data_struct { void (*func) (void *info); Index: linux-rt-rebase.q/arch/ppc/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/kernel/traps.c +++ linux-rt-rebase.q/arch/ppc/kernel/traps.c @@ -72,7 +72,7 @@ void (*debugger_fault_handler)(struct pt * Trap & Exception support */ -DEFINE_SPINLOCK(die_lock); +DEFINE_RAW_SPINLOCK(die_lock); int die(const char * str, struct pt_regs * fp, long err) { @@ -108,6 +108,10 @@ void _exception(int signr, struct pt_reg debugger(regs); die("Exception in kernel mode", regs, signr); } +#ifdef CONFIG_PREEMPT_RT + local_irq_enable(); + preempt_check_resched(); +#endif info.si_signo = signr; info.si_errno = 0; info.si_code = code; Index: linux-rt-rebase.q/arch/ppc/platforms/hdpu.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/platforms/hdpu.c +++ linux-rt-rebase.q/arch/ppc/platforms/hdpu.c @@ -55,7 +55,7 @@ static void parse_bootinfo(unsigned long static void hdpu_set_l1pe(void); static void hdpu_cpustate_set(unsigned char new_state); #ifdef CONFIG_SMP -static DEFINE_SPINLOCK(timebase_lock); +static DEFINE_RAW_SPINLOCK(timebase_lock); static unsigned int timebase_upper = 0, timebase_lower = 0; extern int smp_tb_synchronized; Index: linux-rt-rebase.q/arch/ppc/platforms/sbc82xx.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/platforms/sbc82xx.c +++ linux-rt-rebase.q/arch/ppc/platforms/sbc82xx.c @@ -65,7 +65,7 @@ static void sbc82xx_time_init(void) static volatile char *sbc82xx_i8259_map; static char sbc82xx_i8259_mask = 0xff; -static DEFINE_SPINLOCK(sbc82xx_i8259_lock); +static DEFINE_RAW_SPINLOCK(sbc82xx_i8259_lock); static void sbc82xx_i8259_mask_and_ack_irq(unsigned int irq_nr) { Index: linux-rt-rebase.q/arch/ppc/syslib/cpm2_common.c 
=================================================================== --- linux-rt-rebase.q.orig/arch/ppc/syslib/cpm2_common.c +++ linux-rt-rebase.q/arch/ppc/syslib/cpm2_common.c @@ -114,7 +114,7 @@ cpm2_fastbrg(uint brg, uint rate, int di /* * dpalloc / dpfree bits. */ -static spinlock_t cpm_dpmem_lock; +static raw_spinlock_t cpm_dpmem_lock; /* 16 blocks should be enough to satisfy all requests * until the memory subsystem goes up... */ static rh_block_t cpm_boot_dpmem_rh_block[16]; Index: linux-rt-rebase.q/arch/ppc/syslib/open_pic.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/syslib/open_pic.c +++ linux-rt-rebase.q/arch/ppc/syslib/open_pic.c @@ -526,7 +526,7 @@ void openpic_reset_processor_phys(u_int } #if defined(CONFIG_SMP) || defined(CONFIG_PM) -static DEFINE_SPINLOCK(openpic_setup_lock); +static DEFINE_RAW_SPINLOCK(openpic_setup_lock); #endif #ifdef CONFIG_SMP Index: linux-rt-rebase.q/arch/ppc/syslib/open_pic2.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/syslib/open_pic2.c +++ linux-rt-rebase.q/arch/ppc/syslib/open_pic2.c @@ -380,7 +380,7 @@ static void openpic2_set_spurious(u_int vec); } -static DEFINE_SPINLOCK(openpic2_setup_lock); +static DEFINE_RAW_SPINLOCK(openpic2_setup_lock); /* * Initialize a timer interrupt (and disable it) Index: linux-rt-rebase.q/include/asm-powerpc/hw_irq.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/hw_irq.h +++ linux-rt-rebase.q/include/asm-powerpc/hw_irq.h @@ -20,8 +20,8 @@ static inline unsigned long local_get_fl { unsigned long flags; - __asm__ __volatile__("lbz %0,%1(13)" - : "=r" (flags) +<<<<<<< delete extern unsigned long local_get_flags(void); +<<<<<<< delete extern unsigned long local_irq_disable(void); : "i" (offsetof(struct paca_struct, soft_enabled))); return flags; @@ -39,14 +39,19 @@ static inline unsigned long local_irq_di return flags; } -extern void local_irq_restore(unsigned long); + extern void iseries_handle_interrupts(void); +extern unsigned long raw_local_get_flags(void); +extern unsigned long raw_local_irq_disable(void); +extern void raw_local_irq_restore(unsigned long); + +#define raw_local_irq_enable() raw_local_irq_restore(1) +#define raw_local_save_flags(flags) ((flags) = raw_local_get_flags()) +#define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable()) -#define local_irq_enable() local_irq_restore(1) -#define local_save_flags(flags) ((flags) = local_get_flags()) -#define local_irq_save(flags) ((flags) = local_irq_disable()) +#define raw_irqs_disabled() (raw_local_get_flags() == 0) +#define raw_irqs_disabled_flags(flags) ((flags) == 0) -#define irqs_disabled() (local_get_flags() == 0) #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) @@ -62,13 +67,15 @@ extern void iseries_handle_interrupts(vo #if defined(CONFIG_BOOKE) #define SET_MSR_EE(x) mtmsr(x) -#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") +#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") +<<<<<<< delete #define local_irq_restore(flags) do { \ +#define raw_local_irq_restore(flags) do { \ #else #define SET_MSR_EE(x) mtmsr(x) -#define local_irq_restore(flags) mtmsr(flags) +#define raw_local_irq_restore(flags) mtmsr(flags) #endif -static inline void local_irq_disable(void) +static inline void 
raw_local_irq_disable(void) { #ifdef CONFIG_BOOKE __asm__ __volatile__("wrteei 0": : :"memory"); @@ -80,7 +87,7 @@ static inline void local_irq_disable(voi #endif } -static inline void local_irq_enable(void) +static inline void raw_local_irq_enable(void) { #ifdef CONFIG_BOOKE __asm__ __volatile__("wrteei 1": : :"memory"); @@ -92,7 +99,7 @@ static inline void local_irq_enable(void #endif } -static inline void local_irq_save_ptr(unsigned long *flags) +static inline void raw_local_irq_save_ptr(unsigned long *flags) { unsigned long msr; msr = mfmsr(); @@ -105,13 +112,16 @@ static inline void local_irq_save_ptr(un __asm__ __volatile__("": : :"memory"); } -#define local_save_flags(flags) ((flags) = mfmsr()) -#define local_irq_save(flags) local_irq_save_ptr(&flags) -#define irqs_disabled() ((mfmsr() & MSR_EE) == 0) +#define raw_local_save_flags(flags) ((flags) = mfmsr()) +#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) +#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) +#define raw_irqs_disabled_flags(flags) ((flags & MSR_EE) == 0) #define hard_irq_enable() local_irq_enable() #define hard_irq_disable() local_irq_disable() +#include + #endif /* CONFIG_PPC64 */ /* patches/ep93xx-clockevents.patch0000664000077200007720000001435110655544571016240 0ustar mingomingoclockevent support for the EP93xx platform clockevent support for the EP93xx platform (by tglx) Only added a fix for clockevent_ep93xx.mult, which was using the wrong clock tickrate) --- arch/arm/mach-ep93xx/core.c | 125 ++++++++++++++++++++---------- include/asm-arm/arch-ep93xx/ep93xx-regs.h | 6 + 2 files changed, 91 insertions(+), 40 deletions(-) Index: linux/arch/arm/mach-ep93xx/core.c =================================================================== --- linux.orig/arch/arm/mach-ep93xx/core.c +++ linux/arch/arm/mach-ep93xx/core.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include #include @@ -50,7 +52,6 @@ #include - /************************************************************************* * Static I/O mappings that are needed for all EP93xx platforms *************************************************************************/ @@ -93,39 +94,58 @@ void __init ep93xx_map_io(void) * to use this timer for something else. We also use timer 4 for keeping * track of lost jiffies. 
*/ -static unsigned int last_jiffy_time; -static unsigned int next_jiffy_time; -static unsigned int accumulator; +static struct clock_event_device clockevent_ep93xx; + +static int ep93xx_timer_interrupt(int irq, void *dev_id) +{ + __raw_writel(EP93XX_TC_CLEAR, EP93XX_TIMER1_CLEAR); -#define TIMER4_TICKS_PER_JIFFY (983040 / HZ) -#define TIMER4_TICKS_MOD_JIFFY (983040 % HZ) + clockevent_ep93xx.event_handler(&clockevent_ep93xx); -static int after_eq(unsigned long a, unsigned long b) + return IRQ_HANDLED; +} + +static int ep93xx_set_next_event(unsigned long evt, + struct clock_event_device *unused) { - return ((signed long)(a - b)) >= 0; + __raw_writel(evt, EP93XX_TIMER1_LOAD); + return 0; } -static int ep93xx_timer_interrupt(int irq, void *dev_id) +static void ep93xx_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) { - write_seqlock(&xtime_lock); + u32 tmode = EP93XX_TC123_SEL_508KHZ; - __raw_writel(1, EP93XX_TIMER1_CLEAR); - while (after_eq(__raw_readl(EP93XX_TIMER4_VALUE_LOW), next_jiffy_time)) { - timer_tick(); - - last_jiffy_time = next_jiffy_time; - next_jiffy_time += TIMER4_TICKS_PER_JIFFY; - accumulator += TIMER4_TICKS_MOD_JIFFY; - if (accumulator >= HZ) { - next_jiffy_time++; - accumulator -= HZ; - } + /* Disable timer */ + __raw_writel(tmode, EP93XX_TIMER1_CONTROL); + + switch(mode) { + case CLOCK_EVT_MODE_PERIODIC: + /* Set timer period */ + __raw_writel((508469 / HZ) - 1, EP93XX_TIMER1_LOAD); + tmode |= EP93XX_TC123_PERIODIC; + + case CLOCK_EVT_MODE_ONESHOT: + tmode |= EP93XX_TC123_ENABLE; + __raw_writel(tmode, EP93XX_TIMER1_CONTROL); + break; + + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_RESUME: + return; } +} - write_sequnlock(&xtime_lock); +static struct clock_event_device clockevent_ep93xx = { + .name = "ep93xx-timer1", + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, + .shift = 32, + .set_mode = ep93xx_set_mode, + .set_next_event = ep93xx_set_next_event, +}; - return IRQ_HANDLED; -} static struct irqaction ep93xx_timer_irq = { .name = "ep93xx timer", @@ -133,32 +153,58 @@ static struct irqaction ep93xx_timer_irq .handler = ep93xx_timer_interrupt, }; -static void __init ep93xx_timer_init(void) +static void __init ep93xx_clockevent_init(void) { - /* Enable periodic HZ timer. */ - __raw_writel(0x48, EP93XX_TIMER1_CONTROL); - __raw_writel((508469 / HZ) - 1, EP93XX_TIMER1_LOAD); - __raw_writel(0xc8, EP93XX_TIMER1_CONTROL); + setup_irq(IRQ_EP93XX_TIMER1, &ep93xx_timer_irq); - /* Enable lost jiffy timer. */ - __raw_writel(0x100, EP93XX_TIMER4_VALUE_HIGH); + clockevent_ep93xx.mult = div_sc(508469, NSEC_PER_SEC, + clockevent_ep93xx.shift); + clockevent_ep93xx.max_delta_ns = + clockevent_delta2ns(0xfffffffe, &clockevent_ep93xx); + clockevent_ep93xx.min_delta_ns = + clockevent_delta2ns(0xf, &clockevent_ep93xx); + clockevent_ep93xx.cpumask = cpumask_of_cpu(0); + clockevents_register_device(&clockevent_ep93xx); +} - setup_irq(IRQ_EP93XX_TIMER1, &ep93xx_timer_irq); +/* + * timer4 is a 40 Bit timer, separated in a 32bit and a 8 bit + * register, EP93XX_TIMER4_VALUE_LOW stores 32 bit word. 
The + * controlregister is in EP93XX_TIMER4_VALUE_HIGH + */ + +cycle_t ep93xx_get_cycles(void) +{ + return __raw_readl(EP93XX_TIMER4_VALUE_LOW); } -static unsigned long ep93xx_gettimeoffset(void) +static struct clocksource clocksource_ep93xx = { + .name = "ep93xx_timer4", + .rating = 200, + .read = ep93xx_get_cycles, + .mask = 0xFFFFFFFF, + .shift = 20, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static void __init ep93xx_clocksource_init(void) { - int offset; + /* Reset time-stamp counter */ + __raw_writel(0x100, EP93XX_TIMER4_VALUE_HIGH); - offset = __raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time; + clocksource_ep93xx.mult = + clocksource_hz2mult(983040, clocksource_ep93xx.shift); + clocksource_register(&clocksource_ep93xx); +} - /* Calculate (1000000 / 983040) * offset. */ - return offset + (53 * offset / 3072); +static void __init ep93xx_timer_init(void) +{ + ep93xx_clocksource_init(); + ep93xx_clockevent_init(); } struct sys_timer ep93xx_timer = { - .init = ep93xx_timer_init, - .offset = ep93xx_gettimeoffset, + .init = ep93xx_timer_init, }; @@ -510,7 +556,6 @@ static struct platform_device ep93xx_ohc .resource = ep93xx_ohci_resources, }; - void __init ep93xx_init_devices(void) { unsigned int v; Index: linux/include/asm-arm/arch-ep93xx/ep93xx-regs.h =================================================================== --- linux.orig/include/asm-arm/arch-ep93xx/ep93xx-regs.h +++ linux/include/asm-arm/arch-ep93xx/ep93xx-regs.h @@ -67,6 +67,12 @@ #define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88) #define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c) +#define EP93XX_TC_CLEAR 0x00000001 +#define EP93XX_TC123_ENABLE 0x00000080 +#define EP93XX_TC123_PERIODIC 0x00000040 +#define EP93XX_TC123_SEL_508KHZ 0x00000008 +#define EP93XX_TC4_ENABLE 0x00000100 + #define EP93XX_I2S_BASE (EP93XX_APB_VIRT_BASE + 0x00020000) #define EP93XX_SECURITY_BASE (EP93XX_APB_VIRT_BASE + 0x00030000) patches/preempt-realtime-x86_64.patch0000664000077200007720000003112410655544574016772 0ustar mingomingo arch/x86_64/kernel/early_printk.c | 2 +- arch/x86_64/kernel/head64.c | 6 +++++- arch/x86_64/kernel/i8259.c | 2 +- arch/x86_64/kernel/io_apic.c | 13 +++++++------ arch/x86_64/kernel/nmi.c | 2 ++ arch/x86_64/kernel/process.c | 23 +++++++++++++---------- arch/x86_64/kernel/signal.c | 7 +++++++ arch/x86_64/kernel/smp.c | 14 ++++++++++++-- arch/x86_64/kernel/traps.c | 5 ++--- include/asm-x86_64/acpi.h | 4 ++-- include/asm-x86_64/hw_irq.h | 2 +- include/asm-x86_64/io_apic.h | 2 +- include/asm-x86_64/spinlock.h | 6 +++--- include/asm-x86_64/tlbflush.h | 8 +++++++- include/asm-x86_64/vgtod.h | 2 +- 15 files changed, 65 insertions(+), 33 deletions(-) Index: linux-rt-rebase.q/arch/x86_64/kernel/early_printk.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/early_printk.c +++ linux-rt-rebase.q/arch/x86_64/kernel/early_printk.c @@ -203,7 +203,7 @@ static int early_console_initialized = 0 void early_printk(const char *fmt, ...) 
{ - char buf[512]; + static char buf[512]; int n; va_list ap; Index: linux-rt-rebase.q/arch/x86_64/kernel/head64.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/head64.c +++ linux-rt-rebase.q/arch/x86_64/kernel/head64.c @@ -26,7 +26,11 @@ static void __init zap_identity_mappings { pgd_t *pgd = pgd_offset_k(0UL); pgd_clear(pgd); - __flush_tlb(); + /* + * preempt_disable/enable does not work this early in the + * bootup yet: + */ + write_cr3(read_cr3()); } /* Don't add a printk in there. printk relies on the PDA which is not initialized Index: linux-rt-rebase.q/arch/x86_64/kernel/i8259.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/i8259.c +++ linux-rt-rebase.q/arch/x86_64/kernel/i8259.c @@ -96,8 +96,8 @@ static void (*interrupt[NR_VECTORS - FIR */ static int i8259A_auto_eoi; -DEFINE_SPINLOCK(i8259A_lock); static void mask_and_ack_8259A(unsigned int); +DEFINE_RAW_SPINLOCK(i8259A_lock); static struct irq_chip i8259A_chip = { .name = "XT-PIC", Index: linux-rt-rebase.q/arch/x86_64/kernel/io_apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/io_apic.c +++ linux-rt-rebase.q/arch/x86_64/kernel/io_apic.c @@ -90,8 +90,8 @@ int timer_over_8254 __initdata = 1; /* Where if anywhere is the i8259 connect in external int mode */ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; -static DEFINE_SPINLOCK(ioapic_lock); -DEFINE_SPINLOCK(vector_lock); +static DEFINE_RAW_SPINLOCK(ioapic_lock); +DEFINE_RAW_SPINLOCK(vector_lock); /* * # of IRQ routing registers @@ -204,6 +204,9 @@ static inline void io_apic_sync(unsigned reg ACTION; \ io_apic_modify(entry->apic, reg); \ FINAL; \ + /* Force POST flush by reading: */ \ + reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \ + \ if (!entry->next) \ break; \ entry = irq_2_pin + entry->next; \ @@ -348,10 +351,8 @@ static void add_pin_to_irq(unsigned int static void name##_IO_APIC_irq (unsigned int irq) \ __DO_ACTION(R, ACTION, FINAL) -DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) ) - /* mask = 1 */ -DO_ACTION( __unmask, 0, &= 0xfffeffff, ) - /* mask = 0 */ +DO_ACTION( __mask, 0, |= 0x00010000, ) /* mask = 1 */ +DO_ACTION( __unmask, 0, &= 0xfffeffff, ) /* mask = 0 */ DO_ACTION( __pcix_mask, 0, &= 0xffff7fff, ) /* edge */ DO_ACTION( __pcix_unmask, 0, = (reg & 0xfffeffff) | 0x00008000, ) /* level */ Index: linux-rt-rebase.q/arch/x86_64/kernel/nmi.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/nmi.c +++ linux-rt-rebase.q/arch/x86_64/kernel/nmi.c @@ -70,7 +70,9 @@ static int endflag __initdata = 0; */ static __init void nmi_cpu_busy(void *data) { +#ifndef CONFIG_PREEMPT_RT local_irq_enable_in_hardirq(); +#endif /* Intentionally don't use cpu_relax here. This is to make sure that the performance counter really ticks, even if there is a simulator or similar that catches the Index: linux-rt-rebase.q/arch/x86_64/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/process.c +++ linux-rt-rebase.q/arch/x86_64/kernel/process.c @@ -117,7 +117,7 @@ static void default_idle(void) */ smp_mb(); local_irq_disable(); - if (!need_resched()) { + if (!need_resched() && !need_resched_delayed()) { /* Enables interrupts one instruction before HLT. x86 special cases this so there is no race. 
*/ safe_halt(); @@ -203,7 +203,7 @@ void cpu_idle (void) current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ while (1) { - while (!need_resched()) { + while (!need_resched() && !need_resched_delayed()) { void (*idle)(void); if (__get_cpu_var(cpu_idle_state)) @@ -232,12 +232,14 @@ void cpu_idle (void) __exit_idle(); } - trace_preempt_exit_idle(); tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); - schedule(); + local_irq_disable(); + trace_preempt_exit_idle(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); trace_preempt_enter_idle(); + local_irq_enable(); } } @@ -253,10 +255,10 @@ void cpu_idle (void) */ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) { - if (!need_resched()) { + if (!need_resched() && !need_resched_delayed()) { __monitor((void *)¤t_thread_info()->flags, 0, 0); smp_mb(); - if (!need_resched()) + if (!need_resched() && !need_resched_delayed()) __mwait(eax, ecx); } } @@ -264,10 +266,10 @@ void mwait_idle_with_hints(unsigned long /* Default MONITOR/MWAIT with no hints, used for default C1 state */ static void mwait_idle(void) { - if (!need_resched()) { + if (!need_resched() && !need_resched_delayed()) { __monitor((void *)¤t_thread_info()->flags, 0, 0); smp_mb(); - if (!need_resched()) { + if (!need_resched() && !need_resched_delayed()) { trace_hardirqs_on(); __sti_mwait(0, 0); } else @@ -386,7 +388,7 @@ void exit_thread(void) struct thread_struct *t = &me->thread; if (me->thread.io_bitmap_ptr) { - struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); + struct tss_struct *tss; kfree(t->io_bitmap_ptr); t->io_bitmap_ptr = NULL; @@ -394,6 +396,7 @@ void exit_thread(void) /* * Careful, clear this in the TSS too: */ + tss = &per_cpu(init_tss, get_cpu()); memset(tss->io_bitmap, 0xff, t->io_bitmap_max); t->io_bitmap_max = 0; put_cpu(); Index: linux-rt-rebase.q/arch/x86_64/kernel/signal.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/signal.c +++ linux-rt-rebase.q/arch/x86_64/kernel/signal.c @@ -396,6 +396,13 @@ static void do_signal(struct pt_regs *re int signr; sigset_t *oldset; +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + local_irq_enable(); + preempt_check_resched(); +#endif /* * We want the common case to go fast, which * is why we may in certain cases get here from Index: linux-rt-rebase.q/arch/x86_64/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/smp.c +++ linux-rt-rebase.q/arch/x86_64/kernel/smp.c @@ -56,7 +56,7 @@ union smp_flush_state { struct mm_struct *flush_mm; unsigned long flush_va; #define FLUSH_ALL -1ULL - spinlock_t tlbstate_lock; + raw_spinlock_t tlbstate_lock; }; char pad[SMP_CACHE_BYTES]; } ____cacheline_aligned; @@ -295,10 +295,20 @@ void smp_send_reschedule(int cpu) } /* + * this function sends a 'reschedule' IPI to all other CPUs. + * This is used when RT tasks are starving and other CPUs + * might be able to run them: + */ +void smp_send_reschedule_allbutself(void) +{ + send_IPI_allbutself(RESCHEDULE_VECTOR); +} + +/* * Structure and data for smp_call_function(). This is designed to minimise * static memory requirements. It also looks cleaner. 
*/ -static DEFINE_SPINLOCK(call_lock); +static DEFINE_RAW_SPINLOCK(call_lock); struct call_data_struct { void (*func) (void *info); Index: linux-rt-rebase.q/arch/x86_64/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/traps.c +++ linux-rt-rebase.q/arch/x86_64/kernel/traps.c @@ -219,7 +219,7 @@ void dump_trace(struct task_struct *tsk, unsigned long *stack, struct stacktrace_ops *ops, void *data) { - const unsigned cpu = get_cpu(); + const unsigned cpu = raw_smp_processor_id(); unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; unsigned used = 0; struct thread_info *tinfo; @@ -310,7 +310,6 @@ void dump_trace(struct task_struct *tsk, tinfo = task_thread_info(tsk); HANDLE_STACK (valid_stack_ptr(tinfo, stack)); #undef HANDLE_STACK - put_cpu(); } EXPORT_SYMBOL(dump_trace); @@ -360,7 +359,7 @@ _show_stack(struct task_struct *tsk, str { unsigned long *stack; int i; - const int cpu = smp_processor_id(); + const int cpu = raw_smp_processor_id(); unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); Index: linux-rt-rebase.q/include/asm-x86_64/acpi.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/acpi.h +++ linux-rt-rebase.q/include/asm-x86_64/acpi.h @@ -51,8 +51,8 @@ #define ACPI_ASM_MACROS #define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable() -#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_DISABLE_IRQS() local_irq_disable_nort() +#define ACPI_ENABLE_IRQS() local_irq_enable_nort() #define ACPI_FLUSH_CPU_CACHE() wbinvd() int __acpi_acquire_global_lock(unsigned int *lock); Index: linux-rt-rebase.q/include/asm-x86_64/hw_irq.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/hw_irq.h +++ linux-rt-rebase.q/include/asm-x86_64/hw_irq.h @@ -118,7 +118,7 @@ void i8254_timer_resume(void); typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); extern void __setup_vector_irq(int cpu); -extern spinlock_t vector_lock; +extern raw_spinlock_t vector_lock; /* * Various low-level irq details needed by irq.c, process.c, Index: linux-rt-rebase.q/include/asm-x86_64/io_apic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/io_apic.h +++ linux-rt-rebase.q/include/asm-x86_64/io_apic.h @@ -125,6 +125,6 @@ extern int sis_apic_bug; /* dummy */ void enable_NMI_through_LVT0 (void * dummy); -extern spinlock_t i8259A_lock; +extern raw_spinlock_t i8259A_lock; #endif Index: linux-rt-rebase.q/include/asm-x86_64/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/spinlock.h +++ linux-rt-rebase.q/include/asm-x86_64/spinlock.h @@ -160,8 +160,8 @@ static inline void __raw_write_unlock(__ : "=m" (rw->lock) : : "memory"); } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define __raw_spin_relax(lock) cpu_relax() +#define __raw_read_relax(lock) cpu_relax() +#define __raw_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ Index: linux-rt-rebase.q/include/asm-x86_64/tlbflush.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/tlbflush.h +++ 
linux-rt-rebase.q/include/asm-x86_64/tlbflush.h @@ -8,14 +8,20 @@ static inline void __flush_tlb(void) { + preempt_disable(); write_cr3(read_cr3()); + preempt_enable(); } static inline void __flush_tlb_all(void) { - unsigned long cr4 = read_cr4(); + unsigned long cr4; + + preempt_disable(); + cr4 = read_cr4(); write_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */ write_cr4(cr4); /* write old PGE again and flush TLBs */ + preempt_enable(); } #define __flush_tlb_one(addr) \ Index: linux-rt-rebase.q/include/asm-x86_64/vgtod.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/vgtod.h +++ linux-rt-rebase.q/include/asm-x86_64/vgtod.h @@ -5,7 +5,7 @@ #include struct vsyscall_gtod_data { - seqlock_t lock; + raw_seqlock_t lock; /* open coded 'struct timespec' */ time_t wall_time_sec; patches/ppc-clockevents-fix.patch0000664000077200007720000001010610655544572016441 0ustar mingomingoFrom linux-kernel-owner@vger.kernel.org Thu May 24 20:24:54 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from vger.kernel.org (vger.kernel.org [209.132.176.167]) by mail.tglx.de (Postfix) with ESMTP id B0D2F65C3E9 for ; Thu, 24 May 2007 20:24:54 +0200 (CEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751886AbXEXSYQ (ORCPT ); Thu, 24 May 2007 14:24:16 -0400 Received: (majordomo@vger.kernel.org) by vger.kernel.org id S1750768AbXEXSYE (ORCPT ); Thu, 24 May 2007 14:24:04 -0400 Received: from gateway-1237.mvista.com ([63.81.120.155]:2175 "EHLO imap.sh.mvista.com" rhost-flags-OK-FAIL-OK-FAIL) by vger.kernel.org with ESMTP id S1750741AbXEXSYD (ORCPT ); Thu, 24 May 2007 14:24:03 -0400 Received: from wasted.dev.rtsoft.ru (unknown [10.150.0.9]) by imap.sh.mvista.com (Postfix) with ESMTP id 767D13ECA; Thu, 24 May 2007 11:23:59 -0700 (PDT) From: Sergei Shtylyov Organization: MontaVista Software Inc. To: tglx@linutronix.de, mingo@elte.hu Subject: [PATCH 2.6.21-rt7] PowerPC: fix clockevents for classic CPUs Date: Thu, 24 May 2007 22:25:30 +0400 User-Agent: KMail/1.5 Cc: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org References: <200705172142.26739.sshtylyov@ru.mvista.com> In-Reply-To: <200705172142.26739.sshtylyov@ru.mvista.com> MIME-Version: 1.0 Content-Type: text/plain; charset="iso-8859-1" Content-Disposition: inline Message-Id: <200705242225.30225.sshtylyov@ru.mvista.com> Sender: linux-kernel-owner@vger.kernel.org Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org X-Filter-To: .Kernel.LKML X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Uncoditionally set a maximum positive value to the decrementer before calling an event handler for all "classic" PPC CPUs (although this is only necessary to clear interrupt on POWER4+, I've been asked to do it this way) -- otherwise it wouldn't have been done for an offline CPU in periodic mode since the event reprogramming has been delegated to the timer subsystem. Also, as the classic decrementer doesn't have periodic mode, make set_mode() method for this case completely empty. While at it, add a switch case for CLOCK_EVT_MODE_RESUME to hush the warning. Signed-off-by: Sergei Shtylyov --- Testing on "classic" CPUs is still needed (used to work atop of 2.6.18-rt7). 
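For reference only (not part of the patch): a minimal sketch of the clockevents hand-off the change above relies on. All my_* identifiers are invented for illustration; my_quiesce_timer() stands in for the real set_dec(DECREMENTER_MAX) write. The point is that the interrupt handler only has to silence the hardware and call the registered event_handler(); in one-shot mode the clockevents core programs the actual next expiry through ->set_next_event(), so no mode-specific reload logic belongs in the handler.

#include <linux/clockchips.h>

static struct clock_event_device my_decrementer;	/* assumed registered elsewhere */

/* hypothetical helper: write a large positive value so the timer stops re-firing */
static inline void my_quiesce_timer(void)
{
}

static void my_timer_interrupt(void)
{
	/* 1) clear/quiesce the interrupt source unconditionally */
	my_quiesce_timer();

	/*
	 * 2) hand off to the clockevents core; in one-shot mode the core
	 *    reprograms the next expiry via ->set_next_event(), in periodic
	 *    mode it simply expects to be called again on the next tick.
	 */
	my_decrementer.event_handler(&my_decrementer);
}
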
arch/powerpc/kernel/time.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -167,11 +167,14 @@ static void decrementer_set_mode(enum cl case CLOCK_EVT_MODE_SHUTDOWN: tcr &= ~TCR_DIE; break; + case CLOCK_EVT_MODE_RESUME: + break; } mtspr(SPRN_TCR, tcr); -#endif + if (mode == CLOCK_EVT_MODE_PERIODIC) decrementer_set_next_event(tb_ticks_per_jiffy, dev); +#endif } static struct clock_event_device decrementer_clockevent = { @@ -571,16 +574,12 @@ void timer_interrupt(struct pt_regs * re irq_enter(); #ifdef CONFIG_GENERIC_CLOCKEVENTS -#ifdef CONFIG_PPC_MULTIPLATFORM +#if !defined(CONFIG_40x) && !defined(CONFIG_BOOKE) /* * We must write a positive value to the decrementer to clear - * the interrupt on the IBM 970 CPU series. In periodic mode, - * this happens when the decrementer gets reloaded later, but - * in one-shot mode, we have to do it here since an event handler - * may skip loading the new value... + * the interrupt on POWER4+ compatible CPUs. */ - if (per_cpu(decrementers, cpu).mode != CLOCK_EVT_MODE_PERIODIC) - set_dec(DECREMENTER_MAX); + set_dec(DECREMENTER_MAX); #endif /* * We can't disable the decrementer, so in the period between patches/arm-cmpxchg-support-armv6.patch0000664000077200007720000000317710655544573017541 0ustar mingomingo[PATCH -rt] cmpxchg support on ARMv6 Current rt patch don't support the cmpxchg on ARMv6. This patch supports cmpxchg in ARMv6. It's tested on OMAP2 (apollon board). Signed-off-by: Kyungmin Park p.s., Pleaes cc to me, I'm not subscriber on this mailing list. -- --- include/asm-arm/atomic.h | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) Index: linux-rt-rebase.q/include/asm-arm/atomic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/atomic.h +++ linux-rt-rebase.q/include/asm-arm/atomic.h @@ -114,6 +114,46 @@ static inline void atomic_clear_mask(uns : "cc"); } +/* + * Atomic compare and exchange. 
+ */ +#define __HAVE_ARCH_CMPXCHG 1 + +extern unsigned long wrong_size_cmpxchg(volatile void *ptr); + +static inline unsigned long __cmpxchg(volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + volatile unsigned long *p = ptr; + + if (size == 4) { + unsigned long oldval, res; + + do { + __asm__ __volatile__("@ atomic_cmpxchg\n" + "ldrex %1, [%2]\n" + "mov %0, #0\n" + "teq %1, %3\n" + "strexeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (p), "Ir" (old), "r" (new) + : "cc"); + } while (res); + + return oldval; + } else + return wrong_size_cmpxchg(ptr); +} + +#define cmpxchg(ptr,o,n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ +}) + #else /* ARM_ARCH_6 */ #include patches/nf_conntrack-weird-crash-fix.patch0000664000077200007720000000223110655544576020220 0ustar mingomingo--- net/netfilter/nf_conntrack_core.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) Index: linux-rt-rebase.q/net/netfilter/nf_conntrack_core.c =================================================================== --- linux-rt-rebase.q.orig/net/netfilter/nf_conntrack_core.c +++ linux-rt-rebase.q/net/netfilter/nf_conntrack_core.c @@ -1137,6 +1137,24 @@ int __init nf_conntrack_init(void) /* - and look it like as a confirmed connection */ set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); + /* + * There's something really weird (read: crash) going on in + * this module when lockdep and rt is enabled - the locks are + * not initialized in the per-CPU area properly - or they might + * be initialized by getting a copy of the first CPU's per-cpu + * area? Only seems to happen when things are modular. Maybe + * per-cpu-alloc does not zero buffers properly? Needs + * investigating. Reported and fixed by Mike. 
+ */ +#if defined(CONFIG_NF_CONNTRACK_EVENTS) && defined(CONFIG_SMP) + { + int cpu; + + for_each_possible_cpu(cpu) + spin_lock_init(&per_cpu_lock(nf_conntrack_ecache, cpu)); + } +#endif + return ret; out_fini_expect: patches/ich-force-hpet-ich5-quirk-to-force-detect-enable-fix.patch0000664000077200007720000000264710655544570024334 0ustar mingomingoFrom: Andrew Morton arch/i386/kernel/quirks.c: In function 'old_ich_force_enable_hpet': arch/i386/kernel/quirks.c:196: warning: 'gen_cntl' is used uninitialized in this function arch/i386/kernel/quirks.c: In function 'force_hpet_resume': arch/i386/kernel/quirks.c:171: warning: 'gen_cntl' is used uninitialized in this function Cc: Andi Kleen Cc: Greg KH Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Venkatesh Pallipadi Cc: Venki Pallipadi Cc: john stultz Signed-off-by: Andrew Morton --- arch/i386/kernel/quirks.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) Index: linux/arch/i386/kernel/quirks.c =================================================================== --- linux.orig/arch/i386/kernel/quirks.c +++ linux/arch/i386/kernel/quirks.c @@ -161,7 +161,8 @@ static struct pci_dev *cached_dev; static void old_ich_force_hpet_resume(void) { - u32 val, gen_cntl; + u32 val; + u32 uninitialized_var(gen_cntl); if (!force_hpet_address || !cached_dev) return; @@ -182,7 +183,8 @@ static void old_ich_force_hpet_resume(vo static void old_ich_force_enable_hpet(struct pci_dev *dev) { - u32 val, gen_cntl; + u32 val; + u32 uninitialized_var(gen_cntl); if (hpet_address || force_hpet_address) return; patches/s_files.patch0000664000077200007720000003033010655544576014206 0ustar mingomingoSubject: remove global files_lock remove the global files_lock by reworking super_block and tty file lists. these are replaced by percpu_lists which are fine grain locked lists (lock_list) with a per cpu list head. Signed-off-by: Peter Zijlstra --- drivers/char/tty_io.c | 23 ++++++++++------------- fs/file_table.c | 37 +++++++++++++++++++------------------ fs/proc/generic.c | 9 ++++----- fs/super.c | 12 ++++++++---- include/linux/fs.h | 14 +++++++------- include/linux/tty.h | 2 +- security/selinux/hooks.c | 9 ++++++--- security/selinux/selinuxfs.c | 12 +++++++----- 8 files changed, 62 insertions(+), 56 deletions(-) Index: linux-rt-rebase.q/drivers/char/tty_io.c =================================================================== --- linux-rt-rebase.q.orig/drivers/char/tty_io.c +++ linux-rt-rebase.q/drivers/char/tty_io.c @@ -241,14 +241,13 @@ int tty_paranoia_check(struct tty_struct static int check_tty_count(struct tty_struct *tty, const char *routine) { #ifdef CHECK_TTY_COUNT - struct list_head *p; + struct file *filp; int count = 0; - - file_list_lock(); - list_for_each(p, &tty->tty_files) { + + percpu_list_fold(&tty->tty_files); + lock_list_for_each_entry(filp, percpu_list_head(&tty->tty_files), f_u.fu_llist) count++; - } - file_list_unlock(); + if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_SLAVE && tty->link && tty->link->count) @@ -1346,9 +1345,8 @@ static void do_tty_hangup(struct work_st spin_unlock(&redirect_lock); check_tty_count(tty, "do_tty_hangup"); - file_list_lock(); /* This breaks for file handles being sent over AF_UNIX sockets ? 
*/ - list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) { + lock_list_for_each_entry(filp, percpu_list_head(&tty->tty_files), f_u.fu_llist) { if (filp->f_op->write == redirected_tty_write) cons_filp = filp; if (filp->f_op->write != tty_write) @@ -1357,7 +1355,6 @@ static void do_tty_hangup(struct work_st tty_fasync(-1, filp, 0); /* can't block */ filp->f_op = &hung_up_tty_fops; } - file_list_unlock(); /* FIXME! What are the locking issues here? This may me overdoing things.. * this question is especially important now that we've removed the irqlock. */ @@ -2240,9 +2237,9 @@ static void release_one_tty(struct tty_s tty->magic = 0; tty->driver->refcount--; - file_list_lock(); - list_del_init(&tty->tty_files); - file_list_unlock(); + percpu_list_fold(&tty->tty_files); + lock_list_del_init(percpu_list_head(&tty->tty_files)); + percpu_list_destroy(&tty->tty_files); free_tty_struct(tty); } @@ -3691,7 +3688,7 @@ static void initialize_tty_struct(struct mutex_init(&tty->atomic_read_lock); mutex_init(&tty->atomic_write_lock); spin_lock_init(&tty->read_lock); - INIT_LIST_HEAD(&tty->tty_files); + percpu_list_init(&tty->tty_files); INIT_WORK(&tty->SAK_work, do_SAK_work); } Index: linux-rt-rebase.q/fs/file_table.c =================================================================== --- linux-rt-rebase.q.orig/fs/file_table.c +++ linux-rt-rebase.q/fs/file_table.c @@ -28,9 +28,6 @@ struct files_stat_struct files_stat = { .max_files = NR_FILE }; -/* public. Not pretty! */ -__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock); - static struct percpu_counter nr_files __cacheline_aligned_in_smp; static inline void file_free_rcu(struct rcu_head *head) @@ -112,7 +109,7 @@ struct file *get_empty_filp(void) goto fail_sec; tsk = current; - INIT_LIST_HEAD(&f->f_u.fu_list); + INIT_LOCK_LIST_HEAD(&f->f_u.fu_llist); atomic_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); f->f_uid = tsk->fsuid; @@ -244,32 +241,35 @@ void put_filp(struct file *file) } } -void file_move(struct file *file, struct list_head *list) +void file_move(struct file *file, struct percpu_list *list) { if (!list) return; - file_list_lock(); - list_move(&file->f_u.fu_list, list); - file_list_unlock(); + + file_kill(file); + percpu_list_add(list, &file->f_u.fu_llist); } void file_kill(struct file *file) { - if (!list_empty(&file->f_u.fu_list)) { - file_list_lock(); - list_del_init(&file->f_u.fu_list); - file_list_unlock(); + if (file && file->f_mapping && file->f_mapping->host) { + struct super_block *sb = file->f_mapping->host->i_sb; + if (sb) + synchronize_qrcu(&sb->s_qrcu); } + + lock_list_del_init(&file->f_u.fu_llist); } int fs_may_remount_ro(struct super_block *sb) { - struct list_head *p; + struct file *file; + int idx; /* Check that no files are currently opened for writing. */ - file_list_lock(); - list_for_each(p, &sb->s_files) { - struct file *file = list_entry(p, struct file, f_u.fu_list); + idx = qrcu_read_lock(&sb->s_qrcu); + percpu_list_fold(&sb->s_files); + lock_list_for_each_entry(file, percpu_list_head(&sb->s_files), f_u.fu_llist) { struct inode *inode = file->f_path.dentry->d_inode; /* File with pending delete? */ @@ -280,10 +280,11 @@ int fs_may_remount_ro(struct super_block if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE)) goto too_bad; } - file_list_unlock(); + qrcu_read_unlock(&sb->s_qrcu, idx); return 1; /* Tis' cool bro. 
*/ too_bad: - file_list_unlock(); + lock_list_for_each_entry_stop(file, f_u.fu_llist); + qrcu_read_unlock(&sb->s_qrcu, idx); return 0; } Index: linux-rt-rebase.q/fs/proc/generic.c =================================================================== --- linux-rt-rebase.q.orig/fs/proc/generic.c +++ linux-rt-rebase.q/fs/proc/generic.c @@ -560,15 +560,13 @@ static int proc_register(struct proc_dir */ static void proc_kill_inodes(struct proc_dir_entry *de) { - struct list_head *p; + struct file *filp; struct super_block *sb = proc_mnt->mnt_sb; /* * Actually it's a partial revoke(). */ - file_list_lock(); - list_for_each(p, &sb->s_files) { - struct file * filp = list_entry(p, struct file, f_u.fu_list); + lock_list_for_each_entry(filp, percpu_list_head(&sb->s_files), f_u.fu_llist) { struct dentry * dentry = filp->f_path.dentry; struct inode * inode; const struct file_operations *fops; @@ -582,7 +580,6 @@ static void proc_kill_inodes(struct proc filp->f_op = NULL; fops_put(fops); } - file_list_unlock(); } static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent, @@ -725,6 +722,8 @@ void remove_proc_entry(const char *name, goto out; len = strlen(fn); + percpu_list_fold(&proc_mnt->mnt_sb->s_files); + spin_lock(&proc_subdir_lock); for (p = &parent->subdir; *p; p=&(*p)->next ) { if (!proc_match(len, fn, *p)) Index: linux-rt-rebase.q/fs/super.c =================================================================== --- linux-rt-rebase.q.orig/fs/super.c +++ linux-rt-rebase.q/fs/super.c @@ -67,7 +67,8 @@ static struct super_block *alloc_super(s } INIT_LIST_HEAD(&s->s_dirty); INIT_LIST_HEAD(&s->s_io); - INIT_LIST_HEAD(&s->s_files); + percpu_list_init(&s->s_files); + init_qrcu_struct(&s->s_qrcu); INIT_LIST_HEAD(&s->s_instances); INIT_HLIST_HEAD(&s->s_anon); INIT_LIST_HEAD(&s->s_inodes); @@ -106,6 +107,7 @@ out: */ static inline void destroy_super(struct super_block *s) { + percpu_list_destroy(&s->s_files); security_sb_free(s); kfree(s->s_subtype); kfree(s); @@ -568,13 +570,15 @@ out: static void mark_files_ro(struct super_block *sb) { struct file *f; + int idx; - file_list_lock(); - list_for_each_entry(f, &sb->s_files, f_u.fu_list) { + idx = qrcu_read_lock(&sb->s_qrcu); + percpu_list_fold(&sb->s_files); + lock_list_for_each_entry(f, percpu_list_head(&sb->s_files), f_u.fu_llist) { if (S_ISREG(f->f_path.dentry->d_inode->i_mode) && file_count(f)) f->f_mode &= ~FMODE_WRITE; } - file_list_unlock(); + qrcu_read_unlock(&sb->s_qrcu, idx); } /** Index: linux-rt-rebase.q/include/linux/fs.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/fs.h +++ linux-rt-rebase.q/include/linux/fs.h @@ -278,6 +278,7 @@ extern int dir_notify_enable; #include #include #include +#include #include #include #include @@ -285,6 +286,7 @@ extern int dir_notify_enable; #include #include #include +#include #include #include @@ -720,11 +722,11 @@ static inline int ra_has_index(struct fi struct file { /* - * fu_list becomes invalid after file_free is called and queued via + * fu_llist becomes invalid after file_free is called and queued via * fu_rcuhead for RCU freeing */ union { - struct list_head fu_list; + struct lock_list_head fu_llist; struct rcu_head fu_rcuhead; } f_u; struct path f_path; @@ -753,9 +755,6 @@ struct file { #endif /* #ifdef CONFIG_EPOLL */ struct address_space *f_mapping; }; -extern spinlock_t files_lock; -#define file_list_lock() spin_lock(&files_lock); -#define file_list_unlock() spin_unlock(&files_lock); #define get_file(x) atomic_inc(&(x)->f_count) 
#define file_count(x) atomic_read(&(x)->f_count) @@ -949,7 +948,8 @@ struct super_block { struct list_head s_dirty; /* dirty inodes */ struct list_head s_io; /* parked for writeback */ struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ - struct list_head s_files; + struct percpu_list s_files; + struct qrcu_struct s_qrcu; struct block_device *s_bdev; struct mtd_info *s_mtd; @@ -1645,7 +1645,7 @@ static inline void insert_inode_hash(str } extern struct file * get_empty_filp(void); -extern void file_move(struct file *f, struct list_head *list); +extern void file_move(struct file *f, struct percpu_list *list); extern void file_kill(struct file *f); #ifdef CONFIG_BLOCK struct bio; Index: linux-rt-rebase.q/include/linux/tty.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/tty.h +++ linux-rt-rebase.q/include/linux/tty.h @@ -217,7 +217,7 @@ struct tty_struct { struct work_struct hangup_work; void *disc_data; void *driver_data; - struct list_head tty_files; + struct percpu_list tty_files; #define N_TTY_BUF_SIZE 4096 Index: linux-rt-rebase.q/security/selinux/hooks.c =================================================================== --- linux-rt-rebase.q.orig/security/selinux/hooks.c +++ linux-rt-rebase.q/security/selinux/hooks.c @@ -1743,8 +1743,11 @@ static inline void flush_unauthorized_fi mutex_lock(&tty_mutex); tty = get_current_tty(); if (tty) { - file_list_lock(); - file = list_entry(tty->tty_files.next, typeof(*file), f_u.fu_list); + lock_list_for_each_entry(file, + percpu_list_head(&tty->tty_files), + f_u.fu_llist) + break; + if (file) { /* Revalidate access to controlling tty. Use inode_has_perm on the tty inode directly rather @@ -1756,8 +1759,8 @@ static inline void flush_unauthorized_fi FILE__READ | FILE__WRITE, NULL)) { drop_tty = 1; } + lock_list_for_each_entry_stop(file, f_u.fu_llist); } - file_list_unlock(); } mutex_unlock(&tty_mutex); /* Reset controlling tty. */ Index: linux-rt-rebase.q/security/selinux/selinuxfs.c =================================================================== --- linux-rt-rebase.q.orig/security/selinux/selinuxfs.c +++ linux-rt-rebase.q/security/selinux/selinuxfs.c @@ -961,8 +961,10 @@ static const struct file_operations sel_ /* partial revoke() from fs/proc/generic.c proc_kill_inodes */ static void sel_remove_entries(struct dentry *de) { - struct list_head *p, *node; + struct list_head *node; + struct file *filp; struct super_block *sb = de->d_sb; + int idx; spin_lock(&dcache_lock); node = de->d_subdirs.next; @@ -983,9 +985,9 @@ static void sel_remove_entries(struct de spin_unlock(&dcache_lock); - file_list_lock(); - list_for_each(p, &sb->s_files) { - struct file * filp = list_entry(p, struct file, f_u.fu_list); + idx = qrcu_read_lock(&sb->s_qrcu); + percpu_list_fold(&sb->s_files); + lock_list_for_each_entry(filp, percpu_list_head(&sb->s_files), f_u.fu_llist) { struct dentry * dentry = filp->f_path.dentry; if (dentry->d_parent != de) { @@ -993,7 +995,7 @@ static void sel_remove_entries(struct de } filp->f_op = NULL; } - file_list_unlock(); + qrcu_read_unlock(&sb->s_qrcu, idx); } #define BOOL_DIR_NAME "booleans" patches/lock_list.patch0000664000077200007720000001363410655544576014555 0ustar mingomingoSubject: lock_list - a fine grain locked double linked list Provide a simple fine grain locked double link list. It build upon the regular double linked list primitives, spinlocks and RCU. In order to avoid deadlocks a prev -> next locking order is observed. 
This prevents reverse iteration. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lock_list.h | 74 +++++++++++++++++++++++++++++++ lib/Makefile | 2 lib/lock_list.c | 107 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 182 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/include/linux/lock_list.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/linux/lock_list.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2006, Red Hat, Inc., Peter Zijlstra + * Licenced under the GPLv2. + * + * Simple fine grain locked double linked list. + */ +#ifndef _LINUX_LOCK_LIST_H +#define _LINUX_LOCK_LIST_H + +#ifdef __KERNEL__ + +#include +#include +#include + +struct lock_list_head { + union { + struct list_head head; + struct { + struct lock_list_head *next, *prev; + }; + }; + spinlock_t lock; +}; + +enum { + LOCK_LIST_NESTING_PREV = 1, + LOCK_LIST_NESTING_CUR, + LOCK_LIST_NESTING_NEXT, +}; + +static inline void INIT_LOCK_LIST_HEAD(struct lock_list_head *list) +{ + INIT_LIST_HEAD(&list->head); + spin_lock_init(&list->lock); +} + +/* + * Passed pointers are assumed stable by external means (refcount, rcu) + */ +extern void lock_list_add(struct lock_list_head *new, + struct lock_list_head *list); +extern void lock_list_del_init(struct lock_list_head *entry); +extern void lock_list_splice_init(struct lock_list_head *list, + struct lock_list_head *head); + +struct lock_list_head *lock_list_next_entry(struct lock_list_head *list, + struct lock_list_head *entry); +struct lock_list_head *lock_list_first_entry(struct lock_list_head *list); + +#define lock_list_for_each_entry(pos, list, member) \ + for (pos = list_entry(lock_list_first_entry(list), \ + typeof(*pos), member); \ + pos; \ + pos = list_entry(lock_list_next_entry(list, &pos->member), \ + typeof(*pos), member)) + +/* + * to be used when iteration is terminated by breaking out of the + * lock_list_for_each_entry() loop. + * + * lock_list_for_each_entry(i, list, member) { + * if (cond) { + * lock_list_for_each_entry_stop(i, member); + * goto foo; + * } + * } + * + */ +#define lock_list_for_each_entry_stop(pos, member) \ + spin_unlock(&(pos->member.lock)) + +#endif /* __KERNEL__ */ +#endif /* _LINUX_LOCK_LIST_H */ Index: linux-rt-rebase.q/lib/Makefile =================================================================== --- linux-rt-rebase.q.orig/lib/Makefile +++ linux-rt-rebase.q/lib/Makefile @@ -3,7 +3,7 @@ # lib-y := ctype.o string.o vsprintf.o kasprintf.o cmdline.o \ - rbtree.o radix-tree.o dump_stack.o \ + rbtree.o radix-tree.o dump_stack.o lock_list.o \ idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o Index: linux-rt-rebase.q/lib/lock_list.c =================================================================== --- /dev/null +++ linux-rt-rebase.q/lib/lock_list.c @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2006, Red Hat, Inc., Peter Zijlstra + * Licenced under the GPLv2. + * + * Simple fine grain locked double linked list. + * + * Locking order is from prev -> next. + * Edges are locked not nodes; that is, cur->lock protects: + * - cur->next, + * - cur->next->prev. + * + * Passed pointers are assumed to be stable by external means such as + * refcounts or RCU. The individual list entries are assumed to be RCU + * freed (requirement of __lock_list_del). 
+ */ + +#include + +void lock_list_add(struct lock_list_head *new, + struct lock_list_head *list) +{ + struct lock_list_head *next; + + spin_lock(&new->lock); + spin_lock_nested(&list->lock, LOCK_LIST_NESTING_PREV); + next = list->next; + __list_add(&new->head, &list->head, &next->head); + spin_unlock(&list->lock); + spin_unlock(&new->lock); +} + +static spinlock_t *__lock_list(struct lock_list_head *entry) +{ + struct lock_list_head *prev; + spinlock_t *lock = NULL; + +again: + prev = entry->prev; + if (prev == entry) + goto one; + spin_lock_nested(&prev->lock, LOCK_LIST_NESTING_PREV); + if (unlikely(entry->prev != prev)) { + /* + * we lost + */ + spin_unlock(&prev->lock); + goto again; + } + lock = &prev->lock; +one: + spin_lock_nested(&entry->lock, LOCK_LIST_NESTING_CUR); + return lock; +} + +void lock_list_del_init(struct lock_list_head *entry) +{ + spinlock_t *lock; + + rcu_read_lock(); + lock = __lock_list(entry); + list_del_init(&entry->head); + spin_unlock(&entry->lock); + if (lock) + spin_unlock(lock); + rcu_read_unlock(); +} + +void lock_list_splice_init(struct lock_list_head *list, + struct lock_list_head *head) +{ + spinlock_t *lock; + + rcu_read_lock(); + lock = __lock_list(list); + if (!list_empty(&list->head)) { + spin_lock_nested(&head->lock, LOCK_LIST_NESTING_NEXT); + __list_splice(&list->head, &head->head); + INIT_LIST_HEAD(&list->head); + spin_unlock(&head->lock); + } + spin_unlock(&list->lock); + if (lock) + spin_unlock(lock); + rcu_read_unlock(); +} + +struct lock_list_head *lock_list_next_entry(struct lock_list_head *list, + struct lock_list_head *entry) +{ + struct lock_list_head *next = entry->next; + if (likely(next != list)) { + lock_set_subclass(&entry->lock.dep_map, + LOCK_LIST_NESTING_CUR, _THIS_IP_); + spin_lock_nested(&next->lock, LOCK_LIST_NESTING_NEXT); + BUG_ON(entry->next != next); + } else + next = NULL; + spin_unlock(&entry->lock); + return next; +} + +struct lock_list_head *lock_list_first_entry(struct lock_list_head *list) +{ + spin_lock(&list->lock); + return lock_list_next_entry(list, list); +} + patches/cond_resched_softirq-WARN-fix.patch0000664000077200007720000000144510655544577020310 0ustar mingomingoSubject: [BUG RT] WARNING: at kernel/sched.c:5071 2.6.23-rc1-rt7 From: Steven Rostedt The below ifndef, shouldn't that be ifndef CONFIG_PREEMPT_SOFTIRQS ? I hit that warning while I was running !PREEMPT_RT but with both hard and softiqs as threads. --- kernel/sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -5130,7 +5130,7 @@ EXPORT_SYMBOL(__cond_resched_spinlock); */ int __sched cond_resched_softirq(void) { -#ifndef CONFIG_PREEMPT_RT +#ifndef CONFIG_PREEMPT_SOFTIRQS WARN_ON_ONCE(!in_softirq()); #endif if (need_resched() && system_state == SYSTEM_RUNNING) { patches/rcu-1.patch0000664000077200007720000013473710655544573013526 0ustar mingomingo This patch re-organizes the RCU code to enable multiple implementations of RCU. Users of RCU continues to include rcupdate.h and the RCU interfaces remain the same. This is in preparation for subsequently merging the preepmtpible RCU implementation. 
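What "the RCU interfaces remain the same" means in practice -- a minimal usage sketch, for reference only and not part of this patch. struct my_obj and the my_* functions are invented names, and updaters are assumed to be serialized by some external lock:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int val;
	struct rcu_head rcu;
};

static struct my_obj *my_obj_ptr;	/* RCU-protected pointer */

/* reader side: lockless, bracketed by a read-side critical section */
static int my_read(void)
{
	struct my_obj *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(my_obj_ptr);
	if (p)
		val = p->val;
	rcu_read_unlock();
	return val;
}

/* callback invoked only after all pre-existing readers have finished */
static void my_obj_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
}

/* updater side: publish the new version, defer freeing the old one */
static void my_update(struct my_obj *new)
{
	struct my_obj *old = my_obj_ptr;

	rcu_assign_pointer(my_obj_ptr, new);
	if (old)
		call_rcu(&old->rcu, my_obj_reclaim);
}

Callers keep compiling against exactly these interfaces, which is what allows the classic implementation to move into rcuclassic.h/rcuclassic.c without touching users of rcupdate.h.
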
Signed-off-by: Dipankar Sarma --- --- include/linux/rcuclassic.h | 148 +++++++++++ include/linux/rcupdate.h | 154 +++--------- kernel/Makefile | 2 kernel/rcuclassic.c | 561 +++++++++++++++++++++++++++++++++++++++++++++ kernel/rcupdate.c | 558 ++------------------------------------------ 5 files changed, 782 insertions(+), 641 deletions(-) Index: linux/include/linux/rcuclassic.h =================================================================== --- /dev/null +++ linux/include/linux/rcuclassic.h @@ -0,0 +1,148 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (classic version) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2001 + * + * Author: Dipankar Sarma + * + * Based on the original work by Paul McKenney + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. + * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of Read-Copy Update mechanism see - + * http://lse.sourceforge.net/locking/rcupdate.html + * + */ + +#ifndef __LINUX_RCUCLASSIC_H +#define __LINUX_RCUCLASSIC_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include + + +/* Global control variables for rcupdate callback mechanism. */ +struct rcu_ctrlblk { + long cur; /* Current batch number. */ + long completed; /* Number of the last completed batch */ + int next_pending; /* Is the next batch already waiting? */ + + int signaled; + + spinlock_t lock ____cacheline_internodealigned_in_smp; + cpumask_t cpumask; /* CPUs that need to switch in order */ + /* for current batch to proceed. */ +} ____cacheline_internodealigned_in_smp; + +/* Is batch a before batch b ? */ +static inline int rcu_batch_before(long a, long b) +{ + return (a - b) < 0; +} + +/* Is batch a after batch b ? */ +static inline int rcu_batch_after(long a, long b) +{ + return (a - b) > 0; +} + +/* + * Per-CPU data for Read-Copy UPdate. + * nxtlist - new callbacks are added here + * curlist - current batch for which quiescent cycle started if any + */ +struct rcu_data { + /* 1) quiescent state handling : */ + long quiescbatch; /* Batch # for grace period */ + int passed_quiesc; /* User-mode/idle loop etc. */ + int qs_pending; /* core waits for quiesc state */ + + /* 2) batch handling */ + long batch; /* Batch # for current RCU batch */ + struct rcu_head *nxtlist; + struct rcu_head **nxttail; + long qlen; /* # of queued callbacks */ + struct rcu_head *curlist; + struct rcu_head **curtail; + struct rcu_head *donelist; + struct rcu_head **donetail; + long blimit; /* Upper limit on a processed batch */ + int cpu; +}; + +DECLARE_PER_CPU(struct rcu_data, rcu_data); +DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); + +/* + * Increment the quiescent state counter. 
+ * The counter is a bit degenerated: We do not need to know + * how many quiescent states passed, just if there was at least + * one since the start of the grace period. Thus just a flag. + */ +static inline void rcu_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + rdp->passed_quiesc = 1; +} +static inline void rcu_bh_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); + rdp->passed_quiesc = 1; +} + +extern int rcu_pending(int cpu); +extern int rcu_needs_cpu(int cpu); + +#define __rcu_read_lock() \ + do { \ + preempt_disable(); \ + __acquire(RCU); \ + } while(0) +#define __rcu_read_unlock() \ + do { \ + __release(RCU); \ + preempt_enable(); \ + } while(0) + +#define __rcu_read_lock_bh() \ + do { \ + local_bh_disable(); \ + __acquire(RCU_BH); \ + } while(0) +#define __rcu_read_unlock_bh() \ + do { \ + __release(RCU_BH); \ + local_bh_enable(); \ + } while(0) + +#define __synchronize_sched() synchronize_rcu() + +extern void __rcu_init(void); +extern void rcu_check_callbacks(int cpu, int user); +extern void rcu_restart_cpu(int cpu); +extern long rcu_batches_completed(void); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_RCUCLASSIC_H */ Index: linux/include/linux/rcupdate.h =================================================================== --- linux.orig/include/linux/rcupdate.h +++ linux/include/linux/rcupdate.h @@ -1,5 +1,5 @@ /* - * Read-Copy Update mechanism for mutual exclusion + * Read-Copy Update mechanism for mutual exclusion * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -18,8 +18,8 @@ * Copyright (C) IBM Corporation, 2001 * * Author: Dipankar Sarma - * - * Based on the original work by Paul McKenney + * + * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf @@ -41,6 +41,7 @@ #include #include #include +#include /** * struct rcu_head - callback structure for use with RCU @@ -59,80 +60,6 @@ struct rcu_head { } while (0) - -/* Global control variables for rcupdate callback mechanism. */ -struct rcu_ctrlblk { - long cur; /* Current batch number. */ - long completed; /* Number of the last completed batch */ - int next_pending; /* Is the next batch already waiting? */ - - int signaled; - - spinlock_t lock ____cacheline_internodealigned_in_smp; - cpumask_t cpumask; /* CPUs that need to switch in order */ - /* for current batch to proceed. */ -} ____cacheline_internodealigned_in_smp; - -/* Is batch a before batch b ? */ -static inline int rcu_batch_before(long a, long b) -{ - return (a - b) < 0; -} - -/* Is batch a after batch b ? */ -static inline int rcu_batch_after(long a, long b) -{ - return (a - b) > 0; -} - -/* - * Per-CPU data for Read-Copy UPdate. - * nxtlist - new callbacks are added here - * curlist - current batch for which quiescent cycle started if any - */ -struct rcu_data { - /* 1) quiescent state handling : */ - long quiescbatch; /* Batch # for grace period */ - int passed_quiesc; /* User-mode/idle loop etc. 
*/ - int qs_pending; /* core waits for quiesc state */ - - /* 2) batch handling */ - long batch; /* Batch # for current RCU batch */ - struct rcu_head *nxtlist; - struct rcu_head **nxttail; - long qlen; /* # of queued callbacks */ - struct rcu_head *curlist; - struct rcu_head **curtail; - struct rcu_head *donelist; - struct rcu_head **donetail; - long blimit; /* Upper limit on a processed batch */ - int cpu; - struct rcu_head barrier; -}; - -DECLARE_PER_CPU(struct rcu_data, rcu_data); -DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); - -/* - * Increment the quiescent state counter. - * The counter is a bit degenerated: We do not need to know - * how many quiescent states passed, just if there was at least - * one since the start of the grace period. Thus just a flag. - */ -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - rdp->passed_quiesc = 1; -} -static inline void rcu_bh_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; -} - -extern int rcu_pending(int cpu); -extern int rcu_needs_cpu(int cpu); - /** * rcu_read_lock - mark the beginning of an RCU read-side critical section. * @@ -162,22 +89,14 @@ extern int rcu_needs_cpu(int cpu); * * It is illegal to block while in an RCU read-side critical section. */ -#define rcu_read_lock() \ - do { \ - preempt_disable(); \ - __acquire(RCU); \ - } while(0) +#define rcu_read_lock() __rcu_read_lock() /** * rcu_read_unlock - marks the end of an RCU read-side critical section. * * See rcu_read_lock() for more information. */ -#define rcu_read_unlock() \ - do { \ - __release(RCU); \ - preempt_enable(); \ - } while(0) +#define rcu_read_unlock() __rcu_read_unlock() /* * So where is rcu_write_lock()? It does not exist, as there is no @@ -200,22 +119,14 @@ extern int rcu_needs_cpu(int cpu); * can use just rcu_read_lock(). * */ -#define rcu_read_lock_bh() \ - do { \ - local_bh_disable(); \ - __acquire(RCU_BH); \ - } while(0) +#define rcu_read_lock_bh() __rcu_read_lock_bh() -/* +/** * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ -#define rcu_read_unlock_bh() \ - do { \ - __release(RCU_BH); \ - local_bh_enable(); \ - } while(0) +#define rcu_read_unlock_bh() __rcu_read_unlock_bh() /** * rcu_dereference - fetch an RCU-protected pointer in an @@ -267,22 +178,49 @@ extern int rcu_needs_cpu(int cpu); * In "classic RCU", these two guarantees happen to be one and * the same, but can differ in realtime RCU implementations. */ -#define synchronize_sched() synchronize_rcu() - -extern void rcu_init(void); -extern void rcu_check_callbacks(int cpu, int user); -extern void rcu_restart_cpu(int cpu); -extern long rcu_batches_completed(void); -extern long rcu_batches_completed_bh(void); +#define synchronize_sched() __synchronize_sched() -/* Exported interfaces */ -extern void FASTCALL(call_rcu(struct rcu_head *head, +/** + * call_rcu - Queue an RCU callback for invocation after a grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period + * + * The update function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. RCU read-side critical + * sections are delimited by rcu_read_lock() and rcu_read_unlock(), + * and may be nested. 
+ */ +extern void FASTCALL(call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head))); + + +/** + * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period + * + * The update function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_bh() assumes + * that the read-side critical sections end on completion of a softirq + * handler. This means that read-side critical sections in process + * context must not be interrupted by softirqs. This interface is to be + * used when most of the read-side critical sections are in softirq context. + * RCU read-side critical sections are delimited by rcu_read_lock() and + * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh() + * and rcu_read_unlock_bh(), if in process context. These may be nested. + */ extern void FASTCALL(call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head))); + +/* Exported common interfaces */ extern void synchronize_rcu(void); -void synchronize_idle(void); extern void rcu_barrier(void); +/* Internal to kernel */ +extern void rcu_init(void); +extern void rcu_check_callbacks(int cpu, int user); + #endif /* __KERNEL__ */ #endif /* __LINUX_RCUPDATE_H */ Index: linux/kernel/Makefile =================================================================== --- linux.orig/kernel/Makefile +++ linux/kernel/Makefile @@ -6,7 +6,7 @@ obj-y = sched.o fork.o exec_domain.o exit.o itimer.o time.o softirq.o resource.o \ sysctl.o capability.o ptrace.o timer.o user.o user_namespace.o \ signal.o sys.o kmod.o workqueue.o pid.o \ - rcupdate.o extable.o params.o posix-timers.o \ + rcupdate.o rcuclassic.o extable.o params.o posix-timers.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o rwsem.o latency.o nsproxy.o srcu.o die_notifier.o \ utsname.o Index: linux/kernel/rcuclassic.c =================================================================== --- /dev/null +++ linux/kernel/rcuclassic.c @@ -0,0 +1,561 @@ +/* + * Read-Copy Update mechanism for mutual exclusion, classic implementation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2001 + * + * Authors: Dipankar Sarma + * Manfred Spraul + * + * Based on the original work by Paul McKenney + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
+ * + * Papers: http://www.rdrop.com/users/paulmck/RCU + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU/ *.txt + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Definition for rcupdate control block. */ +static struct rcu_ctrlblk rcu_ctrlblk = { + .cur = -300, + .completed = -300, + .lock = SPIN_LOCK_UNLOCKED, + .cpumask = CPU_MASK_NONE, +}; +static struct rcu_ctrlblk rcu_bh_ctrlblk = { + .cur = -300, + .completed = -300, + .lock = SPIN_LOCK_UNLOCKED, + .cpumask = CPU_MASK_NONE, +}; + +DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; +DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L }; + +/* Fake initialization required by compiler */ +static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL}; +static int blimit = 10; +static int qhimark = 10000; +static int qlowmark = 100; + +#ifdef CONFIG_SMP +static void force_quiescent_state(struct rcu_data *rdp, + struct rcu_ctrlblk *rcp) +{ + int cpu; + cpumask_t cpumask; + set_need_resched(); + if (unlikely(!rcp->signaled)) { + rcp->signaled = 1; + /* + * Don't send IPI to itself. With irqs disabled, + * rdp->cpu is the current cpu. + */ + cpumask = rcp->cpumask; + cpu_clear(rdp->cpu, cpumask); + for_each_cpu_mask(cpu, cpumask) + smp_send_reschedule(cpu); + } +} +#else +static inline void force_quiescent_state(struct rcu_data *rdp, + struct rcu_ctrlblk *rcp) +{ + set_need_resched(); +} +#endif + +/* + * call_rcu - Queue an RCU callback for invocation after a grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period + * + * The update function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. RCU read-side critical + * sections are delimited by rcu_read_lock() and rcu_read_unlock(), + * and may be nested. + */ +void fastcall call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)) +{ + unsigned long flags; + struct rcu_data *rdp; + + head->func = func; + head->next = NULL; + local_irq_save(flags); + rdp = &__get_cpu_var(rcu_data); + *rdp->nxttail = head; + rdp->nxttail = &head->next; + if (unlikely(++rdp->qlen > qhimark)) { + rdp->blimit = INT_MAX; + force_quiescent_state(rdp, &rcu_ctrlblk); + } + local_irq_restore(flags); +} + +/* + * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period + * + * The update function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_bh() assumes + * that the read-side critical sections end on completion of a softirq + * handler. This means that read-side critical sections in process + * context must not be interrupted by softirqs. This interface is to be + * used when most of the read-side critical sections are in softirq context. + * RCU read-side critical sections are delimited by rcu_read_lock() and + * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh() + * and rcu_read_unlock_bh(), if in process context. These may be nested. 
+ */ +void fastcall call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)) +{ + unsigned long flags; + struct rcu_data *rdp; + + head->func = func; + head->next = NULL; + local_irq_save(flags); + rdp = &__get_cpu_var(rcu_bh_data); + *rdp->nxttail = head; + rdp->nxttail = &head->next; + + if (unlikely(++rdp->qlen > qhimark)) { + rdp->blimit = INT_MAX; + force_quiescent_state(rdp, &rcu_bh_ctrlblk); + } + + local_irq_restore(flags); +} + +/* + * Return the number of RCU batches processed thus far. Useful + * for debug and statistics. + */ +long rcu_batches_completed(void) +{ + return rcu_ctrlblk.completed; +} + +/* + * Return the number of RCU batches processed thus far. Useful + * for debug and statistics. + */ +long rcu_batches_completed_bh(void) +{ + return rcu_bh_ctrlblk.completed; +} + +/* + * Invoke the completed RCU callbacks. They are expected to be in + * a per-cpu list. + */ +static void rcu_do_batch(struct rcu_data *rdp) +{ + struct rcu_head *next, *list; + int count = 0; + + list = rdp->donelist; + while (list) { + next = list->next; + prefetch(next); + list->func(list); + list = next; + if (++count >= rdp->blimit) + break; + } + rdp->donelist = list; + + local_irq_disable(); + rdp->qlen -= count; + local_irq_enable(); + if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) + rdp->blimit = blimit; + + if (!rdp->donelist) + rdp->donetail = &rdp->donelist; + else + tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu)); +} + +/* + * Grace period handling: + * The grace period handling consists out of two steps: + * - A new grace period is started. + * This is done by rcu_start_batch. The start is not broadcasted to + * all cpus, they must pick this up by comparing rcp->cur with + * rdp->quiescbatch. All cpus are recorded in the + * rcu_ctrlblk.cpumask bitmap. + * - All cpus must go through a quiescent state. + * Since the start of the grace period is not broadcasted, at least two + * calls to rcu_check_quiescent_state are required: + * The first call just notices that a new grace period is running. The + * following calls check if there was a quiescent state since the beginning + * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If + * the bitmap is empty, then the grace period is completed. + * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace + * period (if necessary). + */ +/* + * Register a new batch of callbacks, and start it up if there is currently no + * active batch and the batch to be registered has not already occurred. + * Caller must hold rcu_ctrlblk.lock. + */ +static void rcu_start_batch(struct rcu_ctrlblk *rcp) +{ + if (rcp->next_pending && + rcp->completed == rcp->cur) { + rcp->next_pending = 0; + /* + * next_pending == 0 must be visible in + * __rcu_process_callbacks() before it can see new value of cur. + */ + smp_wmb(); + rcp->cur++; + + /* + * Accessing nohz_cpu_mask before incrementing rcp->cur needs a + * Barrier Otherwise it can cause tickless idle CPUs to be + * included in rcp->cpumask, which will extend graceperiods + * unnecessarily. + */ + smp_mb(); + cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); + + rcp->signaled = 0; + } +} + +/* + * cpu went through a quiescent state since the beginning of the grace period. + * Clear it from the cpu mask and complete the grace period if it was the last + * cpu. 
Start another grace period if someone has further entries pending + */ +static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) +{ + cpu_clear(cpu, rcp->cpumask); + if (cpus_empty(rcp->cpumask)) { + /* batch completed ! */ + rcp->completed = rcp->cur; + rcu_start_batch(rcp); + } +} + +/* + * Check if the cpu has gone through a quiescent state (say context + * switch). If so and if it already hasn't done so in this RCU + * quiescent cycle, then indicate that it has done so. + */ +static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, + struct rcu_data *rdp) +{ + if (rdp->quiescbatch != rcp->cur) { + /* start new grace period: */ + rdp->qs_pending = 1; + rdp->passed_quiesc = 0; + rdp->quiescbatch = rcp->cur; + return; + } + + /* Grace period already completed for this cpu? + * qs_pending is checked instead of the actual bitmap to avoid + * cacheline trashing. + */ + if (!rdp->qs_pending) + return; + + /* + * Was there a quiescent state since the beginning of the grace + * period? If no, then exit and wait for the next call. + */ + if (!rdp->passed_quiesc) + return; + rdp->qs_pending = 0; + + spin_lock(&rcp->lock); + /* + * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync + * during cpu startup. Ignore the quiescent state. + */ + if (likely(rdp->quiescbatch == rcp->cur)) + cpu_quiet(rdp->cpu, rcp); + + spin_unlock(&rcp->lock); +} + + +#ifdef CONFIG_HOTPLUG_CPU + +/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing + * locking requirements, the list it's pulling from has to belong to a cpu + * which is dead and hence not processing interrupts. + */ +static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, + struct rcu_head **tail) +{ + local_irq_disable(); + *this_rdp->nxttail = list; + if (list) + this_rdp->nxttail = tail; + local_irq_enable(); +} + +static void __rcu_offline_cpu(struct rcu_data *this_rdp, + struct rcu_ctrlblk *rcp, struct rcu_data *rdp) +{ + /* if the cpu going offline owns the grace period + * we can block indefinitely waiting for it, so flush + * it here + */ + spin_lock_bh(&rcp->lock); + if (rcp->cur != rcp->completed) + cpu_quiet(rdp->cpu, rcp); + spin_unlock_bh(&rcp->lock); + rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail); + rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail); + rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail); +} + +static void rcu_offline_cpu(int cpu) +{ + struct rcu_data *this_rdp = &get_cpu_var(rcu_data); + struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data); + + __rcu_offline_cpu(this_rdp, &rcu_ctrlblk, + &per_cpu(rcu_data, cpu)); + __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, + &per_cpu(rcu_bh_data, cpu)); + put_cpu_var(rcu_data); + put_cpu_var(rcu_bh_data); + tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu); +} + +#else + +static void rcu_offline_cpu(int cpu) +{ +} + +#endif + +/* + * This does the RCU processing work from tasklet context. 
+ */ +static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, + struct rcu_data *rdp) +{ + if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { + *rdp->donetail = rdp->curlist; + rdp->donetail = rdp->curtail; + rdp->curlist = NULL; + rdp->curtail = &rdp->curlist; + } + + if (rdp->nxtlist && !rdp->curlist) { + local_irq_disable(); + rdp->curlist = rdp->nxtlist; + rdp->curtail = rdp->nxttail; + rdp->nxtlist = NULL; + rdp->nxttail = &rdp->nxtlist; + local_irq_enable(); + + /* + * start the next batch of callbacks + */ + + /* determine batch number */ + rdp->batch = rcp->cur + 1; + /* see the comment and corresponding wmb() in + * the rcu_start_batch() + */ + smp_rmb(); + + if (!rcp->next_pending) { + /* and start it/schedule start if it's a new batch */ + spin_lock(&rcp->lock); + rcp->next_pending = 1; + rcu_start_batch(rcp); + spin_unlock(&rcp->lock); + } + } + + rcu_check_quiescent_state(rcp, rdp); + if (rdp->donelist) + rcu_do_batch(rdp); +} + +static void rcu_process_callbacks(unsigned long unused) +{ + __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); + __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); +} + +static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) +{ + /* This cpu has pending rcu entries and the grace period + * for them has completed. + */ + if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) + return 1; + + /* This cpu has no pending entries, but there are new entries */ + if (!rdp->curlist && rdp->nxtlist) + return 1; + + /* This cpu has finished callbacks to invoke */ + if (rdp->donelist) + return 1; + + /* The rcu core waits for a quiescent state from the cpu */ + if (rdp->quiescbatch != rcp->cur || rdp->qs_pending) + return 1; + + /* nothing to do */ + return 0; +} + +/* + * Check to see if there is any immediate RCU-related work to be done + * by the current CPU, returning 1 if so. This function is part of the + * RCU implementation; it is -not- an exported member of the RCU API. + */ +int rcu_pending(int cpu) +{ + return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || + __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); +} + +/* + * Check to see if any future RCU-related work will need to be done + * by the current CPU, even if none need be done immediately, returning + * 1 if so. This function is part of the RCU implementation; it is -not- + * an exported member of the RCU API. 
+ */ +int rcu_needs_cpu(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); + + return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); +} + +void rcu_check_callbacks(int cpu, int user) +{ + if (user || + (idle_cpu(cpu) && !in_softirq() && + hardirq_count() <= (1 << HARDIRQ_SHIFT))) { + rcu_qsctr_inc(cpu); + rcu_bh_qsctr_inc(cpu); + } else if (!in_softirq()) + rcu_bh_qsctr_inc(cpu); + tasklet_schedule(&per_cpu(rcu_tasklet, cpu)); +} + +static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, + struct rcu_data *rdp) +{ + memset(rdp, 0, sizeof(*rdp)); + rdp->curtail = &rdp->curlist; + rdp->nxttail = &rdp->nxtlist; + rdp->donetail = &rdp->donelist; + rdp->quiescbatch = rcp->completed; + rdp->qs_pending = 0; + rdp->cpu = cpu; + rdp->blimit = blimit; +} + +static void __devinit rcu_online_cpu(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu); + + rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp); + rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp); + tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); +} + +static int __devinit rcu_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + long cpu = (long)hcpu; + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + rcu_online_cpu(cpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + rcu_offline_cpu(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __devinitdata rcu_nb = { + .notifier_call = rcu_cpu_notify, +}; + +/* + * Initializes rcu mechanism. Assumed to be called early. + * That is before local timer(SMP) or jiffie timer (uniproc) is setup. + * Note that rcu_qsctr and friends are implicitly + * initialized due to the choice of ``0'' for RCU_CTR_INVALID. + */ +void __init __rcu_init(void) +{ + rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + /* Register notifier for non-boot CPUs */ + register_cpu_notifier(&rcu_nb); +} + +module_param(blimit, int, 0); +module_param(qhimark, int, 0); +module_param(qlowmark, int, 0); + +EXPORT_SYMBOL_GPL(rcu_batches_completed); +EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); +EXPORT_SYMBOL_GPL(call_rcu); +EXPORT_SYMBOL_GPL(call_rcu_bh); Index: linux/kernel/rcupdate.c =================================================================== --- linux.orig/kernel/rcupdate.c +++ linux/kernel/rcupdate.c @@ -19,7 +19,7 @@ * * Authors: Dipankar Sarma * Manfred Spraul - * + * * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: @@ -40,152 +40,53 @@ #include #include #include -#include #include -#include #include -#include -#include #include #include +#include -/* Definition for rcupdate control block. 
*/ -static struct rcu_ctrlblk rcu_ctrlblk = { - .cur = -300, - .completed = -300, - .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), - .cpumask = CPU_MASK_NONE, -}; -static struct rcu_ctrlblk rcu_bh_ctrlblk = { - .cur = -300, - .completed = -300, - .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), - .cpumask = CPU_MASK_NONE, +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; }; -DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; -DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L }; - -/* Fake initialization required by compiler */ -static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL}; -static int blimit = 10; -static int qhimark = 10000; -static int qlowmark = 100; - +static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head); static atomic_t rcu_barrier_cpu_count; static DEFINE_MUTEX(rcu_barrier_mutex); static struct completion rcu_barrier_completion; -#ifdef CONFIG_SMP -static void force_quiescent_state(struct rcu_data *rdp, - struct rcu_ctrlblk *rcp) -{ - int cpu; - cpumask_t cpumask; - set_need_resched(); - if (unlikely(!rcp->signaled)) { - rcp->signaled = 1; - /* - * Don't send IPI to itself. With irqs disabled, - * rdp->cpu is the current cpu. - */ - cpumask = rcp->cpumask; - cpu_clear(rdp->cpu, cpumask); - for_each_cpu_mask(cpu, cpumask) - smp_send_reschedule(cpu); - } -} -#else -static inline void force_quiescent_state(struct rcu_data *rdp, - struct rcu_ctrlblk *rcp) +/* Because of FASTCALL declaration of complete, we use this wrapper */ +static void wakeme_after_rcu(struct rcu_head *head) { - set_need_resched(); + struct rcu_synchronize *rcu; + + rcu = container_of(head, struct rcu_synchronize, head); + complete(&rcu->completion); } -#endif /** - * call_rcu - Queue an RCU callback for invocation after a grace period. - * @head: structure to be used for queueing the RCU updates. - * @func: actual update function to be invoked after the grace period + * synchronize_rcu - wait until a grace period has elapsed. * - * The update function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU + * Control will return to the caller some time after a full grace + * period has elapsed, in other words after all currently executing RCU * read-side critical sections have completed. RCU read-side critical * sections are delimited by rcu_read_lock() and rcu_read_unlock(), * and may be nested. - */ -void fastcall call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)) -{ - unsigned long flags; - struct rcu_data *rdp; - - head->func = func; - head->next = NULL; - local_irq_save(flags); - rdp = &__get_cpu_var(rcu_data); - *rdp->nxttail = head; - rdp->nxttail = &head->next; - if (unlikely(++rdp->qlen > qhimark)) { - rdp->blimit = INT_MAX; - force_quiescent_state(rdp, &rcu_ctrlblk); - } - local_irq_restore(flags); -} - -/** - * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. - * @head: structure to be used for queueing the RCU updates. - * @func: actual update function to be invoked after the grace period * - * The update function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. call_rcu_bh() assumes - * that the read-side critical sections end on completion of a softirq - * handler. This means that read-side critical sections in process - * context must not be interrupted by softirqs. 
This interface is to be - * used when most of the read-side critical sections are in softirq context. - * RCU read-side critical sections are delimited by rcu_read_lock() and - * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh() - * and rcu_read_unlock_bh(), if in process context. These may be nested. + * If your read-side code is not protected by rcu_read_lock(), do -not- + * use synchronize_rcu(). */ -void fastcall call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)) +void synchronize_rcu(void) { - unsigned long flags; - struct rcu_data *rdp; - - head->func = func; - head->next = NULL; - local_irq_save(flags); - rdp = &__get_cpu_var(rcu_bh_data); - *rdp->nxttail = head; - rdp->nxttail = &head->next; - - if (unlikely(++rdp->qlen > qhimark)) { - rdp->blimit = INT_MAX; - force_quiescent_state(rdp, &rcu_bh_ctrlblk); - } - - local_irq_restore(flags); -} + struct rcu_synchronize rcu; -/* - * Return the number of RCU batches processed thus far. Useful - * for debug and statistics. - */ -long rcu_batches_completed(void) -{ - return rcu_ctrlblk.completed; -} + init_completion(&rcu.completion); + /* Will wake me after RCU finished */ + call_rcu(&rcu.head, wakeme_after_rcu); -/* - * Return the number of RCU batches processed thus far. Useful - * for debug and statistics. - */ -long rcu_batches_completed_bh(void) -{ - return rcu_bh_ctrlblk.completed; + /* Wait for it */ + wait_for_completion(&rcu.completion); } static void rcu_barrier_callback(struct rcu_head *notused) @@ -200,10 +101,8 @@ static void rcu_barrier_callback(struct static void rcu_barrier_func(void *notused) { int cpu = smp_processor_id(); - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - struct rcu_head *head; + struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); - head = &rdp->barrier; atomic_inc(&rcu_barrier_cpu_count); call_rcu(head, rcu_barrier_callback); } @@ -224,414 +123,9 @@ void rcu_barrier(void) } EXPORT_SYMBOL_GPL(rcu_barrier); -/* - * Invoke the completed RCU callbacks. They are expected to be in - * a per-cpu list. - */ -static void rcu_do_batch(struct rcu_data *rdp) -{ - struct rcu_head *next, *list; - int count = 0; - - list = rdp->donelist; - while (list) { - next = list->next; - prefetch(next); - list->func(list); - list = next; - if (++count >= rdp->blimit) - break; - } - rdp->donelist = list; - - local_irq_disable(); - rdp->qlen -= count; - local_irq_enable(); - if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) - rdp->blimit = blimit; - - if (!rdp->donelist) - rdp->donetail = &rdp->donelist; - else - tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu)); -} - -/* - * Grace period handling: - * The grace period handling consists out of two steps: - * - A new grace period is started. - * This is done by rcu_start_batch. The start is not broadcasted to - * all cpus, they must pick this up by comparing rcp->cur with - * rdp->quiescbatch. All cpus are recorded in the - * rcu_ctrlblk.cpumask bitmap. - * - All cpus must go through a quiescent state. - * Since the start of the grace period is not broadcasted, at least two - * calls to rcu_check_quiescent_state are required: - * The first call just notices that a new grace period is running. The - * following calls check if there was a quiescent state since the beginning - * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If - * the bitmap is empty, then the grace period is completed. - * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace - * period (if necessary). 
- */ -/* - * Register a new batch of callbacks, and start it up if there is currently no - * active batch and the batch to be registered has not already occurred. - * Caller must hold rcu_ctrlblk.lock. - */ -static void rcu_start_batch(struct rcu_ctrlblk *rcp) -{ - if (rcp->next_pending && - rcp->completed == rcp->cur) { - rcp->next_pending = 0; - /* - * next_pending == 0 must be visible in - * __rcu_process_callbacks() before it can see new value of cur. - */ - smp_wmb(); - rcp->cur++; - - /* - * Accessing nohz_cpu_mask before incrementing rcp->cur needs a - * Barrier Otherwise it can cause tickless idle CPUs to be - * included in rcp->cpumask, which will extend graceperiods - * unnecessarily. - */ - smp_mb(); - cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); - - rcp->signaled = 0; - } -} - -/* - * cpu went through a quiescent state since the beginning of the grace period. - * Clear it from the cpu mask and complete the grace period if it was the last - * cpu. Start another grace period if someone has further entries pending - */ -static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) -{ - cpu_clear(cpu, rcp->cpumask); - if (cpus_empty(rcp->cpumask)) { - /* batch completed ! */ - rcp->completed = rcp->cur; - rcu_start_batch(rcp); - } -} - -/* - * Check if the cpu has gone through a quiescent state (say context - * switch). If so and if it already hasn't done so in this RCU - * quiescent cycle, then indicate that it has done so. - */ -static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, - struct rcu_data *rdp) -{ - if (rdp->quiescbatch != rcp->cur) { - /* start new grace period: */ - rdp->qs_pending = 1; - rdp->passed_quiesc = 0; - rdp->quiescbatch = rcp->cur; - return; - } - - /* Grace period already completed for this cpu? - * qs_pending is checked instead of the actual bitmap to avoid - * cacheline trashing. - */ - if (!rdp->qs_pending) - return; - - /* - * Was there a quiescent state since the beginning of the grace - * period? If no, then exit and wait for the next call. - */ - if (!rdp->passed_quiesc) - return; - rdp->qs_pending = 0; - - spin_lock(&rcp->lock); - /* - * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync - * during cpu startup. Ignore the quiescent state. - */ - if (likely(rdp->quiescbatch == rcp->cur)) - cpu_quiet(rdp->cpu, rcp); - - spin_unlock(&rcp->lock); -} - - -#ifdef CONFIG_HOTPLUG_CPU - -/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing - * locking requirements, the list it's pulling from has to belong to a cpu - * which is dead and hence not processing interrupts. 
- */ -static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, - struct rcu_head **tail) -{ - local_irq_disable(); - *this_rdp->nxttail = list; - if (list) - this_rdp->nxttail = tail; - local_irq_enable(); -} - -static void __rcu_offline_cpu(struct rcu_data *this_rdp, - struct rcu_ctrlblk *rcp, struct rcu_data *rdp) -{ - /* if the cpu going offline owns the grace period - * we can block indefinitely waiting for it, so flush - * it here - */ - spin_lock_bh(&rcp->lock); - if (rcp->cur != rcp->completed) - cpu_quiet(rdp->cpu, rcp); - spin_unlock_bh(&rcp->lock); - rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail); - rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail); - rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail); -} - -static void rcu_offline_cpu(int cpu) -{ - struct rcu_data *this_rdp = &get_cpu_var(rcu_data); - struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data); - - __rcu_offline_cpu(this_rdp, &rcu_ctrlblk, - &per_cpu(rcu_data, cpu)); - __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, - &per_cpu(rcu_bh_data, cpu)); - put_cpu_var(rcu_data); - put_cpu_var(rcu_bh_data); - tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu); -} - -#else - -static void rcu_offline_cpu(int cpu) -{ -} - -#endif - -/* - * This does the RCU processing work from tasklet context. - */ -static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, - struct rcu_data *rdp) -{ - if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { - *rdp->donetail = rdp->curlist; - rdp->donetail = rdp->curtail; - rdp->curlist = NULL; - rdp->curtail = &rdp->curlist; - } - - if (rdp->nxtlist && !rdp->curlist) { - local_irq_disable(); - rdp->curlist = rdp->nxtlist; - rdp->curtail = rdp->nxttail; - rdp->nxtlist = NULL; - rdp->nxttail = &rdp->nxtlist; - local_irq_enable(); - - /* - * start the next batch of callbacks - */ - - /* determine batch number */ - rdp->batch = rcp->cur + 1; - /* see the comment and corresponding wmb() in - * the rcu_start_batch() - */ - smp_rmb(); - - if (!rcp->next_pending) { - /* and start it/schedule start if it's a new batch */ - spin_lock(&rcp->lock); - rcp->next_pending = 1; - rcu_start_batch(rcp); - spin_unlock(&rcp->lock); - } - } - - rcu_check_quiescent_state(rcp, rdp); - if (rdp->donelist) - rcu_do_batch(rdp); -} - -static void rcu_process_callbacks(unsigned long unused) -{ - __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); - __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); -} - -static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) -{ - /* This cpu has pending rcu entries and the grace period - * for them has completed. - */ - if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) - return 1; - - /* This cpu has no pending entries, but there are new entries */ - if (!rdp->curlist && rdp->nxtlist) - return 1; - - /* This cpu has finished callbacks to invoke */ - if (rdp->donelist) - return 1; - - /* The rcu core waits for a quiescent state from the cpu */ - if (rdp->quiescbatch != rcp->cur || rdp->qs_pending) - return 1; - - /* nothing to do */ - return 0; -} - -/* - * Check to see if there is any immediate RCU-related work to be done - * by the current CPU, returning 1 if so. This function is part of the - * RCU implementation; it is -not- an exported member of the RCU API. 
- */ -int rcu_pending(int cpu) -{ - return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || - __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); -} - -/* - * Check to see if any future RCU-related work will need to be done - * by the current CPU, even if none need be done immediately, returning - * 1 if so. This function is part of the RCU implementation; it is -not- - * an exported member of the RCU API. - */ -int rcu_needs_cpu(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); - - return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); -} - -void rcu_check_callbacks(int cpu, int user) -{ - if (user || - (idle_cpu(cpu) && !in_softirq() && - hardirq_count() <= (1 << HARDIRQ_SHIFT))) { - rcu_qsctr_inc(cpu); - rcu_bh_qsctr_inc(cpu); - } else if (!in_softirq()) - rcu_bh_qsctr_inc(cpu); - tasklet_schedule(&per_cpu(rcu_tasklet, cpu)); -} - -static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, - struct rcu_data *rdp) -{ - memset(rdp, 0, sizeof(*rdp)); - rdp->curtail = &rdp->curlist; - rdp->nxttail = &rdp->nxtlist; - rdp->donetail = &rdp->donelist; - rdp->quiescbatch = rcp->completed; - rdp->qs_pending = 0; - rdp->cpu = cpu; - rdp->blimit = blimit; -} - -static void __devinit rcu_online_cpu(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu); - - rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp); - rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp); - tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); -} - -static int __cpuinit rcu_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - long cpu = (long)hcpu; - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - rcu_online_cpu(cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - rcu_offline_cpu(cpu); - break; - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block __cpuinitdata rcu_nb = { - .notifier_call = rcu_cpu_notify, -}; - -/* - * Initializes rcu mechanism. Assumed to be called early. - * That is before local timer(SMP) or jiffie timer (uniproc) is setup. - * Note that rcu_qsctr and friends are implicitly - * initialized due to the choice of ``0'' for RCU_CTR_INVALID. - */ void __init rcu_init(void) { - rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, - (void *)(long)smp_processor_id()); - /* Register notifier for non-boot CPUs */ - register_cpu_notifier(&rcu_nb); -} - -struct rcu_synchronize { - struct rcu_head head; - struct completion completion; -}; - -/* Because of FASTCALL declaration of complete, we use this wrapper */ -static void wakeme_after_rcu(struct rcu_head *head) -{ - struct rcu_synchronize *rcu; - - rcu = container_of(head, struct rcu_synchronize, head); - complete(&rcu->completion); -} - -/** - * synchronize_rcu - wait until a grace period has elapsed. - * - * Control will return to the caller some time after a full grace - * period has elapsed, in other words after all currently executing RCU - * read-side critical sections have completed. RCU read-side critical - * sections are delimited by rcu_read_lock() and rcu_read_unlock(), - * and may be nested. - * - * If your read-side code is not protected by rcu_read_lock(), do -not- - * use synchronize_rcu(). 
- */ -void synchronize_rcu(void) -{ - struct rcu_synchronize rcu; - - init_completion(&rcu.completion); - /* Will wake me after RCU finished */ - call_rcu(&rcu.head, wakeme_after_rcu); - - /* Wait for it */ - wait_for_completion(&rcu.completion); + __rcu_init(); } -module_param(blimit, int, 0); -module_param(qhimark, int, 0); -module_param(qlowmark, int, 0); -EXPORT_SYMBOL_GPL(rcu_batches_completed); -EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); -EXPORT_SYMBOL_GPL(call_rcu); -EXPORT_SYMBOL_GPL(call_rcu_bh); EXPORT_SYMBOL_GPL(synchronize_rcu); patches/preempt-realtime-mm.patch0000664000077200007720000001700010655544575016443 0ustar mingomingo--- include/linux/pagevec.h | 2 +- include/linux/vmstat.h | 10 ++++++++++ mm/bounce.c | 4 ++-- mm/memory.c | 11 +++++++++-- mm/mmap.c | 10 ++++++++-- mm/vmscan.c | 10 ++++++++-- mm/vmstat.c | 38 ++++++++++++++++++++++++++++++++------ 7 files changed, 70 insertions(+), 15 deletions(-) Index: linux-rt-rebase.q/include/linux/pagevec.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/pagevec.h +++ linux-rt-rebase.q/include/linux/pagevec.h @@ -9,7 +9,7 @@ #define _LINUX_PAGEVEC_H /* 14 pointers + two long's align the pagevec structure to a power of two */ -#define PAGEVEC_SIZE 14 +#define PAGEVEC_SIZE 8 struct page; struct address_space; Index: linux-rt-rebase.q/include/linux/vmstat.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/vmstat.h +++ linux-rt-rebase.q/include/linux/vmstat.h @@ -59,7 +59,12 @@ DECLARE_PER_CPU(struct vm_event_state, v static inline void __count_vm_event(enum vm_event_item item) { +#ifdef CONFIG_PREEMPT_RT + get_cpu_var(vm_event_states).event[item]++; + put_cpu(); +#else __get_cpu_var(vm_event_states).event[item]++; +#endif } static inline void count_vm_event(enum vm_event_item item) @@ -70,7 +75,12 @@ static inline void count_vm_event(enum v static inline void __count_vm_events(enum vm_event_item item, long delta) { +#ifdef CONFIG_PREEMPT_RT + get_cpu_var(vm_event_states).event[item] += delta; + put_cpu(); +#else __get_cpu_var(vm_event_states).event[item] += delta; +#endif } static inline void count_vm_events(enum vm_event_item item, long delta) Index: linux-rt-rebase.q/mm/bounce.c =================================================================== --- linux-rt-rebase.q.orig/mm/bounce.c +++ linux-rt-rebase.q/mm/bounce.c @@ -48,11 +48,11 @@ static void bounce_copy_vec(struct bio_v unsigned long flags; unsigned char *vto; - local_irq_save(flags); + local_irq_save_nort(flags); vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ); memcpy(vto + to->bv_offset, vfrom, to->bv_len); kunmap_atomic(vto, KM_BOUNCE_READ); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #else /* CONFIG_HIGHMEM */ Index: linux-rt-rebase.q/mm/memory.c =================================================================== --- linux-rt-rebase.q.orig/mm/memory.c +++ linux-rt-rebase.q/mm/memory.c @@ -281,7 +281,9 @@ void free_pgtables(struct mmu_gather **t if (!vma) /* Sometimes when exiting after an oops */ return; +#ifndef CONFIG_PREEMPT_RT if (vma->vm_next) +#endif tlb_finish_mmu(*tlb, tlb_start_addr(*tlb), tlb_end_addr(*tlb)); /* * Hide vma from rmap and vmtruncate before freeeing pgtables, @@ -292,7 +294,9 @@ void free_pgtables(struct mmu_gather **t unlink_file_vma(unlink); unlink = unlink->vm_next; } +#ifndef CONFIG_PREEMPT_RT if (vma->vm_next) +#endif *tlb = tlb_gather_mmu(vma->vm_mm, fullmm); #endif while (vma) { @@ -805,10 +809,13 @@ 
static unsigned long unmap_page_range(st return addr; } -#ifdef CONFIG_PREEMPT +#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT) # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE) #else -/* No preempt: go for improved straight-line efficiency */ +/* + * No preempt: go for improved straight-line efficiency + * on PREEMPT_RT this is not a critical latency-path. + */ # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE) #endif Index: linux-rt-rebase.q/mm/mmap.c =================================================================== --- linux-rt-rebase.q.orig/mm/mmap.c +++ linux-rt-rebase.q/mm/mmap.c @@ -1907,10 +1907,16 @@ asmlinkage long sys_munmap(unsigned long static inline void verify_mm_writelocked(struct mm_struct *mm) { #ifdef CONFIG_DEBUG_VM - if (unlikely(down_read_trylock(&mm->mmap_sem))) { +# ifdef CONFIG_PREEMPT_RT + if (unlikely(!rt_rwsem_is_locked(&mm->mmap_sem))) { WARN_ON(1); - up_read(&mm->mmap_sem); } +# else + if (unlikely(down_read_trylock(&mm->mmap_sem))) { + WARN_ON(1); + up_read(&mm->mmap_sem); + } +# endif #endif } Index: linux-rt-rebase.q/mm/vmscan.c =================================================================== --- linux-rt-rebase.q.orig/mm/vmscan.c +++ linux-rt-rebase.q/mm/vmscan.c @@ -23,6 +23,7 @@ #include #include #include +#include #include /* for try_to_release_page(), buffer_heads_over_limit */ #include @@ -787,7 +788,7 @@ static unsigned long shrink_inactive_lis nr_scanned += nr_scan; nr_freed = shrink_page_list(&page_list, sc); nr_reclaimed += nr_freed; - local_irq_disable(); + local_irq_disable_nort(); if (current_is_kswapd()) { __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); __count_vm_events(KSWAPD_STEAL, nr_freed); @@ -818,9 +819,14 @@ static unsigned long shrink_inactive_lis } } } while (nr_scanned < max_scan); + /* + * Non-PREEMPT_RT relies on IRQs-off protecting the page_states + * per-CPU data. PREEMPT_RT has that data protected even in + * __mod_page_state(), so no need to keep IRQs disabled. 
+ */ spin_unlock(&zone->lru_lock); done: - local_irq_enable(); + local_irq_enable_nort(); pagevec_release(&pvec); return nr_reclaimed; } Index: linux-rt-rebase.q/mm/vmstat.c =================================================================== --- linux-rt-rebase.q.orig/mm/vmstat.c +++ linux-rt-rebase.q/mm/vmstat.c @@ -157,10 +157,14 @@ static void refresh_zone_stat_thresholds void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) { - struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); - s8 *p = pcp->vm_stat_diff + item; + struct per_cpu_pageset *pcp; + int cpu; long x; + s8 *p; + cpu = get_cpu(); + pcp = zone_pcp(zone, cpu); + p = pcp->vm_stat_diff + item; x = delta + *p; if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) { @@ -168,6 +172,7 @@ void __mod_zone_page_state(struct zone * x = 0; } *p = x; + put_cpu(); } EXPORT_SYMBOL(__mod_zone_page_state); @@ -210,9 +215,13 @@ EXPORT_SYMBOL(mod_zone_page_state); */ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) { - struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); - s8 *p = pcp->vm_stat_diff + item; + struct per_cpu_pageset *pcp; + int cpu; + s8 *p; + cpu = get_cpu(); + pcp = zone_pcp(zone, cpu); + p = pcp->vm_stat_diff + item; (*p)++; if (unlikely(*p > pcp->stat_threshold)) { @@ -221,18 +230,34 @@ void __inc_zone_state(struct zone *zone, zone_page_state_add(*p + overstep, zone, item); *p = -overstep; } + put_cpu(); } void __inc_zone_page_state(struct page *page, enum zone_stat_item item) { +#ifdef CONFIG_PREEMPT_RT + unsigned long flags; + struct zone *zone; + + zone = page_zone(page); + local_irq_save(flags); + __inc_zone_state(zone, item); + local_irq_restore(flags); +#else __inc_zone_state(page_zone(page), item); +#endif } EXPORT_SYMBOL(__inc_zone_page_state); void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { - struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); - s8 *p = pcp->vm_stat_diff + item; + struct per_cpu_pageset *pcp; + int cpu; + s8 *p; + + cpu = get_cpu(); + pcp = zone_pcp(zone, cpu); + p = pcp->vm_stat_diff + item; (*p)--; @@ -242,6 +267,7 @@ void __dec_zone_state(struct zone *zone, zone_page_state_add(*p - overstep, zone, item); *p = overstep; } + put_cpu(); } void __dec_zone_page_state(struct page *page, enum zone_stat_item item) patches/nmi-profiling.patch0000664000077200007720000000661210655544575015341 0ustar mingomingo--- arch/i386/kernel/irq.c | 2 ++ arch/i386/kernel/nmi.c | 7 +++---- arch/x86_64/kernel/nmi.c | 6 +++--- drivers/char/sysrq.c | 2 +- include/asm-x86_64/apic.h | 2 ++ 5 files changed, 11 insertions(+), 8 deletions(-) Index: linux-rt-rebase.q/arch/i386/kernel/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/irq.c +++ linux-rt-rebase.q/arch/i386/kernel/irq.c @@ -79,7 +79,9 @@ fastcall notrace unsigned int do_IRQ(str u32 *isp; #endif +#ifdef CONFIG_X86_LOCAL_APIC irq_show_regs_callback(smp_processor_id(), regs); +#endif if (unlikely((unsigned)irq >= NR_IRQS)) { printk(KERN_EMERG "%s: cannot handle IRQ %d\n", Index: linux-rt-rebase.q/arch/i386/kernel/nmi.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/nmi.c +++ linux-rt-rebase.q/arch/i386/kernel/nmi.c @@ -350,9 +350,9 @@ void nmi_show_all_regs(void) } } -static DEFINE_SPINLOCK(nmi_print_lock); +static DEFINE_RAW_SPINLOCK(nmi_print_lock); -void irq_show_regs_callback(int cpu, struct pt_regs *regs) +notrace 
void irq_show_regs_callback(int cpu, struct pt_regs *regs) { if (!nmi_show_regs[cpu]) return; @@ -366,7 +366,7 @@ void irq_show_regs_callback(int cpu, str spin_unlock(&nmi_print_lock); } -__kprobes int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) +notrace __kprobes int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) { /* * Since current_thread_info()-> is always on the stack, and we @@ -434,7 +434,6 @@ __kprobes int nmi_watchdog_tick(struct p for_each_online_cpu(i) alert_counter[i] = 0; - } } else { Index: linux-rt-rebase.q/arch/x86_64/kernel/nmi.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/nmi.c +++ linux-rt-rebase.q/arch/x86_64/kernel/nmi.c @@ -339,9 +339,9 @@ void nmi_show_all_regs(void) } } -static DEFINE_SPINLOCK(nmi_print_lock); +static DEFINE_RAW_SPINLOCK(nmi_print_lock); -void irq_show_regs_callback(int cpu, struct pt_regs *regs) +notrace void irq_show_regs_callback(int cpu, struct pt_regs *regs) { if (!nmi_show_regs[cpu]) return; @@ -354,7 +354,7 @@ void irq_show_regs_callback(int cpu, str spin_unlock(&nmi_print_lock); } -int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) +int notrace __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) { int sum; int touched = 0; Index: linux-rt-rebase.q/drivers/char/sysrq.c =================================================================== --- linux-rt-rebase.q.orig/drivers/char/sysrq.c +++ linux-rt-rebase.q/drivers/char/sysrq.c @@ -208,7 +208,7 @@ static struct sysrq_key_op sysrq_showreg .enable_mask = SYSRQ_ENABLE_DUMP, }; -#if defined(__i386__) +#if defined(__i386__) || defined(__x86_64__) static void sysrq_handle_showallregs(int key, struct tty_struct *tty) { Index: linux-rt-rebase.q/include/asm-x86_64/apic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/apic.h +++ linux-rt-rebase.q/include/asm-x86_64/apic.h @@ -94,6 +94,8 @@ extern void smp_send_nmi_allbutself(void #define K8_APIC_EXT_INT_MSG_EXT 0x7 #define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 +extern void smp_send_nmi_allbutself(void); + #define ARCH_APICTIMER_STOPS_ON_C3 1 extern unsigned boot_cpu_id; patches/cputimer-thread-rt_A0.patch0000664000077200007720000002153010655544574016622 0ustar mingomingoIngo, This patch re-adds the posix-cpu-timer functionality by running it from a per-cpu RT thread. This allows cpu rlimits to be enforced against RT processes that would otherwise starve the system. 
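A side note on the hand-off described above (this sketch is not part of the original patch): the per-CPU queue it builds is a singly linked list whose last entry points to itself, so the draining RT thread needs neither a length nor a tail pointer — run_posix_cpu_timers() pushes the task and wakes the thread, and the thread walks ->posix_timer_list until an entry points back at itself. A minimal user-space model of just that list discipline; struct fake_task, queue_task() and drain() are invented stand-ins for the real task_struct plumbing:

/*
 * Model of the self-terminating per-CPU list: producers push entries,
 * the last entry points to itself, the consumer drains in one pass.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_task {
        const char *name;
        struct fake_task *posix_timer_list;     /* NULL = not queued */
};

static struct fake_task *tasklist;              /* per-CPU head in the patch */

/* mirrors the queueing step in run_posix_cpu_timers() */
static void queue_task(struct fake_task *tsk)
{
        if (tsk->posix_timer_list)
                return;                         /* already queued */
        /* last element points to itself so the drain loop can stop */
        tsk->posix_timer_list = tasklist ? tasklist : tsk;
        tasklist = tsk;
}

/* mirrors the drain loop in posix_cpu_timers_thread() */
static void drain(void)
{
        struct fake_task *tsk = tasklist, *next;

        tasklist = NULL;
        if (!tsk)
                return;
        for (;;) {
                next = tsk->posix_timer_list;
                printf("running timers for %s\n", tsk->name);
                tsk->posix_timer_list = NULL;
                if (next == tsk)                /* self-pointer: end of list */
                        break;
                tsk = next;
        }
}

int main(void)
{
        struct fake_task a = { "a", NULL }, b = { "b", NULL };

        queue_task(&a);
        queue_task(&b);
        drain();
        return 0;
}
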
thanks -john Signed-off-by: John Stultz include/linux/init_task.h | 1 include/linux/posix-timers.h | 2 include/linux/sched.h | 2 init/main.c | 2 kernel/fork.c | 2 kernel/posix-cpu-timers.c | 176 ++++++++++++++++++++++++++++++++++++++++++- 6 files changed, 180 insertions(+), 5 deletions(-) Index: linux-rt-rebase.q/include/linux/init_task.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/init_task.h +++ linux-rt-rebase.q/include/linux/init_task.h @@ -164,6 +164,7 @@ extern struct group_info init_groups; .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ + .posix_timer_list = NULL, \ .pi_lock = RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ Index: linux-rt-rebase.q/include/linux/posix-timers.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/posix-timers.h +++ linux-rt-rebase.q/include/linux/posix-timers.h @@ -115,4 +115,6 @@ void set_process_cpu_timer(struct task_s long clock_nanosleep_restart(struct restart_block *restart_block); +int posix_cpu_thread_init(void); + #endif Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -1161,6 +1161,8 @@ struct task_struct { unsigned long long it_sched_expires; struct list_head cpu_timers[3]; + struct task_struct* posix_timer_list; + /* process credentials */ uid_t uid,euid,suid,fsuid; gid_t gid,egid,sgid,fsgid; Index: linux-rt-rebase.q/init/main.c =================================================================== --- linux-rt-rebase.q.orig/init/main.c +++ linux-rt-rebase.q/init/main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -762,6 +763,7 @@ static void __init do_pre_smp_initcalls( migration_init(); #endif + posix_cpu_thread_init(); spawn_ksoftirqd(); if (!nosoftlockup) spawn_softlockup_task(); Index: linux-rt-rebase.q/kernel/fork.c =================================================================== --- linux-rt-rebase.q.orig/kernel/fork.c +++ linux-rt-rebase.q/kernel/fork.c @@ -1064,7 +1064,7 @@ static struct task_struct *copy_process( INIT_LIST_HEAD(&p->cpu_timers[0]); INIT_LIST_HEAD(&p->cpu_timers[1]); INIT_LIST_HEAD(&p->cpu_timers[2]); - + p->posix_timer_list = NULL; p->lock_depth = -1; /* -1 = no lock */ do_posix_clock_monotonic_gettime(&p->start_time); p->real_start_time = p->start_time; Index: linux-rt-rebase.q/kernel/posix-cpu-timers.c =================================================================== --- linux-rt-rebase.q.orig/kernel/posix-cpu-timers.c +++ linux-rt-rebase.q/kernel/posix-cpu-timers.c @@ -578,7 +578,7 @@ static void arm_timer(struct k_itimer *t p->cpu_timers : p->signal->cpu_timers); head += CPUCLOCK_WHICH(timer->it_clock); - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); spin_lock(&p->sighand->siglock); listpos = head; @@ -735,7 +735,7 @@ int posix_cpu_timer_set(struct k_itimer /* * Disarm any old timer after extracting its expiry time. */ - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); ret = 0; spin_lock(&p->sighand->siglock); @@ -1287,12 +1287,11 @@ out: * already updated our counts. We need to check if any timers fire now. * Interrupts are disabled. 
*/ -void run_posix_cpu_timers(struct task_struct *tsk) +void __run_posix_cpu_timers(struct task_struct *tsk) { LIST_HEAD(firing); struct k_itimer *timer, *next; - BUG_ON(!irqs_disabled()); #define UNEXPIRED(clock) \ (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \ @@ -1355,6 +1354,169 @@ void run_posix_cpu_timers(struct task_st } } +#include +#include +DEFINE_PER_CPU(struct task_struct *, posix_timer_task); +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); + +static int posix_cpu_timers_thread(void *data) +{ + int cpu = (long)data; + + BUG_ON(per_cpu(posix_timer_task,cpu) != current); + + + while (!kthread_should_stop()) { + struct task_struct *tsk = NULL; + struct task_struct *next = NULL; + + if (cpu_is_offline(cpu)) { + goto wait_to_die; + } + + /* grab task list */ + raw_local_irq_disable(); + tsk = per_cpu(posix_timer_tasklist, cpu); + per_cpu(posix_timer_tasklist, cpu) = NULL; + raw_local_irq_enable(); + + + /* its possible the list is empty, just return */ + if (!tsk) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + __set_current_state(TASK_RUNNING); + continue; + } + + /* Process task list */ + while (1) { + /* save next */ + next = tsk->posix_timer_list; + + /* run the task timers, clear its ptr and + * unreference it + */ + __run_posix_cpu_timers(tsk); + tsk->posix_timer_list = NULL; + put_task_struct(tsk); + + /* check if this is the last on the list */ + if (next == tsk) + break; + tsk = next; + } + } + return 0; + +wait_to_die: + /* Wait for kthread_stop */ + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + return 0; +} + +void run_posix_cpu_timers(struct task_struct *tsk) +{ + unsigned long cpu = smp_processor_id(); + struct task_struct *tasklist; + + BUG_ON(!irqs_disabled()); + if(!per_cpu(posix_timer_task, cpu)) + return; + /* get per-cpu references */ + tasklist = per_cpu(posix_timer_tasklist, cpu); + + /* check to see if we're already queued */ + if (!tsk->posix_timer_list) { + get_task_struct(tsk); + if (tasklist) { + tsk->posix_timer_list = tasklist; + } else { + /* + * The list is terminated by a self-pointing + * task_struct + */ + tsk->posix_timer_list = tsk; + } + per_cpu(posix_timer_tasklist, cpu) = tsk; + } + /* XXX signal the thread somehow */ + wake_up_process(per_cpu(posix_timer_task,cpu)); +} + + + + +/* + * posix_cpu_thread_call - callback that gets triggered when a CPU is added. + * Here we can start up the necessary migration thread for the new CPU. + */ +static int posix_cpu_thread_call(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + int cpu = (long)hcpu; + struct task_struct *p; + struct sched_param param; + + switch (action) { + case CPU_UP_PREPARE: + p = kthread_create(posix_cpu_timers_thread, hcpu, + "posix_cpu_timers/%d",cpu); + if (IS_ERR(p)) + return NOTIFY_BAD; + p->flags |= PF_NOFREEZE; + kthread_bind(p, cpu); + /* Must be high prio to avoid getting starved */ + param.sched_priority = MAX_RT_PRIO-1; + sched_setscheduler(p, SCHED_FIFO, ¶m); + per_cpu(posix_timer_task,cpu) = p; + break; + case CPU_ONLINE: + /* Strictly unneccessary, as first user will wake it. */ + wake_up_process(per_cpu(posix_timer_task,cpu)); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + /* Unbind it from offline cpu so it can run. Fall thru. 
*/ + kthread_bind(per_cpu(posix_timer_task,cpu), + any_online_cpu(cpu_online_map)); + kthread_stop(per_cpu(posix_timer_task,cpu)); + per_cpu(posix_timer_task,cpu) = NULL; + break; + case CPU_DEAD: + kthread_stop(per_cpu(posix_timer_task,cpu)); + per_cpu(posix_timer_task,cpu) = NULL; + break; +#endif + } + return NOTIFY_OK; +} + +/* Register at highest priority so that task migration (migrate_all_tasks) + * happens before everything else. + */ +static struct notifier_block __devinitdata posix_cpu_thread_notifier = { + .notifier_call = posix_cpu_thread_call, + .priority = 10 +}; + +int __init posix_cpu_thread_init(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + /* Start one for boot CPU. */ + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, cpu); + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, cpu); + register_cpu_notifier(&posix_cpu_thread_notifier); + return 0; +} + + + /* * Set one of the process-wide special case CPU timers. * The tasklist_lock and tsk->sighand->siglock must be held by the caller. @@ -1620,6 +1782,12 @@ static __init int init_posix_cpu_timers( .nsleep = thread_cpu_nsleep, .nsleep_restart = thread_cpu_nsleep_restart, }; + unsigned long cpu; + + /* init the per-cpu posix_timer_tasklets */ + for_each_cpu_mask(cpu, cpu_possible_map) { + per_cpu(posix_timer_tasklist, cpu) = NULL; + } register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); patches/disable-irqpoll.patch0000664000077200007720000000177310655544574015654 0ustar mingomingo kernel/irq/spurious.c | 10 ++++++++++ 1 file changed, 10 insertions(+) Index: linux-rt-rebase.q/kernel/irq/spurious.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/spurious.c +++ linux-rt-rebase.q/kernel/irq/spurious.c @@ -239,6 +239,11 @@ __setup("noirqdebug", noirqdebug_setup); static int __init irqfixup_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT + printk(KERN_WARNING "irqfixup boot option not supported " + "w/ CONFIG_PREEMPT_RT\n"); + return 1; +#endif irqfixup = 1; printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); printk(KERN_WARNING "This may impact system performance.\n"); @@ -250,6 +255,11 @@ __setup("irqfixup", irqfixup_setup); static int __init irqpoll_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT + printk(KERN_WARNING "irqpoll boot option not supported " + "w/ CONFIG_PREEMPT_RT\n"); + return 1; +#endif irqfixup = 2; printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); patches/vsyscall-add-notrace.patch0000664000077200007720000000552610655544576016612 0ustar mingomingoFrom rostedt@goodmis.org Tue Jun 19 04:41:17 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from ms-smtp-01.nyroc.rr.com (ms-smtp-01.nyroc.rr.com [24.24.2.55]) by mail.tglx.de (Postfix) with ESMTP id 4C19265C3EC for ; Tue, 19 Jun 2007 04:41:17 +0200 (CEST) Received: from [192.168.23.10] (cpe-24-94-51-176.stny.res.rr.com [24.94.51.176]) by ms-smtp-01.nyroc.rr.com (8.13.6/8.13.6) with ESMTP id l5J2f9l0013971; Mon, 18 Jun 2007 22:41:10 -0400 (EDT) Subject: [PATCH RT] Don't call mcount from vsyscall_fn's From: Steven Rostedt To: Ingo Molnar Cc: Thomas Gleixner , LKML , RT Content-Type: text/plain Date: Mon, 18 Jun 2007 22:41:09 -0400 Message-Id: <1182220869.15228.10.camel@localhost.localdomain> Mime-Version: 1.0 X-Mailer: 
Evolution 2.6.3 X-Virus-Scanned: Symantec AntiVirus Scan Engine X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit This bit me in the butt. I couldn't understand why my init app was segfaulting, with a kernel address, but a user RIP and RSP. Well, the RIP I think was bogus, but the kernel address was always the start of "mcount". Looking deeper, I printed out what was in the RSP (even though it was a user stack). It ended up showing me that the calling address was from the VDSO area. Looking even further, I found the offending culprit, which was vread_hpet. Looking at the assembly dump, I saw the vread_hpet was calling mcount, but I could not see it in the code. Nor could I see it in hpet.i (-E option of compiling). Well, I guess Ingo is a magician when it comes to compiler tricks, and has the mcount being called by "every!!" function, unless you add the "notrace" option. This patch adds the notrace to vsyscall_fn, so that we don't have user land apps calling mcount and crashing! Signed-off-by: Steven Rostedt --- include/asm-x86_64/vsyscall.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/include/asm-x86_64/vsyscall.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/vsyscall.h +++ linux-rt-rebase.q/include/asm-x86_64/vsyscall.h @@ -24,7 +24,7 @@ enum vsyscall_num { ((unused, __section__ (".vsyscall_gtod_data"),aligned(16))) #define __section_vsyscall_clock __attribute__ \ ((unused, __section__ (".vsyscall_clock"),aligned(16))) -#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn"))) +#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn"))) notrace #define VGETCPU_RDTSCP 1 #define VGETCPU_LSL 2 patches/preempt-realtime-powerpc-b3.patch0000664000077200007720000000376710655544574020031 0ustar mingomingo To fix the following runtime warning. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - BUG: using smp_processor_id() in preemptible [00000000] code: init/371 caller is .pgtable_free_tlb+0x2c/0x14c Call Trace: [C00000000FF6B770] [C00000000000FAAC] .show_stack+0x68/0x1b0 (unreliable) [C00000000FF6B810] [C0000000001F7190] .debug_smp_processor_id+0xc8/0xf8 [C00000000FF6B8A0] [C00000000002C52C] .pgtable_free_tlb+0x2c/0x14c [C00000000FF6B940] [C0000000000B6528] .free_pgd_range+0x234/0x3bc [C00000000FF6BA40] [C0000000000B6AB8] .free_pgtables+0x224/0x260 [C00000000FF6BB00] [C0000000000B7FE8] .exit_mmap+0x100/0x208 [C00000000FF6BBC0] [C000000000055FB0] .mmput+0x70/0x12c [C00000000FF6BC50] [C00000000005B728] .exit_mm+0x150/0x170 [C00000000FF6BCE0] [C00000000005D80C] .do_exit+0x28c/0x9bc [C00000000FF6BDA0] [C00000000005DFF0] .sys_exit_group+0x0/0x8 [C00000000FF6BE30] [C000000000008634] syscall_exit+0x0/0x40 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Would it be better to just use raw_smp_processor_id() rather than tlb->cpu? 
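Whichever id source is used, the pattern the hunk below settles on is the same: read the CPU number once, at a point where the task cannot migrate (tlb_gather_mmu() records it in tlb->cpu while preemption is disabled), and reuse that stored value later instead of re-querying it from preemptible context. A rough user-space analogue, purely for illustration and not from the patch — struct gather, gather_start() and gather_finish() are invented names, and sched_getcpu() stands in for smp_processor_id():

/*
 * Capture the CPU id while it is stable, stash it in the gather
 * structure, and use the stashed value when finishing the batch.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

struct gather {
        int cpu;                /* plays the role of tlb->cpu */
};

static void gather_start(struct gather *g)
{
        g->cpu = sched_getcpu();        /* kernel: set under preempt_disable() */
}

static void gather_finish(struct gather *g)
{
        /* use g->cpu, not sched_getcpu(), so a later migration is harmless */
        printf("flushing batch for cpu %d\n", g->cpu);
}

int main(void)
{
        struct gather g;

        gather_start(&g);
        gather_finish(&g);
        return 0;
}
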
Signed-off-by: Tsutomu Owa -- owa --- arch/powerpc/mm/tlb_64.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/mm/tlb_64.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/mm/tlb_64.c +++ linux-rt-rebase.q/arch/powerpc/mm/tlb_64.c @@ -93,8 +93,11 @@ static void pte_free_submit(struct pte_f void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) { - /* This is safe since tlb_gather_mmu has disabled preemption */ - cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id()); + /* + * This is safe since tlb_gather_mmu has disabled preemption. + * tlb->cpu is set by tlb_gather_mmu as well. + */ + cpumask_t local_cpumask = cpumask_of_cpu(tlb->cpu); struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); if (atomic_read(&tlb->mm->mm_users) < 2 || patches/rcu-various-fixups.patch0000664000077200007720000000402610655544576016360 0ustar mingomingo--- security/selinux/avc.c | 9 +++++++++ security/selinux/netif.c | 2 ++ 2 files changed, 11 insertions(+) Index: linux-rt-rebase.q/security/selinux/avc.c =================================================================== --- linux-rt-rebase.q.orig/security/selinux/avc.c +++ linux-rt-rebase.q/security/selinux/avc.c @@ -312,6 +312,7 @@ static inline int avc_reclaim_node(void) if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags)) continue; + rcu_read_lock(); list_for_each_entry(node, &avc_cache.slots[hvalue], list) { if (atomic_dec_and_test(&node->ae.used)) { /* Recently Unused */ @@ -319,11 +320,13 @@ static inline int avc_reclaim_node(void) avc_cache_stats_incr(reclaims); ecx++; if (ecx >= AVC_CACHE_RECLAIM) { + rcu_read_unlock(); spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags); goto out; } } } + rcu_read_unlock(); spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags); } out: @@ -807,8 +810,14 @@ int avc_ss_reset(u32 seqno) for (i = 0; i < AVC_CACHE_SLOTS; i++) { spin_lock_irqsave(&avc_cache.slots_lock[i], flag); + /* + * On -rt the outer spinlock does not prevent RCU + * from being performed: + */ + rcu_read_lock(); list_for_each_entry(node, &avc_cache.slots[i], list) avc_node_delete(node); + rcu_read_unlock(); spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag); } Index: linux-rt-rebase.q/security/selinux/netif.c =================================================================== --- linux-rt-rebase.q.orig/security/selinux/netif.c +++ linux-rt-rebase.q/security/selinux/netif.c @@ -209,6 +209,7 @@ static void sel_netif_flush(void) { int idx; + rcu_read_lock(); spin_lock_bh(&sel_netif_lock); for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++) { struct sel_netif *netif; @@ -217,6 +218,7 @@ static void sel_netif_flush(void) sel_netif_destroy(netif); } spin_unlock_bh(&sel_netif_lock); + rcu_read_unlock(); } static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid, patches/i386-mark-atomic-irq-ops-raw.patch0000664000077200007720000000112010655544571017622 0ustar mingomingo--- include/asm-i386/atomic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux/include/asm-i386/atomic.h =================================================================== --- linux.orig/include/asm-i386/atomic.h +++ linux/include/asm-i386/atomic.h @@ -195,10 +195,10 @@ static __inline__ int atomic_add_return( #ifdef CONFIG_M386 no_xadd: /* Legacy 386 processor */ - local_irq_save(flags); + raw_local_irq_save(flags); __i = atomic_read(v); atomic_set(v, i + __i); - local_irq_restore(flags); + 
raw_local_irq_restore(flags); return i + __i; #endif } patches/preempt-softirqs-core.patch0000664000077200007720000004400310655544573017033 0ustar mingomingo--- include/linux/bottom_half.h | 1 include/linux/interrupt.h | 13 +- include/linux/sched.h | 17 ++ kernel/Kconfig.preempt | 16 ++ kernel/sched.c | 28 ++++ kernel/softirq.c | 273 ++++++++++++++++++++++++++++++++++---------- 6 files changed, 279 insertions(+), 69 deletions(-) Index: linux-rt-rebase.q/include/linux/bottom_half.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/bottom_half.h +++ linux-rt-rebase.q/include/linux/bottom_half.h @@ -2,7 +2,6 @@ #define _LINUX_BH_H extern void local_bh_disable(void); -extern void __local_bh_enable(void); extern void _local_bh_enable(void); extern void local_bh_enable(void); extern void local_bh_enable_ip(unsigned long ip); Index: linux-rt-rebase.q/include/linux/interrupt.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/interrupt.h +++ linux-rt-rebase.q/include/linux/interrupt.h @@ -270,6 +270,8 @@ enum HRTIMER_SOFTIRQ, #endif RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ + /* Entries after this are ignored in split softirq mode */ + MAX_SOFTIRQ, }; /* softirq mask and active fields moved to irq_cpustat_t in @@ -282,13 +284,21 @@ struct softirq_action void *data; }; +#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) +#define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr) + asmlinkage void do_softirq(void); extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); extern void softirq_init(void); -#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); extern void FASTCALL(raise_softirq(unsigned int nr)); +extern void wakeup_irqd(void); +#ifdef CONFIG_PREEMPT_SOFTIRQS +extern void wait_for_softirq(int softirq); +#else +# define wait_for_softirq(x) do {} while(0) +#endif /* Tasklets --- multithreaded analogue of BHs. 
@@ -400,6 +410,7 @@ extern void tasklet_kill(struct tasklet_ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); +void takeover_tasklets(unsigned int cpu); /* * Autoprobing for irqs: Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -89,6 +89,12 @@ struct sched_param { #include +#ifdef CONFIG_PREEMPT_SOFTIRQS +extern int softirq_preemption; +#else +# define softirq_preemption 0 +#endif + struct exec_domain; struct futex_pi_state; struct bio; @@ -1445,6 +1451,7 @@ static inline void put_task_struct(struc #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ +#define PF_SOFTIRQ 0x04000000 /* softirq context */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ @@ -1876,6 +1883,7 @@ static inline int need_resched(void) extern int cond_resched(void); extern int cond_resched_lock(spinlock_t * lock); extern int cond_resched_softirq(void); +extern int cond_resched_softirq_context(void); /* * Does a critical section need to be broken due to another @@ -1891,10 +1899,13 @@ extern int cond_resched_softirq(void); * Does a critical section need to be broken due to another * task waiting or preemption being signalled: */ -static inline int lock_need_resched(spinlock_t *lock) +#define lock_need_resched(lock) \ + unlikely(need_lockbreak(lock) || need_resched()) + +static inline int softirq_need_resched(void) { - if (need_lockbreak(lock) || need_resched()) - return 1; + if (softirq_preemption && (current->flags & PF_SOFTIRQ)) + return need_resched(); return 0; } Index: linux-rt-rebase.q/kernel/Kconfig.preempt =================================================================== --- linux-rt-rebase.q.orig/kernel/Kconfig.preempt +++ linux-rt-rebase.q/kernel/Kconfig.preempt @@ -102,3 +102,19 @@ config RCU_TRACE Say Y here if you want to enable RCU tracing Say N if you are unsure. + +config PREEMPT_SOFTIRQS + bool "Thread Softirqs" + default n +# depends on PREEMPT + help + This option reduces the latency of the kernel by 'threading' + soft interrupts. This means that all softirqs will execute + in softirqd's context. While this helps latency, it can also + reduce performance. + + The threading of softirqs can also be controlled via + /proc/sys/kernel/softirq_preemption runtime flag and the + sofirq-preempt=0/1 boot-time option. + + Say N if you are unsure. 
Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -3267,7 +3267,7 @@ void account_system_time(struct task_str tmp = cputime_to_cputime64(cputime); if (hardirq_count() - hardirq_offset) cpustat->irq = cputime64_add(cpustat->irq, tmp); - else if (softirq_count()) + else if (softirq_count() || (p->flags & PF_SOFTIRQ)) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); else if (p != rq->idle) cpustat->system = cputime64_add(cpustat->system, tmp); @@ -3514,7 +3514,7 @@ asmlinkage void __sched preempt_schedule int saved_lock_depth; #endif /* Catch callers which need to be fixed */ - BUG_ON(ti->preempt_count || !irqs_disabled()); + WARN_ON_ONCE(ti->preempt_count || !irqs_disabled()); need_resched: add_preempt_count(PREEMPT_ACTIVE); @@ -4563,9 +4563,12 @@ int cond_resched_lock(spinlock_t *lock) } EXPORT_SYMBOL(cond_resched_lock); +/* + * Voluntarily preempt a process context that has softirqs disabled: + */ int __sched cond_resched_softirq(void) { - BUG_ON(!in_softirq()); + WARN_ON_ONCE(!in_softirq()); if (need_resched() && system_state == SYSTEM_RUNNING) { local_bh_enable(); @@ -4577,6 +4580,25 @@ int __sched cond_resched_softirq(void) } EXPORT_SYMBOL(cond_resched_softirq); +/* + * Voluntarily preempt a softirq context (possible with softirq threading): + */ +int __sched cond_resched_softirq_context(void) +{ + WARN_ON_ONCE(!in_softirq()); + + if (softirq_need_resched() && system_state == SYSTEM_RUNNING) { + raw_local_irq_disable(); + _local_bh_enable(); + raw_local_irq_enable(); + __cond_resched(); + local_bh_disable(); + return 1; + } + return 0; +} +EXPORT_SYMBOL(cond_resched_softirq_context); + /** * yield - yield the current processor to other threads. * Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -4,9 +4,15 @@ * Copyright (C) 1992 Linus Torvalds * * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) + * + * Softirq-split implemetation by + * Copyright (C) 2005 Thomas Gleixner, Ingo Molnar */ #include +#include +#include +#include #include #include #include @@ -46,7 +52,41 @@ EXPORT_SYMBOL(irq_stat); static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; -static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); +struct softirqdata { + int nr; + unsigned long cpu; + struct task_struct *tsk; +#ifdef CONFIG_PREEMPT_SOFTIRQS + wait_queue_head_t wait; + int running; +#endif +}; + +static DEFINE_PER_CPU(struct softirqdata [MAX_SOFTIRQ], ksoftirqd); + +#ifdef CONFIG_PREEMPT_SOFTIRQS +/* + * Preempting the softirq causes cases that would not be a + * problem when the softirq is not preempted. That is a + * process may have code to spin while waiting for a softirq + * to finish on another CPU. But if it happens that the + * process has preempted the softirq, this could cause a + * deadlock. 
+ */ +void wait_for_softirq(int softirq) +{ + struct softirqdata *data = &__get_cpu_var(ksoftirqd)[softirq]; + if (data->running) { + DECLARE_WAITQUEUE(wait, current); + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&data->wait, &wait); + if (data->running) + schedule(); + remove_wait_queue(&data->wait, &wait); + __set_current_state(TASK_RUNNING); + } +} +#endif /* * we cannot loop indefinitely here to avoid userspace starvation, @@ -54,16 +94,32 @@ static DEFINE_PER_CPU(struct task_struct * to the pending events, so lets the scheduler to balance * the softirq load for us. */ -static inline void wakeup_softirqd(void) +static void wakeup_softirqd(int softirq) { /* Interrupts are disabled: no need to stop preemption */ - struct task_struct *tsk = __get_cpu_var(ksoftirqd); + struct task_struct *tsk = __get_cpu_var(ksoftirqd)[softirq].tsk; if (tsk && tsk->state != TASK_RUNNING) wake_up_process(tsk); } /* + * Wake up the softirq threads which have work + */ +static void trigger_softirqs(void) +{ + u32 pending = local_softirq_pending(); + int curr = 0; + + while (pending) { + if (pending & 1) + wakeup_softirqd(curr); + pending >>= 1; + curr++; + } +} + +/* * This one is for softirq.c-internal use, * where hardirqs are disabled legitimately: */ @@ -98,20 +154,6 @@ void local_bh_disable(void) EXPORT_SYMBOL(local_bh_disable); -void __local_bh_enable(void) -{ - WARN_ON_ONCE(in_irq()); - - /* - * softirqs should never be enabled by __local_bh_enable(), - * it always nests inside local_bh_enable() sections: - */ - WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET); - - sub_preempt_count(SOFTIRQ_OFFSET); -} -EXPORT_SYMBOL_GPL(__local_bh_enable); - /* * Special-case - softirqs can safely be enabled in * cond_resched_softirq(), or by __do_softirq(), @@ -205,7 +247,7 @@ EXPORT_SYMBOL(local_bh_enable_ip); */ #define MAX_SOFTIRQ_RESTART 10 -asmlinkage void __do_softirq(void) +asmlinkage void ___do_softirq(void) { struct softirq_action *h; __u32 pending; @@ -215,9 +257,6 @@ asmlinkage void __do_softirq(void) pending = local_softirq_pending(); account_system_vtime(current); - __local_bh_disable((unsigned long)__builtin_return_address(0)); - trace_softirq_enter(); - cpu = smp_processor_id(); restart: /* Reset the pending bitmask before enabling irqs */ @@ -229,8 +268,17 @@ restart: do { if (pending & 1) { - h->action(h); + { + u32 preempt_count = preempt_count(); + h->action(h); + if (preempt_count != preempt_count()) { + print_symbol("BUG: softirq exited %s with wrong preemption count!\n", (unsigned long) h->action); + printk("entered with %08x, exited with %08x.\n", preempt_count, preempt_count()); + preempt_count() = preempt_count; + } + } rcu_bh_qsctr_inc(cpu); + cond_resched_softirq_context(); } h++; pending >>= 1; @@ -243,12 +291,34 @@ restart: goto restart; if (pending) - wakeup_softirqd(); + trigger_softirqs(); +} + +asmlinkage void __do_softirq(void) +{ +#ifdef CONFIG_PREEMPT_SOFTIRQS + /* + * 'preempt harder'. Push all softirq processing off to ksoftirqd. 
+ */ + if (softirq_preemption) { + if (local_softirq_pending()) + trigger_softirqs(); + return; + } +#endif + /* + * 'immediate' softirq execution: + */ + __local_bh_disable((unsigned long)__builtin_return_address(0)); + trace_softirq_enter(); + + ___do_softirq(); trace_softirq_exit(); account_system_vtime(current); _local_bh_enable(); + } #ifndef __ARCH_HAS_DO_SOFTIRQ @@ -317,19 +387,11 @@ void irq_exit(void) */ inline fastcall void raise_softirq_irqoff(unsigned int nr) { - __raise_softirq_irqoff(nr); + __do_raise_softirq_irqoff(nr); - /* - * If we're in an interrupt or softirq, we're done - * (this also catches softirq-disabled code). We will - * actually run the softirq once we return from - * the irq or softirq. - * - * Otherwise we wake up ksoftirqd to make sure we - * schedule the softirq soon. - */ - if (!in_interrupt()) - wakeup_softirqd(); +#ifdef CONFIG_PREEMPT_SOFTIRQS + wakeup_softirqd(nr); +#endif } EXPORT_SYMBOL(raise_softirq_irqoff); @@ -414,7 +476,7 @@ static void tasklet_action(struct softir local_irq_disable(); t->next = __get_cpu_var(tasklet_vec).list; __get_cpu_var(tasklet_vec).list = t; - __raise_softirq_irqoff(TASKLET_SOFTIRQ); + __do_raise_softirq_irqoff(TASKLET_SOFTIRQ); local_irq_enable(); } } @@ -447,7 +509,7 @@ static void tasklet_hi_action(struct sof local_irq_disable(); t->next = __get_cpu_var(tasklet_hi_vec).list; __get_cpu_var(tasklet_hi_vec).list = t; - __raise_softirq_irqoff(HI_SOFTIRQ); + __do_raise_softirq_irqoff(HI_SOFTIRQ); local_irq_enable(); } } @@ -487,13 +549,24 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL); } -static int ksoftirqd(void * __bind_cpu) +static int ksoftirqd(void * __data) { + struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2 }; + struct softirqdata *data = __data; + u32 mask = (1 << data->nr); + struct softirq_action *h; + +#ifdef CONFIG_PREEMPT_SOFTIRQS + init_waitqueue_head(&data->wait); +#endif + + sys_sched_setscheduler(current->pid, SCHED_FIFO, ¶m); + current->flags |= PF_SOFTIRQ; set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { preempt_disable(); - if (!local_softirq_pending()) { + if (!(local_softirq_pending() & mask)) { preempt_enable_no_resched(); schedule(); preempt_disable(); @@ -501,19 +574,41 @@ static int ksoftirqd(void * __bind_cpu) __set_current_state(TASK_RUNNING); - while (local_softirq_pending()) { +#ifdef CONFIG_PREEMPT_SOFTIRQS + data->running = 1; +#endif + + while (local_softirq_pending() & mask) { /* Preempt disable stops cpu going offline. 
If already offline, we'll be on wrong CPU: don't process */ - if (cpu_is_offline((long)__bind_cpu)) + if (cpu_is_offline(data->cpu)) goto wait_to_die; - do_softirq(); + + local_irq_disable(); preempt_enable_no_resched(); + set_softirq_pending(local_softirq_pending() & ~mask); + local_bh_disable(); + local_irq_enable(); + + h = &softirq_vec[data->nr]; + if (h) + h->action(h); + rcu_bh_qsctr_inc(data->cpu); + + local_irq_disable(); + _local_bh_enable(); + local_irq_enable(); + cond_resched(); preempt_disable(); } preempt_enable(); set_current_state(TASK_INTERRUPTIBLE); +#ifdef CONFIG_PREEMPT_SOFTIRQS + data->running = 0; + wake_up(&data->wait); +#endif } __set_current_state(TASK_RUNNING); return 0; @@ -560,7 +655,7 @@ void tasklet_kill_immediate(struct taskl BUG(); } -static void takeover_tasklets(unsigned int cpu) +void takeover_tasklets(unsigned int cpu) { struct tasklet_struct **i; @@ -582,49 +677,82 @@ static void takeover_tasklets(unsigned i } #endif /* CONFIG_HOTPLUG_CPU */ +static const char *softirq_names [] = +{ + [HI_SOFTIRQ] = "high", + [SCHED_SOFTIRQ] = "sched", + [TIMER_SOFTIRQ] = "timer", + [NET_TX_SOFTIRQ] = "net-tx", + [NET_RX_SOFTIRQ] = "net-rx", + [BLOCK_SOFTIRQ] = "block", + [TASKLET_SOFTIRQ] = "tasklet", +#ifdef CONFIG_HIGH_RES_TIMERS + [HRTIMER_SOFTIRQ] = "hrtimer", +#endif + [RCU_SOFTIRQ] = "rcu", +}; + static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { - int hotcpu = (unsigned long)hcpu; + int hotcpu = (unsigned long)hcpu, i; struct task_struct *p; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); - if (IS_ERR(p)) { - printk("ksoftirqd for %i failed\n", hotcpu); - return NOTIFY_BAD; + for (i = 0; i < MAX_SOFTIRQ; i++) { + per_cpu(ksoftirqd, hotcpu)[i].nr = i; + per_cpu(ksoftirqd, hotcpu)[i].cpu = hotcpu; + per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; + } + for (i = 0; i < MAX_SOFTIRQ; i++) { + p = kthread_create(ksoftirqd, + &per_cpu(ksoftirqd, hotcpu)[i], + "softirq-%s/%d", softirq_names[i], + hotcpu); + if (IS_ERR(p)) { + printk("ksoftirqd %d for %i failed\n", i, + hotcpu); + return NOTIFY_BAD; + } + kthread_bind(p, hotcpu); + per_cpu(ksoftirqd, hotcpu)[i].tsk = p; } - kthread_bind(p, hotcpu); - per_cpu(ksoftirqd, hotcpu) = p; - break; + break; + break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - wake_up_process(per_cpu(ksoftirqd, hotcpu)); + for (i = 0; i < MAX_SOFTIRQ; i++) + wake_up_process(per_cpu(ksoftirqd, hotcpu)[i].tsk); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: - if (!per_cpu(ksoftirqd, hotcpu)) - break; - /* Unbind so it can run. Fall thru. 
*/ - kthread_bind(per_cpu(ksoftirqd, hotcpu), - any_online_cpu(cpu_online_map)); +#if 0 + for (i = 0; i < MAX_SOFTIRQ; i++) { + if (!per_cpu(ksoftirqd, hotcpu)[i].tsk) + continue; + kthread_bind(per_cpu(ksoftirqd, hotcpu)[i].tsk, + any_online_cpu(cpu_online_map)); + } +#endif case CPU_DEAD: case CPU_DEAD_FROZEN: { struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - p = per_cpu(ksoftirqd, hotcpu); - per_cpu(ksoftirqd, hotcpu) = NULL; sched_setscheduler(p, SCHED_FIFO, ¶m); - kthread_stop(p); + for (i = 0; i < MAX_SOFTIRQ; i++) { + p = per_cpu(ksoftirqd, hotcpu)[i].tsk; + per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; + kthread_stop(p); + } takeover_tasklets(hotcpu); break; - } #endif /* CONFIG_HOTPLUG_CPU */ } + } return NOTIFY_OK; } @@ -643,6 +771,29 @@ __init int spawn_ksoftirqd(void) return 0; } + +#ifdef CONFIG_PREEMPT_SOFTIRQS + +int softirq_preemption = 1; + +EXPORT_SYMBOL(softirq_preemption); + +static int __init softirq_preempt_setup (char *str) +{ + if (!strncmp(str, "off", 3)) + softirq_preemption = 0; + else + get_option(&str, &softirq_preemption); + if (!softirq_preemption) + printk("turning off softirq preemption!\n"); + + return 1; +} + +__setup("softirq-preempt=", softirq_preempt_setup); + +#endif + #ifdef CONFIG_SMP /* * Call a function on all processors patches/preempt-realtime-rcu.patch0000664000077200007720000000224510655544575016630 0ustar mingomingo--- kernel/rcupreempt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/kernel/rcupreempt.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rcupreempt.c +++ linux-rt-rebase.q/kernel/rcupreempt.c @@ -55,7 +55,7 @@ */ struct rcu_data { - spinlock_t lock; + raw_spinlock_t lock; long completed; /* Number of last completed batch. */ struct rcu_head *nextlist; struct rcu_head **nexttail; @@ -68,12 +68,12 @@ struct rcu_data { #endif /* #ifdef CONFIG_RCU_TRACE */ }; struct rcu_ctrlblk { - spinlock_t fliplock; + raw_spinlock_t fliplock; long completed; /* Number of last completed batch. 
*/ }; static struct rcu_data rcu_data; static struct rcu_ctrlblk rcu_ctrlblk = { - .fliplock = SPIN_LOCK_UNLOCKED, + .fliplock = RAW_SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock), .completed = 0, }; static DEFINE_PER_CPU(atomic_t [2], rcu_flipctr) = @@ -353,7 +353,7 @@ int rcu_needs_cpu(int cpu) return !!rcu_data.waitlist || rcu_pending(cpu); } -int rcu_pending(int cpu) +int notrace rcu_pending(int cpu) { return (rcu_data.donelist != NULL || rcu_data.waitlist != NULL || patches/x86_64-apic-add-clockevents-functions.patch0000664000077200007720000000724610655544570021510 0ustar mingomingoSubject: x86_64: Add (not yet used) clock event functions Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/Kconfig | 6 +++ arch/x86_64/kernel/apic.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) Index: linux/arch/x86_64/Kconfig =================================================================== --- linux.orig/arch/x86_64/Kconfig +++ linux/arch/x86_64/Kconfig @@ -28,6 +28,10 @@ config GENERIC_TIME bool default y +config GENERIC_CLOCKEVENTS_MIGR + bool + default y + config GENERIC_TIME_VSYSCALL bool default y @@ -138,6 +142,8 @@ source "init/Kconfig" menu "Processor type and features" +source "kernel/time/Kconfig" + choice prompt "Subarchitecture Type" default X86_PC Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -58,6 +59,77 @@ static struct resource lapic_resource = static unsigned int calibration_result; +static int lapic_next_event(unsigned long delta, + struct clock_event_device *evt); +static void lapic_timer_setup(enum clock_event_mode mode, + struct clock_event_device *evt); + +static void lapic_timer_broadcast(cpumask_t mask); + +static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen); + +static struct clock_event_device lapic_clockevent = { + .name = "lapic", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT + | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, + .shift = 32, + .set_mode = lapic_timer_setup, + .set_next_event = lapic_next_event, + .broadcast = lapic_timer_broadcast, + .rating = 100, + .irq = -1, +}; +static DEFINE_PER_CPU(struct clock_event_device, lapic_events); + +static int lapic_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + apic_write(APIC_TMICT, delta); + return 0; +} + +static void lapic_timer_setup(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + unsigned long flags; + unsigned int v; + + /* Lapic used as dummy for broadcast ? 
*/ + if (evt->features & CLOCK_EVT_FEAT_DUMMY) + return; + + local_irq_save(flags); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_MODE_ONESHOT: + __setup_APIC_LVTT(calibration_result, + mode != CLOCK_EVT_MODE_PERIODIC, 1); + break; + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + v = apic_read(APIC_LVTT); + v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); + apic_write(APIC_LVTT, v); + break; + case CLOCK_EVT_MODE_RESUME: + /* Nothing to do here */ + break; + } + + local_irq_restore(flags); +} + +/* + * Local APIC timer broadcast function + */ +static void lapic_timer_broadcast(cpumask_t mask) +{ +#ifdef CONFIG_SMP + send_IPI_mask(mask, LOCAL_TIMER_VECTOR); +#endif +} + /* * cpu_mask that denotes the CPUs that needs timer interrupt coming in as * IPIs in place of local APIC timers @@ -867,6 +939,13 @@ static void __init calibrate_APIC_clock( printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", result / 1000 / 1000, result / 1000 % 1000); + /* Calculate the scaled math multiplication factor */ + lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32); + lapic_clockevent.max_delta_ns = + clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); + lapic_clockevent.min_delta_ns = + clockevent_delta2ns(0xF, &lapic_clockevent); + calibration_result = result / HZ; } patches/mapping_nrpages.patch0000664000077200007720000004347210655544576015747 0ustar mingomingoSubject: mm/fs: abstract address_space::nrpages Currently the tree_lock protects mapping->nrpages, this will not be possible much longer. Hence abstract the access to this variable so that it can be easily replaced by an atomic_ulong_t. Signed-off-by: Peter Zijlstra --- arch/sh64/lib/dbg.c | 2 +- fs/block_dev.c | 4 ++-- fs/buffer.c | 2 +- fs/gfs2/glock.c | 2 +- fs/gfs2/glops.c | 4 ++-- fs/gfs2/meta_io.c | 2 +- fs/hugetlbfs/inode.c | 2 +- fs/inode.c | 10 +++++----- fs/jffs2/dir.c | 4 ++-- fs/jffs2/fs.c | 2 +- fs/libfs.c | 2 +- fs/nfs/inode.c | 6 +++--- fs/xfs/linux-2.6/xfs_vnode.h | 2 +- include/linux/fs.h | 22 +++++++++++++++++++++- include/linux/swap.h | 2 +- ipc/shm.c | 4 ++-- mm/filemap.c | 14 +++++++------- mm/shmem.c | 8 ++++---- mm/swap_state.c | 4 ++-- mm/truncate.c | 2 +- 20 files changed, 60 insertions(+), 40 deletions(-) Index: linux-rt-rebase.q/arch/sh64/lib/dbg.c =================================================================== --- linux-rt-rebase.q.orig/arch/sh64/lib/dbg.c +++ linux-rt-rebase.q/arch/sh64/lib/dbg.c @@ -425,6 +425,6 @@ void print_page(struct page *page) printk(" page[%p] -> index 0x%lx, count 0x%x, flags 0x%lx\n", page, page->index, page_count(page), page->flags); printk(" address_space = %p, pages =%ld\n", page->mapping, - page->mapping->nrpages); + mapping_nrpages(page->mapping)); } Index: linux-rt-rebase.q/fs/block_dev.c =================================================================== --- linux-rt-rebase.q.orig/fs/block_dev.c +++ linux-rt-rebase.q/fs/block_dev.c @@ -59,7 +59,7 @@ static sector_t max_block(struct block_d /* Kill _all_ buffers and pagecache , dirty or not.. 
*/ static void kill_bdev(struct block_device *bdev) { - if (bdev->bd_inode->i_mapping->nrpages == 0) + if (mapping_nrpages(bdev->bd_inode->i_mapping) == 0) return; invalidate_bh_lrus(); truncate_inode_pages(bdev->bd_inode->i_mapping, 0); @@ -592,7 +592,7 @@ long nr_blockdev_pages(void) long ret = 0; spin_lock(&bdev_lock); list_for_each_entry(bdev, &all_bdevs, bd_list) { - ret += bdev->bd_inode->i_mapping->nrpages; + ret += mapping_nrpages(bdev->bd_inode->i_mapping); } spin_unlock(&bdev_lock); return ret; Index: linux-rt-rebase.q/fs/buffer.c =================================================================== --- linux-rt-rebase.q.orig/fs/buffer.c +++ linux-rt-rebase.q/fs/buffer.c @@ -334,7 +334,7 @@ void invalidate_bdev(struct block_device { struct address_space *mapping = bdev->bd_inode->i_mapping; - if (mapping->nrpages == 0) + if (mapping_nrpages(mapping) == 0) return; invalidate_bh_lrus(); Index: linux-rt-rebase.q/fs/gfs2/glock.c =================================================================== --- linux-rt-rebase.q.orig/fs/gfs2/glock.c +++ linux-rt-rebase.q/fs/gfs2/glock.c @@ -1876,7 +1876,7 @@ static int dump_glock(struct glock_iter (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); if (gl->gl_aspace) print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, - gl->gl_aspace->i_mapping->nrpages); + mapping_nrpages(gl->gl_aspace->i_mapping)); else print_dbg(gi, " aspace = no\n"); print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count)); Index: linux-rt-rebase.q/fs/gfs2/glops.c =================================================================== --- linux-rt-rebase.q.orig/fs/gfs2/glops.c +++ linux-rt-rebase.q/fs/gfs2/glops.c @@ -258,7 +258,7 @@ static int inode_go_demote_ok(struct gfs struct gfs2_sbd *sdp = gl->gl_sbd; int demote = 0; - if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages) + if (!gl->gl_object && !mapping_nrpages(gl->gl_aspace->i_mapping)) demote = 1; else if (!sdp->sd_args.ar_localcaching && time_after_eq(jiffies, gl->gl_stamp + @@ -325,7 +325,7 @@ static void inode_go_unlock(struct gfs2_ static int rgrp_go_demote_ok(struct gfs2_glock *gl) { - return !gl->gl_aspace->i_mapping->nrpages; + return !mapping_nrpages(gl->gl_aspace->i_mapping); } /** Index: linux-rt-rebase.q/fs/gfs2/meta_io.c =================================================================== --- linux-rt-rebase.q.orig/fs/gfs2/meta_io.c +++ linux-rt-rebase.q/fs/gfs2/meta_io.c @@ -104,7 +104,7 @@ void gfs2_meta_inval(struct gfs2_glock * truncate_inode_pages(mapping, 0); atomic_dec(&aspace->i_writecount); - gfs2_assert_withdraw(sdp, !mapping->nrpages); + gfs2_assert_withdraw(sdp, !mapping_nrpages(mapping)); } /** Index: linux-rt-rebase.q/fs/hugetlbfs/inode.c =================================================================== --- linux-rt-rebase.q.orig/fs/hugetlbfs/inode.c +++ linux-rt-rebase.q/fs/hugetlbfs/inode.c @@ -236,7 +236,7 @@ static void truncate_hugepages(struct in } huge_pagevec_release(&pvec); } - BUG_ON(!lstart && mapping->nrpages); + BUG_ON(!lstart && mapping_nrpages(mapping)); hugetlb_unreserve_pages(inode, start, freed); } Index: linux-rt-rebase.q/fs/inode.c =================================================================== --- linux-rt-rebase.q.orig/fs/inode.c +++ linux-rt-rebase.q/fs/inode.c @@ -244,7 +244,7 @@ void clear_inode(struct inode *inode) might_sleep(); invalidate_inode_buffers(inode); - BUG_ON(inode->i_data.nrpages); + BUG_ON(mapping_nrpages(&inode->i_data)); BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); wait_on_inode(inode); @@ -277,7 +277,7 @@ 
static void dispose_list(struct list_hea inode = list_first_entry(head, struct inode, i_list); list_del(&inode->i_list); - if (inode->i_data.nrpages) + if (mapping_nrpages(&inode->i_data)) truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); @@ -369,7 +369,7 @@ static int can_unuse(struct inode *inode return 0; if (atomic_read(&inode->i_count)) return 0; - if (inode->i_data.nrpages) + if (mapping_nrpages(&inode->i_data)) return 0; return 1; } @@ -408,7 +408,7 @@ static void prune_icache(int nr_to_scan) list_move(&inode->i_list, &inode_unused); continue; } - if (inode_has_buffers(inode) || inode->i_data.nrpages) { + if (inode_has_buffers(inode) || mapping_nrpages(&inode->i_data)) { __iget(inode); spin_unlock(&inode_lock); if (remove_inode_buffers(inode)) @@ -1073,7 +1073,7 @@ static void generic_forget_inode(struct inode->i_state |= I_FREEING; inodes_stat.nr_inodes--; spin_unlock(&inode_lock); - if (inode->i_data.nrpages) + if (mapping_nrpages(&inode->i_data)) truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); wake_up_inode(inode); Index: linux-rt-rebase.q/fs/jffs2/dir.c =================================================================== --- linux-rt-rebase.q.orig/fs/jffs2/dir.c +++ linux-rt-rebase.q/fs/jffs2/dir.c @@ -203,7 +203,7 @@ static int jffs2_create(struct inode *di inode->i_op = &jffs2_file_inode_operations; inode->i_fop = &jffs2_file_operations; inode->i_mapping->a_ops = &jffs2_file_address_operations; - inode->i_mapping->nrpages = 0; + mapping_nrpages_init(inode->i_mapping); f = JFFS2_INODE_INFO(inode); dir_f = JFFS2_INODE_INFO(dir_i); @@ -227,7 +227,7 @@ static int jffs2_create(struct inode *di d_instantiate(dentry, inode); D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", - inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages)); + inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, mapping_nrpages(inode->i_mapping))); return 0; fail: Index: linux-rt-rebase.q/fs/jffs2/fs.c =================================================================== --- linux-rt-rebase.q.orig/fs/jffs2/fs.c +++ linux-rt-rebase.q/fs/jffs2/fs.c @@ -291,7 +291,7 @@ void jffs2_read_inode (struct inode *ino inode->i_op = &jffs2_file_inode_operations; inode->i_fop = &jffs2_file_operations; inode->i_mapping->a_ops = &jffs2_file_address_operations; - inode->i_mapping->nrpages = 0; + mapping_nrpages_init(inode->i_mapping); break; case S_IFBLK: Index: linux-rt-rebase.q/fs/libfs.c =================================================================== --- linux-rt-rebase.q.orig/fs/libfs.c +++ linux-rt-rebase.q/fs/libfs.c @@ -16,7 +16,7 @@ int simple_getattr(struct vfsmount *mnt, { struct inode *inode = dentry->d_inode; generic_fillattr(inode, stat); - stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9); + stat->blocks = mapping_nrpages(inode->i_mapping) << (PAGE_CACHE_SHIFT - 9); return 0; } Index: linux-rt-rebase.q/fs/nfs/inode.c =================================================================== --- linux-rt-rebase.q.orig/fs/nfs/inode.c +++ linux-rt-rebase.q/fs/nfs/inode.c @@ -97,7 +97,7 @@ int nfs_sync_mapping(struct address_spac { int ret; - if (mapping->nrpages == 0) + if (mapping_nrpages(mapping) == 0) return 0; unmap_mapping_range(mapping, 0, 0, 0); ret = filemap_write_and_wait(mapping); @@ -137,7 +137,7 @@ void nfs_zap_caches(struct inode *inode) void nfs_zap_mapping(struct inode *inode, struct address_space *mapping) { - if (mapping->nrpages != 0) { + if (mapping_nrpages(mapping) != 
0) { spin_lock(&inode->i_lock); NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); @@ -684,7 +684,7 @@ static int nfs_invalidate_mapping_nolock { struct nfs_inode *nfsi = NFS_I(inode); - if (mapping->nrpages != 0) { + if (mapping_nrpages(mapping) != 0) { int ret = invalidate_inode_pages2(mapping); if (ret < 0) return ret; Index: linux-rt-rebase.q/fs/xfs/linux-2.6/xfs_vnode.h =================================================================== --- linux-rt-rebase.q.orig/fs/xfs/linux-2.6/xfs_vnode.h +++ linux-rt-rebase.q/fs/xfs/linux-2.6/xfs_vnode.h @@ -537,7 +537,7 @@ static inline void vn_atime_to_time_t(bh * Some useful predicates. */ #define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping) -#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages) +#define VN_CACHED(vp) mapping_nrpages(vn_to_inode(vp)->i_mapping) #define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \ PAGECACHE_TAG_DIRTY) #define VN_TRUNC(vp) ((vp)->v_flag & VTRUNCATED) Index: linux-rt-rebase.q/include/linux/fs.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/fs.h +++ linux-rt-rebase.q/include/linux/fs.h @@ -447,7 +447,7 @@ struct address_space { struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ spinlock_t i_mmap_lock; /* protect tree, count, list */ unsigned int truncate_count; /* Cover race condition with truncate */ - unsigned long nrpages; /* number of total pages */ + unsigned long __nrpages; /* number of total pages */ pgoff_t writeback_index;/* writeback starts here */ const struct address_space_operations *a_ops; /* methods */ unsigned long flags; /* error bits/gfp mask */ @@ -462,6 +462,26 @@ struct address_space { * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. 
*/ +static inline void mapping_nrpages_init(struct address_space *mapping) +{ + mapping->__nrpages = 0; +} + +static inline unsigned long mapping_nrpages(struct address_space *mapping) +{ + return mapping->__nrpages; +} + +static inline void mapping_nrpages_inc(struct address_space *mapping) +{ + mapping->__nrpages++; +} + +static inline void mapping_nrpages_dec(struct address_space *mapping) +{ + mapping->__nrpages--; +} + struct block_device { dev_t bd_dev; /* not a kdev_t - it's a search key */ struct inode * bd_inode; /* will die */ Index: linux-rt-rebase.q/include/linux/swap.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/swap.h +++ linux-rt-rebase.q/include/linux/swap.h @@ -225,7 +225,7 @@ extern int end_swap_bio_read(struct bio /* linux/mm/swap_state.c */ extern struct address_space swapper_space; -#define total_swapcache_pages swapper_space.nrpages +#define total_swapcache_pages mapping_nrpages(&swapper_space) extern void show_swap_cache_info(void); extern int add_to_swap(struct page *, gfp_t); extern void __delete_from_swap_cache(struct page *); Index: linux-rt-rebase.q/ipc/shm.c =================================================================== --- linux-rt-rebase.q.orig/ipc/shm.c +++ linux-rt-rebase.q/ipc/shm.c @@ -566,11 +566,11 @@ static void shm_get_stat(struct ipc_name if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; - *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages; + *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping_nrpages(mapping); } else { struct shmem_inode_info *info = SHMEM_I(inode); spin_lock(&info->lock); - *rss += inode->i_mapping->nrpages; + *rss += mapping_nrpages(inode->i_mapping); *swp += info->swapped; spin_unlock(&info->lock); } Index: linux-rt-rebase.q/mm/filemap.c =================================================================== --- linux-rt-rebase.q.orig/mm/filemap.c +++ linux-rt-rebase.q/mm/filemap.c @@ -118,7 +118,7 @@ void __remove_from_page_cache(struct pag radix_tree_delete(&mapping->page_tree, page->index); page->mapping = NULL; - mapping->nrpages--; + mapping_nrpages_dec(mapping); __dec_zone_page_state(page, NR_FILE_PAGES); BUG_ON(page_mapped(page)); } @@ -191,7 +191,7 @@ int __filemap_fdatawrite_range(struct ad int ret; struct writeback_control wbc = { .sync_mode = sync_mode, - .nr_to_write = mapping->nrpages * 2, + .nr_to_write = mapping_nrpages(mapping) * 2, .range_start = start, .range_end = end, }; @@ -373,7 +373,7 @@ int filemap_write_and_wait(struct addres { int err = 0; - if (mapping->nrpages) { + if (mapping_nrpages(mapping)) { err = filemap_fdatawrite(mapping); /* * Even if the above returned error, the pages may be @@ -407,7 +407,7 @@ int filemap_write_and_wait_range(struct { int err = 0; - if (mapping->nrpages) { + if (mapping_nrpages(mapping)) { err = __filemap_fdatawrite_range(mapping, lstart, lend, WB_SYNC_ALL); /* See comment of filemap_write_and_wait() */ @@ -449,7 +449,7 @@ int add_to_page_cache(struct page *page, SetPageLocked(page); page->mapping = mapping; page->index = offset; - mapping->nrpages++; + mapping_nrpages_inc(mapping); __inc_zone_page_state(page, NR_FILE_PAGES); } spin_unlock_irq(&mapping->tree_lock); @@ -2297,7 +2297,7 @@ generic_file_direct_IO(int rw, struct ki * about to write. We do this *before* the write so that we can return * -EIO without clobbering -EIOCBQUEUED from ->direct_IO(). 
*/ - if (rw == WRITE && mapping->nrpages) { + if (rw == WRITE && mapping_nrpages(mapping)) { retval = invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end); if (retval) @@ -2315,7 +2315,7 @@ generic_file_direct_IO(int rw, struct ki * thing to do, so we don't support it 100%. If this invalidation * fails and we have -EIOCBQUEUED we ignore the failure. */ - if (rw == WRITE && mapping->nrpages) { + if (rw == WRITE && mapping_nrpages(mapping)) { int err = invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end); if (err && retval >= 0) Index: linux-rt-rebase.q/mm/shmem.c =================================================================== --- linux-rt-rebase.q.orig/mm/shmem.c +++ linux-rt-rebase.q/mm/shmem.c @@ -216,8 +216,8 @@ static void shmem_free_blocks(struct ino * We have to calculate the free blocks since the mm can drop * undirtied hole pages behind our back. * - * But normally info->alloced == inode->i_mapping->nrpages + info->swapped - * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) + * But normally info->alloced == mapping_nrpages(inode->i_mapping) + info->swapped + * So mm freed is info->alloced - (mapping_nrpages(inode->i_mapping) + info->swapped) * * It has to be called with the spinlock held. */ @@ -226,7 +226,7 @@ static void shmem_recalc_inode(struct in struct shmem_inode_info *info = SHMEM_I(inode); long freed; - freed = info->alloced - info->swapped - inode->i_mapping->nrpages; + freed = info->alloced - info->swapped - mapping_nrpages(inode->i_mapping); if (freed > 0) { info->alloced -= freed; shmem_unacct_blocks(info->flags, freed); @@ -672,7 +672,7 @@ static void shmem_truncate_range(struct done1: shmem_dir_unmap(dir); done2: - if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) { + if (mapping_nrpages(inode->i_mapping) && (info->flags & SHMEM_PAGEIN)) { /* * Call truncate_inode_pages again: racing shmem_unuse_inode * may have swizzled a page in from swap since vmtruncate or Index: linux-rt-rebase.q/mm/swap_state.c =================================================================== --- linux-rt-rebase.q.orig/mm/swap_state.c +++ linux-rt-rebase.q/mm/swap_state.c @@ -87,7 +87,7 @@ static int __add_to_swap_cache(struct pa page_cache_get(page); SetPageSwapCache(page); set_page_private(page, entry.val); - total_swapcache_pages++; + mapping_nrpages_inc(&swapper_space); __inc_zone_page_state(page, NR_FILE_PAGES); } spin_unlock_irq(&swapper_space.tree_lock); @@ -133,7 +133,7 @@ void __delete_from_swap_cache(struct pag radix_tree_delete(&swapper_space.page_tree, page_private(page)); set_page_private(page, 0); ClearPageSwapCache(page); - total_swapcache_pages--; + mapping_nrpages_dec(&swapper_space); __dec_zone_page_state(page, NR_FILE_PAGES); INC_CACHE_INFO(del_total); } Index: linux-rt-rebase.q/mm/truncate.c =================================================================== --- linux-rt-rebase.q.orig/mm/truncate.c +++ linux-rt-rebase.q/mm/truncate.c @@ -164,7 +164,7 @@ void truncate_inode_pages_range(struct a pgoff_t next; int i; - if (mapping->nrpages == 0) + if (mapping_nrpages(mapping) == 0) return; BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); patches/highmem-redo-mainline.patch0000664000077200007720000000103310655544576016717 0ustar mingomingo--- mm/highmem.c | 8 ++++++++ 1 file changed, 8 insertions(+) Index: linux-rt-rebase.q/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/mm/highmem.c +++ linux-rt-rebase.q/mm/highmem.c @@ 
-214,6 +214,14 @@ static unsigned long pkmap_insert(struct return vaddr; } +/* + * Flush all unused kmap mappings in order to remove stray mappings. + */ +void kmap_flush_unused(void) +{ + WARN_ON_ONCE(1); +} + fastcall void *kmap_high(struct page *page) { unsigned long vaddr; patches/preempt-realtime-debug-sysctl.patch0000664000077200007720000001000010655544575020430 0ustar mingomingo--- drivers/char/sysrq.c | 18 ++++++++++++++- drivers/char/tty_io.c | 1 kernel/panic.c | 1 kernel/sysctl.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 77 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/drivers/char/sysrq.c =================================================================== --- linux-rt-rebase.q.orig/drivers/char/sysrq.c +++ linux-rt-rebase.q/drivers/char/sysrq.c @@ -208,6 +208,22 @@ static struct sysrq_key_op sysrq_showreg .enable_mask = SYSRQ_ENABLE_DUMP, }; +#if defined(__i386__) + +static void sysrq_handle_showallregs(int key, struct tty_struct *tty) +{ + nmi_show_all_regs(); +} + +static struct sysrq_key_op sysrq_showallregs_op = { + .handler = sysrq_handle_showallregs, + .help_msg = "showalLcpupc", + .action_msg = "Show Regs On All CPUs", +}; +#else +#define sysrq_showallregs_op (*(struct sysrq_key_op *)0) +#endif + static void sysrq_handle_showstate(int key, struct tty_struct *tty) { show_state(); @@ -340,7 +356,7 @@ static struct sysrq_key_op *sysrq_key_ta &sysrq_kill_op, /* i */ NULL, /* j */ &sysrq_SAK_op, /* k */ - NULL, /* l */ + &sysrq_showallregs_op, /* l */ &sysrq_showmem_op, /* m */ &sysrq_unrt_op, /* n */ /* o: This will often be registered as 'Off' at init time */ Index: linux-rt-rebase.q/drivers/char/tty_io.c =================================================================== --- linux-rt-rebase.q.orig/drivers/char/tty_io.c +++ linux-rt-rebase.q/drivers/char/tty_io.c @@ -257,6 +257,7 @@ static int check_tty_count(struct tty_st printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) " "!= #fd's(%d) in %s\n", tty->name, tty->count, count, routine); + dump_stack(); return count; } #endif Index: linux-rt-rebase.q/kernel/panic.c =================================================================== --- linux-rt-rebase.q.orig/kernel/panic.c +++ linux-rt-rebase.q/kernel/panic.c @@ -80,6 +80,7 @@ NORET_TYPE void panic(const char * fmt, vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); + dump_stack(); bust_spinlocks(0); /* Index: linux-rt-rebase.q/kernel/sysctl.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sysctl.c +++ linux-rt-rebase.q/kernel/sysctl.c @@ -323,6 +323,54 @@ static ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = KERN_PANIC, + .procname = "prof_pid", + .data = &prof_pid, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#ifdef CONFIG_PREEMPT + { + .ctl_name = KERN_PANIC, + .procname = "kernel_preemption", + .data = &kernel_preemption, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_PREEMPT_VOLUNTARY + { + .ctl_name = KERN_PANIC, + .procname = "voluntary_preemption", + .data = &voluntary_preemption, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif +#if defined(CONFIG_PREEMPT_SOFTIRQS) && !defined(CONFIG_PREEMPT_RT) + { + .ctl_name = KERN_PANIC, + .procname = "softirq_preemption", + .data = &softirq_preemption, + .maxlen = sizeof(int), + .mode = 0644, + 
.proc_handler = &proc_dointvec, + }, +#endif +#if defined(CONFIG_PREEMPT_HARDIRQS) && !defined(CONFIG_PREEMPT_RT) + { + .ctl_name = KERN_PANIC, + .procname = "hardirq_preemption", + .data = &hardirq_preemption, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif #ifdef CONFIG_WAKEUP_TIMING { .ctl_name = CTL_UNNUMBERED, @@ -457,6 +505,16 @@ static ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, #endif +#ifdef CONFIG_GENERIC_HARDIRQS + { + .ctl_name = KERN_PANIC, + .procname = "debug_direct_keyboard", + .data = &debug_direct_keyboard, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif { .ctl_name = KERN_CORE_USES_PID, .procname = "core_uses_pid", patches/preempt-irqs-hrtimer.patch0000664000077200007720000001036210655544573016662 0ustar mingomingo include/linux/hrtimer.h | 10 ++++++++++ kernel/hrtimer.c | 35 ++++++++++++++++++++++++++++++++++- kernel/itimer.c | 1 + kernel/posix-timers.c | 3 +++ 4 files changed, 48 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/include/linux/hrtimer.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/hrtimer.h +++ linux-rt-rebase.q/include/linux/hrtimer.h @@ -200,6 +200,9 @@ struct hrtimer_cpu_base { struct list_head cb_pending; unsigned long nr_events; #endif +#ifdef CONFIG_PREEMPT_SOFTIRQS + wait_queue_head_t wait; +#endif }; #ifdef CONFIG_HIGH_RES_TIMERS @@ -276,6 +279,13 @@ static inline int hrtimer_restart(struct return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); } +/* Softirq preemption could deadlock timer removal */ +#ifdef CONFIG_PREEMPT_SOFTIRQS + extern void hrtimer_wait_for_timer(const struct hrtimer *timer); +#else +# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) +#endif + /* Query timers: */ extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); Index: linux-rt-rebase.q/kernel/hrtimer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/hrtimer.c +++ linux-rt-rebase.q/kernel/hrtimer.c @@ -911,7 +911,7 @@ int hrtimer_cancel(struct hrtimer *timer if (ret >= 0) return ret; - cpu_relax(); + hrtimer_wait_for_timer(timer); } } EXPORT_SYMBOL_GPL(hrtimer_cancel); @@ -1022,6 +1022,32 @@ int hrtimer_get_res(const clockid_t whic } EXPORT_SYMBOL_GPL(hrtimer_get_res); +#ifdef CONFIG_PREEMPT_SOFTIRQS +# define wake_up_timer_waiters(b) wake_up(&(b)->wait) + +/** + * hrtimer_wait_for_timer - Wait for a running timer + * + * @timer: timer to wait for + * + * The function waits in case the timers callback function is + * currently executed on the waitqueue of the timer base. The + * waitqueue is woken up after the timer callback function has + * finished execution. 
+ */ +void hrtimer_wait_for_timer(const struct hrtimer *timer) +{ + struct hrtimer_clock_base *base = timer->base; + + if (base && base->cpu_base) + wait_event(base->cpu_base->wait, + !(timer->state & HRTIMER_STATE_CALLBACK)); +} + +#else +# define wake_up_timer_waiters(b) do { } while (0) +#endif + #ifdef CONFIG_HIGH_RES_TIMERS /* @@ -1157,6 +1183,8 @@ static void run_hrtimer_softirq(struct s } } spin_unlock_irq(&cpu_base->lock); + + wake_up_timer_waiters(cpu_base); } #endif /* CONFIG_HIGH_RES_TIMERS */ @@ -1207,6 +1235,8 @@ static inline void run_hrtimer_queue(str } } spin_unlock_irq(&cpu_base->lock); + + wake_up_timer_waiters(cpu_base); } /* @@ -1382,6 +1412,9 @@ static void __devinit init_hrtimers_cpu( cpu_base->clock_base[i].cpu_base = cpu_base; hrtimer_init_hres(cpu_base); +#ifdef CONFIG_PREEMPT_SOFTIRQS + init_waitqueue_head(&cpu_base->wait); +#endif } #ifdef CONFIG_HOTPLUG_CPU Index: linux-rt-rebase.q/kernel/itimer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/itimer.c +++ linux-rt-rebase.q/kernel/itimer.c @@ -170,6 +170,7 @@ again: /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { spin_unlock_irq(&tsk->sighand->siglock); + hrtimer_wait_for_timer(&tsk->signal->real_timer); goto again; } expires = timeval_to_ktime(value->it_value); Index: linux-rt-rebase.q/kernel/posix-timers.c =================================================================== --- linux-rt-rebase.q.orig/kernel/posix-timers.c +++ linux-rt-rebase.q/kernel/posix-timers.c @@ -805,6 +805,7 @@ retry: unlock_timer(timr, flag); if (error == TIMER_RETRY) { + hrtimer_wait_for_timer(&timr->it.real.timer); rtn = NULL; // We already got the old time... goto retry; } @@ -844,6 +845,7 @@ retry_delete: if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); + hrtimer_wait_for_timer(&timer->it.real.timer); goto retry_delete; } @@ -876,6 +878,7 @@ retry_delete: if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); + hrtimer_wait_for_timer(&timer->it.real.timer); goto retry_delete; } list_del(&timer->list); patches/random-driver-latency-fix.patch0000664000077200007720000000167610655544572017563 0ustar mingomingo drivers/char/random.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) Index: linux/drivers/char/random.c =================================================================== --- linux.orig/drivers/char/random.c +++ linux/drivers/char/random.c @@ -580,8 +580,11 @@ static void add_timer_randomness(struct preempt_disable(); /* if over the trickle threshold, use only 1 in 4096 samples */ if (input_pool.entropy_count > trickle_thresh && - (__get_cpu_var(trickle_count)++ & 0xfff)) - goto out; + (__get_cpu_var(trickle_count)++ & 0xfff)) { + preempt_enable(); + return; + } + preempt_enable(); sample.jiffies = jiffies; sample.cycles = get_cycles(); @@ -626,9 +629,6 @@ static void add_timer_randomness(struct if(input_pool.entropy_count >= random_read_wakeup_thresh) wake_up_interruptible(&random_read_wait); - -out: - preempt_enable(); } void add_input_randomness(unsigned int type, unsigned int code, patches/atl-flags-fix.patch0000664000077200007720000000173410655544577015227 0ustar mingomingoSubject: [patch] drivers/net/atl1/atl1_main.c: use spin_trylock_irqsave() From: Ingo Molnar use the simpler spin_trylock_irqsave() API to get the adapter lock. [ this is also a fix for -rt where adapter->lock is a sleeping lock. 
] Signed-off-by: Ingo Molnar --- drivers/net/atl1/atl1_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) Index: linux-rt-rebase.q/drivers/net/atl1/atl1_main.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/atl1/atl1_main.c +++ linux-rt-rebase.q/drivers/net/atl1/atl1_main.c @@ -1704,10 +1704,8 @@ static int atl1_xmit_frame(struct sk_buf } } - local_irq_save(flags); - if (!spin_trylock(&adapter->lock)) { + if (!spin_trylock_irqsave(&adapter->lock, flags)) { /* Can't get lock - tell upper layer to requeue */ - local_irq_restore(flags); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); return NETDEV_TX_LOCKED; } patches/ppc-mcount-dummy-functions.patch0000664000077200007720000000245310655544572020007 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Mon May 14 17:16:37 2007 Date: Mon, 14 May 2007 17:16:37 +0900 From: Tsutomu OWA To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [patch 2/5] powerpc 2.6.21-rt1: dummy functions and export _mcount to compile add dummy functions save_stack_trace(), early_printk() for now and export _mcount to compile. Signed-off-by: Tsutomu OWA -- owa --- arch/powerpc/kernel/setup_64.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) Index: linux/arch/powerpc/kernel/setup_64.c =================================================================== --- linux.orig/arch/powerpc/kernel/setup_64.c +++ linux/arch/powerpc/kernel/setup_64.c @@ -605,3 +605,22 @@ struct ppc_pci_io ppc_pci_io; EXPORT_SYMBOL(ppc_pci_io); #endif /* CONFIG_PPC_INDIRECT_IO */ +#ifdef CONFIG_STACKTRACE +#include +void notrace save_stack_trace(struct stack_trace *trace, + struct task_struct *task) +{ +} +#endif /* CONFIG_STACKTRACE */ + +#ifdef CONFIG_EARLY_PRINTK +void notrace early_printk(const char *fmt, ...) +{ + BUG(); +} +#endif /* CONFIG_EARLY_PRINTK */ + +#ifdef CONFIG_MCOUNT +extern void _mcount(void); +EXPORT_SYMBOL(_mcount); +#endif /* CONFIG_MCOUNT */ patches/preempt-realtime-ipc.patch0000664000077200007720000000575610655544575016624 0ustar mingomingo--- ipc/mqueue.c | 5 +++++ ipc/msg.c | 25 +++++++++++++++++++------ ipc/sem.c | 6 ++++++ 3 files changed, 30 insertions(+), 6 deletions(-) Index: linux-rt-rebase.q/ipc/mqueue.c =================================================================== --- linux-rt-rebase.q.orig/ipc/mqueue.c +++ linux-rt-rebase.q/ipc/mqueue.c @@ -783,12 +783,17 @@ static inline void pipelined_send(struct struct msg_msg *message, struct ext_wait_queue *receiver) { + /* + * Keep them in one critical section for PREEMPT_RT: + */ + preempt_disable(); receiver->msg = message; list_del(&receiver->list); receiver->state = STATE_PENDING; wake_up_process(receiver->task); smp_wmb(); receiver->state = STATE_READY; + preempt_enable(); } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() Index: linux-rt-rebase.q/ipc/msg.c =================================================================== --- linux-rt-rebase.q.orig/ipc/msg.c +++ linux-rt-rebase.q/ipc/msg.c @@ -213,12 +213,19 @@ static void expunge_all(struct msg_queue while (tmp != &msq->q_receivers) { struct msg_receiver *msr; + /* + * Make sure that the wakeup doesnt preempt + * this CPU prematurely. 
(on PREEMPT_RT) + */ + preempt_disable(); + msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; msr->r_msg = NULL; - wake_up_process(msr->r_tsk); - smp_mb(); + wake_up_process(msr->r_tsk); /* serializes */ msr->r_msg = ERR_PTR(res); + + preempt_enable(); } } @@ -603,22 +610,28 @@ static inline int pipelined_send(struct !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { + /* + * Make sure that the wakeup doesnt preempt + * this CPU prematurely. (on PREEMPT_RT) + */ + preempt_disable(); + list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; - wake_up_process(msr->r_tsk); - smp_mb(); + wake_up_process(msr->r_tsk); /* serializes */ msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; msq->q_lrpid = msr->r_tsk->pid; msq->q_rtime = get_seconds(); - wake_up_process(msr->r_tsk); - smp_mb(); + wake_up_process(msr->r_tsk); /* serializes */ msr->r_msg = msg; + preempt_enable(); return 1; } + preempt_enable(); } } return 0; Index: linux-rt-rebase.q/ipc/sem.c =================================================================== --- linux-rt-rebase.q.orig/ipc/sem.c +++ linux-rt-rebase.q/ipc/sem.c @@ -412,6 +412,11 @@ static void update_queue (struct sem_arr if (error <= 0) { struct sem_queue *n; remove_from_queue(sma,q); + /* + * make sure that the wakeup doesnt preempt + * _this_ cpu prematurely. (on preempt_rt) + */ + preempt_disable(); q->status = IN_WAKEUP; /* * Continue scanning. The next operation @@ -434,6 +439,7 @@ static void update_queue (struct sem_arr */ smp_wmb(); q->status = error; + preempt_enable(); q = n; } else { q = q->next; patches/softlockup-cleanups.patch0000664000077200007720000000420710655544576016567 0ustar mingomingoSubject: softlockup watchdog: style cleanups From: Ingo Molnar kernel/softirq.c grew a few style uncleanlinesses in the past few months, clean that up. No functional changes: text data bss dec hex filename 1126 76 4 1206 4b6 softlockup.o.before 1129 76 4 1209 4b9 softlockup.o.after ( the 3 bytes .text increase is due to the "<1>" appended to one of the printk messages. ) Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) Index: linux-rt-rebase.q/kernel/softlockup.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softlockup.c +++ linux-rt-rebase.q/kernel/softlockup.c @@ -23,7 +23,7 @@ static DEFINE_PER_CPU(unsigned long, tou static DEFINE_PER_CPU(unsigned long, print_timestamp); static DEFINE_PER_CPU(struct task_struct *, watchdog_task); -static int did_panic = 0; +static int did_panic; static int softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) @@ -122,7 +122,7 @@ void softlockup_tick(void) /* * The watchdog thread - runs every second and touches the timestamp. 
*/ -static int watchdog(void * __bind_cpu) +static int watchdog(void *__bind_cpu) { struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; @@ -160,13 +160,13 @@ cpu_callback(struct notifier_block *nfb, BUG_ON(per_cpu(watchdog_task, hotcpu)); p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu); if (IS_ERR(p)) { - printk("watchdog for %i failed\n", hotcpu); + printk(KERN_ERR "watchdog for %i failed\n", hotcpu); return NOTIFY_BAD; } - per_cpu(touch_timestamp, hotcpu) = 0; - per_cpu(watchdog_task, hotcpu) = p; + per_cpu(touch_timestamp, hotcpu) = 0; + per_cpu(watchdog_task, hotcpu) = p; kthread_bind(p, hotcpu); - break; + break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: wake_up_process(per_cpu(watchdog_task, hotcpu)); @@ -186,7 +186,7 @@ cpu_callback(struct notifier_block *nfb, kthread_stop(p); break; #endif /* CONFIG_HOTPLUG_CPU */ - } + } return NOTIFY_OK; } patches/preempt-irqs-ppc-fix-b5.patch0000664000077200007720000000316610655544573017066 0ustar mingomingo To fix the following boot time error by removing ack member added by the rt patch. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Processor 1 found. Brought up 2 CPUs ------------[ cut here ]------------ kernel BUG at arch/powerpc/platforms/cell/interrupt.c:86! pu 0x1: Vector: 700 (Program Check) at [c00000000fff3c80] pc: c000000000033f9c: .iic_eoi+0x58/0x64 lr: c00000000009add8: .handle_percpu_irq+0xd4/0xf4 sp: c00000000fff3f00 msr: 9000000000021032 current = 0xc000000000fee040 paca = 0xc000000000509e80 pid = 0, comm = swapper kernel BUG at arch/powerpc/platforms/cell/interrupt.c:86! enter ? for help [link register ] c00000000009add8 .handle_percpu_irq+0xd4/0xf4 [c00000000fff3f00] c00000000009ada8 .handle_percpu_irq+0xa4/0xf4 (unreliable) [c00000000fff3f90] c000000000023bb8 .call_handle_irq+0x1c/0x2c [c000000000ff7950] c00000000000c910 .do_IRQ+0xf8/0x1b8 [c000000000ff79f0] c000000000034f34 .cbe_system_reset_exception+0x74/0xb4 [c000000000ff7a70] c000000000022610 .system_reset_exception+0x40/0xe0 [c000000000ff7af0] c000000000003378 system_reset_common+0xf8/0x100 --- arch/powerpc/platforms/cell/interrupt.c | 1 - 1 file changed, 1 deletion(-) Index: linux-rt-rebase.q/arch/powerpc/platforms/cell/interrupt.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/cell/interrupt.c +++ linux-rt-rebase.q/arch/powerpc/platforms/cell/interrupt.c @@ -90,7 +90,6 @@ static struct irq_chip iic_chip = { .typename = " CELL-IIC ", .mask = iic_mask, .unmask = iic_unmask, - .ack = iic_eoi, .eoi = iic_eoi, }; patches/latency-tracing-ppc.patch0000664000077200007720000000225210655544571016423 0ustar mingomingo arch/powerpc/kernel/time.c | 1 + arch/ppc/boot/Makefile | 9 +++++++++ 2 files changed, 10 insertions(+) Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -922,6 +922,7 @@ void __init time_init(void) tb_ticks_per_jiffy = ppc_tb_freq / HZ; tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; + cpu_khz = ppc_tb_freq / 1000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); calc_cputime_factors(); Index: linux/arch/ppc/boot/Makefile =================================================================== --- linux.orig/arch/ppc/boot/Makefile +++ linux/arch/ppc/boot/Makefile @@ -14,6 +14,15 @@ # CFLAGS += -fno-builtin -D__BOOTER__ -Iarch/$(ARCH)/boot/include + +ifdef CONFIG_MCOUNT +# do not trace 
the boot loader +nullstring := +space := $(nullstring) # end of the line +pg_flag = $(nullstring) -pg # end of the line +CFLAGS := $(subst ${pg_flag},${space},${CFLAGS}) +endif + HOSTCFLAGS += -Iarch/$(ARCH)/boot/include BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd patches/acpi-move-timer-broadcast-and-pmtimer-access-before-c3-arbiter-shutdown.patch0000664000077200007720000000274310655544570030235 0ustar mingomingoFrom: Udo A. Steinberg The chipset doc for ICH4 tells us: 1. In general, software should not attempt any non-posted accesses during arbiter disable except to the ICH4's power management registers. This implies that interrupt handlers for any unmasked hardware interrupts and SMI/NMI should check ARB_DIS status before reading from ICH devices. So it's not a good idea to access ICH devices after arbiter shut down. Signed-off-by: Udo A. Steinberg Signed-off-by: Thomas Gleixner Cc: Len Brown Signed-off-by: Andrew Morton --- drivers/acpi/processor_idle.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) Index: linux/drivers/acpi/processor_idle.c =================================================================== --- linux.orig/drivers/acpi/processor_idle.c +++ linux/drivers/acpi/processor_idle.c @@ -989,6 +989,12 @@ static int acpi_idle_enter_c3(struct cpu return 0; } + /* + * Must be done before busmaster disable as we might need to + * access HPET ! + */ + acpi_state_timer_broadcast(pr, cx, 1); + /* disable bus master */ if (pr->flags.bm_check) { spin_lock(&c3_lock); @@ -1008,7 +1014,6 @@ static int acpi_idle_enter_c3(struct cpu /* Get start time (ticks) */ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); - acpi_state_timer_broadcast(pr, cx, 1); acpi_idle_do_entry(cx); t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); patches/preempt-realtime-prevent-idle-boosting.patch0000664000077200007720000000342710655544575022256 0ustar mingomingoSubject: Preempt-RT: Prevent boosting of idle task Idle task boosting is a nono in general. There is one exception, when NOHZ is active: The idle task calls get_next_timer_interrupt() and holds the timer wheel base->lock on the CPU and another CPU wants to access the timer (probably to cancel it). We can safely ignore the boosting request, as the idle CPU runs this code with interrupts disabled and will complete the lock protected section without being interrupted. So there is no real need to boost. Signed-off-by: Thomas Gleixner --- kernel/sched.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -4392,6 +4392,25 @@ void rt_mutex_setprio(struct task_struct BUG_ON(prio < 0 || prio > MAX_PRIO); rq = task_rq_lock(p, &flags); + + /* + * Idle task boosting is a nono in general. There is one + * exception, when NOHZ is active: + * + * The idle task calls get_next_timer_interrupt() and holds + * the timer wheel base->lock on the CPU and another CPU wants + * to access the timer (probably to cancel it). We can safely + * ignore the boosting request, as the idle CPU runs this code + * with interrupts disabled and will complete the lock + * protected section without being interrupted. So there is no + * real need to boost. 
+ */ + if (unlikely(p == rq->idle)) { + WARN_ON(p != rq->curr); + WARN_ON(p->pi_blocked_on); + goto out_unlock; + } + now = rq_clock(rq); oldprio = p->prio; @@ -4425,6 +4444,7 @@ void rt_mutex_setprio(struct task_struct } trace_special(prev_resched, _need_resched(), 0); +out_unlock: task_rq_unlock(rq, &flags); } patches/lockstat-rt-hooks.patch0000664000077200007720000001237410655544576016162 0ustar mingomingo--- include/linux/lockdep.h | 28 ++++++++++++++++++++++++++++ kernel/rt.c | 25 ++++++++++++++++--------- kernel/rtmutex.c | 4 ++-- 3 files changed, 46 insertions(+), 11 deletions(-) Index: linux-rt-rebase.q/include/linux/lockdep.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/lockdep.h +++ linux-rt-rebase.q/include/linux/lockdep.h @@ -344,6 +344,28 @@ do { \ lock_acquired(&(_lock)->dep_map); \ } while (0) +#define LOCK_CONTENDED_RT(_lock, f_try, f_lock) \ +do { \ + if (!f_try(&(_lock)->lock)) { \ + lock_contended(&(_lock)->dep_map, _RET_IP_); \ + f_lock(&(_lock)->lock); \ + lock_acquired(&(_lock)->dep_map); \ + } \ +} while (0) + + +#define LOCK_CONTENDED_RT_RET(_lock, f_try, f_lock) \ +({ \ + int ret = 0; \ + if (!f_try(&(_lock)->lock)) { \ + lock_contended(&(_lock)->dep_map, _RET_IP_); \ + ret = f_lock(&(_lock)->lock); \ + if (!ret) \ + lock_acquired(&(_lock)->dep_map); \ + } \ + ret; \ +}) + #else /* CONFIG_LOCK_STAT */ #define lock_contended(lockdep_map, ip) do {} while (0) @@ -352,6 +374,12 @@ do { \ #define LOCK_CONTENDED(_lock, try, lock) \ lock(_lock) +#define LOCK_CONTENDED_RT(_lock, f_try, f_lock) \ + f_lock(&(_lock)->lock) + +#define LOCK_CONTENDED_RT_RET(_lock, f_try, f_lock) \ + f_lock(&(_lock)->lock) + #endif /* CONFIG_LOCK_STAT */ #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) Index: linux-rt-rebase.q/kernel/rt.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rt.c +++ linux-rt-rebase.q/kernel/rt.c @@ -98,16 +98,22 @@ EXPORT_SYMBOL(_mutex_init); void __lockfunc _mutex_lock(struct mutex *lock) { mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - rt_mutex_lock(&lock->lock); + LOCK_CONTENDED_RT(lock, rt_mutex_trylock, rt_mutex_lock); } EXPORT_SYMBOL(_mutex_lock); +static int __lockfunc __rt_mutex_lock_interruptible(struct rt_mutex *lock) +{ + return rt_mutex_lock_interruptible(lock, 0); +} + int __lockfunc _mutex_lock_interruptible(struct mutex *lock) { int ret; mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_lock_interruptible(&lock->lock, 0); + ret = LOCK_CONTENDED_RT_RET(lock, rt_mutex_trylock, + __rt_mutex_lock_interruptible); if (ret) mutex_release(&lock->dep_map, 1, _RET_IP_); return ret; @@ -118,7 +124,7 @@ EXPORT_SYMBOL(_mutex_lock_interruptible) void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) { mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - rt_mutex_lock(&lock->lock); + LOCK_CONTENDED_RT(lock, rt_mutex_trylock, rt_mutex_lock); } EXPORT_SYMBOL(_mutex_lock_nested); @@ -127,7 +133,8 @@ int __lockfunc _mutex_lock_interruptible int ret; mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - ret = rt_mutex_lock_interruptible(&lock->lock, 0); + ret = LOCK_CONTENDED_RT_RET(lock, rt_mutex_trylock, + __rt_mutex_lock_interruptible); if (ret) mutex_release(&lock->dep_map, 1, _RET_IP_); return ret; @@ -203,7 +210,7 @@ EXPORT_SYMBOL(rt_read_trylock); void __lockfunc rt_write_lock(rwlock_t *rwlock) { rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); - __rt_spin_lock(&rwlock->lock); + 
LOCK_CONTENDED_RT(rwlock, rt_mutex_trylock, __rt_spin_lock); } EXPORT_SYMBOL(rt_write_lock); @@ -223,7 +230,7 @@ void __lockfunc rt_read_lock(rwlock_t *r return; } spin_unlock_irqrestore(&lock->wait_lock, flags); - __rt_spin_lock(lock); + LOCK_CONTENDED_RT(rwlock, rt_mutex_trylock, __rt_spin_lock); } EXPORT_SYMBOL(rt_read_lock); @@ -359,14 +366,14 @@ EXPORT_SYMBOL(rt_down_write_trylock); void fastcall rt_down_write(struct rw_semaphore *rwsem) { rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); - rt_mutex_lock(&rwsem->lock); + LOCK_CONTENDED_RT(rwsem, rt_mutex_trylock, rt_mutex_lock); } EXPORT_SYMBOL(rt_down_write); void fastcall rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) { rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); - rt_mutex_lock(&rwsem->lock); + LOCK_CONTENDED_RT(rwsem, rt_mutex_trylock, rt_mutex_lock); } EXPORT_SYMBOL(rt_down_write_nested); @@ -411,7 +418,7 @@ static void __rt_down_read(struct rw_sem return; } spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); - rt_mutex_lock(&rwsem->lock); + LOCK_CONTENDED_RT(rwsem, rt_mutex_trylock, rt_mutex_lock); } void fastcall rt_down_read(struct rw_semaphore *rwsem) Index: linux-rt-rebase.q/kernel/rtmutex.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rtmutex.c +++ linux-rt-rebase.q/kernel/rtmutex.c @@ -785,8 +785,8 @@ rt_spin_lock_slowunlock(struct rt_mutex void __lockfunc rt_spin_lock(spinlock_t *lock) { - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_RT(lock, rt_mutex_trylock, __rt_spin_lock); } EXPORT_SYMBOL(rt_spin_lock); @@ -800,8 +800,8 @@ EXPORT_SYMBOL(__rt_spin_lock); void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) { - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + LOCK_CONTENDED_RT(lock, rt_mutex_trylock, __rt_spin_lock); } EXPORT_SYMBOL(rt_spin_lock_nested); patches/preempt-realtime-fs-block.patch0000664000077200007720000003123010655544575017533 0ustar mingomingo--- block/ll_rw_blk.c | 6 ++-- fs/aio.c | 6 +++- fs/block_dev.c | 34 +++++++++++++++++++++------ fs/dcache.c | 5 ++-- fs/dnotify.c | 2 - fs/exec.c | 10 ++++++-- fs/file.c | 5 ++-- fs/lockd/svc.c | 8 +----- fs/pipe.c | 12 +++++++++ fs/proc/proc_misc.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++ fs/proc/task_mmu.c | 4 ++- fs/xfs/linux-2.6/mrlock.h | 4 +-- fs/xfs/xfs_mount.h | 2 - include/linux/genhd.h | 11 +++++++-- 14 files changed, 133 insertions(+), 32 deletions(-) Index: linux-rt-rebase.q/block/ll_rw_blk.c =================================================================== --- linux-rt-rebase.q.orig/block/ll_rw_blk.c +++ linux-rt-rebase.q/block/ll_rw_blk.c @@ -1544,7 +1544,7 @@ static int ll_merge_requests_fn(struct r */ void blk_plug_device(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); /* * don't plug a stopped queue, it must be paired with blk_start_queue() @@ -1567,7 +1567,7 @@ EXPORT_SYMBOL(blk_plug_device); */ int blk_remove_plug(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) return 0; @@ -1660,7 +1660,7 @@ static void blk_unplug_timeout(unsigned **/ void blk_start_queue(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); Index: linux-rt-rebase.q/fs/aio.c 
=================================================================== --- linux-rt-rebase.q.orig/fs/aio.c +++ linux-rt-rebase.q/fs/aio.c @@ -582,13 +582,15 @@ static void use_mm(struct mm_struct *mm) tsk->flags |= PF_BORROWED_MM; active_mm = tsk->active_mm; atomic_inc(&mm->mm_count); - tsk->mm = mm; - tsk->active_mm = mm; + local_irq_disable(); // FIXME /* * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise * it won't work. Update it accordingly if you change it here */ switch_mm(active_mm, mm, tsk); + tsk->mm = mm; + tsk->active_mm = mm; + local_irq_enable(); task_unlock(tsk); mmdrop(active_mm); Index: linux-rt-rebase.q/fs/block_dev.c =================================================================== --- linux-rt-rebase.q.orig/fs/block_dev.c +++ linux-rt-rebase.q/fs/block_dev.c @@ -1213,14 +1213,32 @@ static int __blkdev_get(struct block_dev * For now, block device ->open() routine must _not_ * examine anything in 'inode' argument except ->i_rdev. */ - struct file fake_file = {}; - struct dentry fake_dentry = {}; - fake_file.f_mode = mode; - fake_file.f_flags = flags; - fake_file.f_path.dentry = &fake_dentry; - fake_dentry.d_inode = bdev->bd_inode; - - return do_open(bdev, &fake_file, for_part); + struct file *fake_file; + struct dentry *fake_dentry; + int err = -ENOMEM; + + fake_file = kmalloc(sizeof(*fake_file), GFP_KERNEL); + if (!fake_file) + goto out; + memset(fake_file, 0, sizeof(*fake_file)); + + fake_dentry = kmalloc(sizeof(*fake_dentry), GFP_KERNEL); + if (!fake_dentry) + goto out_free_file; + memset(fake_dentry, 0, sizeof(*fake_dentry)); + + fake_file->f_mode = mode; + fake_file->f_flags = flags; + fake_file->f_path.dentry = fake_dentry; + fake_dentry->d_inode = bdev->bd_inode; + + err = do_open(bdev, fake_file, for_part); + + kfree(fake_dentry); +out_free_file: + kfree(fake_file); +out: + return err; } int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags) Index: linux-rt-rebase.q/fs/dcache.c =================================================================== --- linux-rt-rebase.q.orig/fs/dcache.c +++ linux-rt-rebase.q/fs/dcache.c @@ -708,8 +708,9 @@ void shrink_dcache_for_umount(struct sup { struct dentry *dentry; - if (down_read_trylock(&sb->s_umount)) - BUG(); +// -rt: this might succeed there ... 
+// if (down_read_trylock(&sb->s_umount)) +// BUG(); dentry = sb->s_root; sb->s_root = NULL; Index: linux-rt-rebase.q/fs/dnotify.c =================================================================== --- linux-rt-rebase.q.orig/fs/dnotify.c +++ linux-rt-rebase.q/fs/dnotify.c @@ -162,7 +162,7 @@ void dnotify_parent(struct dentry *dentr spin_lock(&dentry->d_lock); parent = dentry->d_parent; - if (parent->d_inode->i_dnotify_mask & event) { + if (unlikely(parent->d_inode->i_dnotify_mask & event)) { dget(parent); spin_unlock(&dentry->d_lock); __inode_dir_notify(parent->d_inode, event); Index: linux-rt-rebase.q/fs/exec.c =================================================================== --- linux-rt-rebase.q.orig/fs/exec.c +++ linux-rt-rebase.q/fs/exec.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -749,11 +750,16 @@ static int exec_mmap(struct mm_struct *m } } task_lock(tsk); + + local_irq_disable(); active_mm = tsk->active_mm; + activate_mm(active_mm, mm); tsk->mm = mm; tsk->active_mm = mm; - activate_mm(active_mm, mm); + local_irq_enable(); + task_unlock(tsk); + arch_pick_mmap_layout(mm); if (old_mm) { up_read(&old_mm->mmap_sem); @@ -875,7 +881,7 @@ static int de_thread(struct task_struct */ leader = tsk->group_leader; while (leader->exit_state != EXIT_ZOMBIE) - yield(); + msleep(1); /* * The only record we have of the real-time age of a Index: linux-rt-rebase.q/fs/file.c =================================================================== --- linux-rt-rebase.q.orig/fs/file.c +++ linux-rt-rebase.q/fs/file.c @@ -96,14 +96,15 @@ void free_fdtable_rcu(struct rcu_head *r kfree(fdt->open_fds); kfree(fdt); } else { - fddef = &get_cpu_var(fdtable_defer_list); + + fddef = &per_cpu(fdtable_defer_list, raw_smp_processor_id()); + spin_lock(&fddef->lock); fdt->next = fddef->next; fddef->next = fdt; /* vmallocs are handled from the workqueue context */ schedule_work(&fddef->wq); spin_unlock(&fddef->lock); - put_cpu_var(fdtable_defer_list); } } Index: linux-rt-rebase.q/fs/lockd/svc.c =================================================================== --- linux-rt-rebase.q.orig/fs/lockd/svc.c +++ linux-rt-rebase.q/fs/lockd/svc.c @@ -349,16 +349,12 @@ lockd_down(void) * Wait for the lockd process to exit, but since we're holding * the lockd semaphore, we can't wait around forever ... */ - clear_thread_flag(TIF_SIGPENDING); - interruptible_sleep_on_timeout(&lockd_exit, HZ); - if (nlmsvc_pid) { + if (wait_event_interruptible_timeout(lockd_exit, + nlmsvc_pid == 0, HZ) <= 0) { printk(KERN_WARNING "lockd_down: lockd failed to exit, clearing pid\n"); nlmsvc_pid = 0; } - spin_lock_irq(¤t->sighand->siglock); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); out: mutex_unlock(&nlmsvc_mutex); } Index: linux-rt-rebase.q/fs/pipe.c =================================================================== --- linux-rt-rebase.q.orig/fs/pipe.c +++ linux-rt-rebase.q/fs/pipe.c @@ -386,8 +386,14 @@ redo: wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } + /* + * Hack: we turn off atime updates for -RT kernels. + * Who uses them on pipes anyway? + */ +#ifndef CONFIG_PREEMPT_RT if (ret > 0) file_accessed(filp); +#endif return ret; } @@ -559,8 +565,14 @@ out: wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } + /* + * Hack: we turn off atime updates for -RT kernels. + * Who uses them on pipes anyway? 
+ */ +#ifndef CONFIG_PREEMPT_RT if (ret > 0) file_update_time(filp); +#endif return ret; } Index: linux-rt-rebase.q/fs/proc/proc_misc.c =================================================================== --- linux-rt-rebase.q.orig/fs/proc/proc_misc.c +++ linux-rt-rebase.q/fs/proc/proc_misc.c @@ -96,6 +96,27 @@ static int loadavg_read_proc(char *page, return proc_calc_metrics(page, start, off, count, eof, len); } +#ifdef CONFIG_PREEMPT_RT +static int loadavg_rt_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + extern unsigned long avenrun_rt[]; + extern unsigned long rt_nr_running(void); + int a, b, c; + int len; + + a = avenrun_rt[0] + (FIXED_1/200); + b = avenrun_rt[1] + (FIXED_1/200); + c = avenrun_rt[2] + (FIXED_1/200); + len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n", + LOAD_INT(a), LOAD_FRAC(a), + LOAD_INT(b), LOAD_FRAC(b), + LOAD_INT(c), LOAD_FRAC(c), + rt_nr_running(), nr_threads, current->nsproxy->pid_ns->last_pid); + return proc_calc_metrics(page, start, off, count, eof, len); +} +#endif + static int uptime_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -539,6 +560,38 @@ static int show_stat(struct seq_file *p, nr_iowait()); kfree(per_irq_sum); +#ifdef CONFIG_PREEMPT_RT + { + unsigned long nr_uninterruptible_cpu(int cpu); + extern int pi_initialized; + unsigned long rt_nr_running(void); + unsigned long rt_nr_running_cpu(int cpu); + unsigned long rt_nr_uninterruptible(void); + unsigned long rt_nr_uninterruptible_cpu(int cpu); + + int i; + + seq_printf(p, "pi_init: %d\n", pi_initialized); + seq_printf(p, "nr_running(): %ld\n", + nr_running()); + seq_printf(p, "nr_uninterruptible(): %ld\n", + nr_uninterruptible()); + for_each_online_cpu(i) + seq_printf(p, "nr_uninterruptible(%d): %ld\n", + i, nr_uninterruptible_cpu(i)); + seq_printf(p, "rt_nr_running(): %ld\n", + rt_nr_running()); + for_each_online_cpu(i) + seq_printf(p, "rt_nr_running(%d): %ld\n", + i, rt_nr_running_cpu(i)); + seq_printf(p, "nr_rt_uninterruptible(): %ld\n", + rt_nr_uninterruptible()); + for_each_online_cpu(i) + seq_printf(p, "nr_rt_uninterruptible(%d): %ld\n", + i, rt_nr_uninterruptible_cpu(i)); + } +#endif + return 0; } @@ -700,6 +753,9 @@ void __init proc_misc_init(void) int (*read_proc)(char*,char**,off_t,int,int*,void*); } *p, simple_ones[] = { {"loadavg", loadavg_read_proc}, +#ifdef CONFIG_PREEMPT_RT + {"loadavgrt", loadavg_rt_read_proc}, +#endif {"uptime", uptime_read_proc}, {"meminfo", meminfo_read_proc}, {"version", version_read_proc}, Index: linux-rt-rebase.q/fs/proc/task_mmu.c =================================================================== --- linux-rt-rebase.q.orig/fs/proc/task_mmu.c +++ linux-rt-rebase.q/fs/proc/task_mmu.c @@ -417,8 +417,10 @@ static void *m_start(struct seq_file *m, vma = NULL; if ((unsigned long)l < mm->map_count) { vma = mm->mmap; - while (l-- && vma) + while (l-- && vma) { vma = vma->vm_next; + cond_resched(); + } goto out; } Index: linux-rt-rebase.q/fs/xfs/linux-2.6/mrlock.h =================================================================== --- linux-rt-rebase.q.orig/fs/xfs/linux-2.6/mrlock.h +++ linux-rt-rebase.q/fs/xfs/linux-2.6/mrlock.h @@ -23,8 +23,8 @@ enum { MR_NONE, MR_ACCESS, MR_UPDATE }; typedef struct { - struct rw_semaphore mr_lock; - int mr_writer; + struct compat_rw_semaphore mr_lock; + int mr_writer; } mrlock_t; #define mrinit(mrp, name) \ Index: linux-rt-rebase.q/fs/xfs/xfs_mount.h =================================================================== --- 
linux-rt-rebase.q.orig/fs/xfs/xfs_mount.h +++ linux-rt-rebase.q/fs/xfs/xfs_mount.h @@ -377,7 +377,7 @@ typedef struct xfs_mount { uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */ uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */ struct xfs_perag *m_perag; /* per-ag accounting info */ - struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ + struct compat_rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ sema_t m_growlock; /* growfs mutex */ int m_fixedfsid[2]; /* unchanged for life of FS */ uint m_dmevmask; /* DMI events for this FS */ Index: linux-rt-rebase.q/include/linux/genhd.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/genhd.h +++ linux-rt-rebase.q/include/linux/genhd.h @@ -157,15 +157,22 @@ struct disk_attribute { * variants disable/enable preemption. */ #ifdef CONFIG_SMP -#define __disk_stat_add(gendiskp, field, addnd) \ - (per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd) +#define __disk_stat_add(gendiskp, field, addnd) \ +do { \ + preempt_disable(); \ + (per_cpu_ptr(gendiskp->dkstats, \ + smp_processor_id())->field += addnd); \ + preempt_enable(); \ +} while (0) #define disk_stat_read(gendiskp, field) \ ({ \ typeof(gendiskp->dkstats->field) res = 0; \ int i; \ + preempt_disable(); \ for_each_possible_cpu(i) \ res += per_cpu_ptr(gendiskp->dkstats, i)->field; \ + preempt_enable(); \ res; \ }) patches/ppc-gtod-support.patch0000664000077200007720000002433510655544572016015 0ustar mingomingoEarly pass on powerpc conversion to generic timekeeping. Signed-off-by: John Stultz arch/powerpc/Kconfig | 4 arch/powerpc/kernel/time.c | 273 +++++---------------------------------------- 2 files changed, 37 insertions(+), 240 deletions(-) linux-2.6.18-rc6_timeofday-arch-ppc_C6.patch ============================================ Index: linux/arch/powerpc/Kconfig =================================================================== --- linux.orig/arch/powerpc/Kconfig +++ linux/arch/powerpc/Kconfig @@ -21,6 +21,10 @@ config MMU bool default y +config GENERIC_TIME + bool + default y + config GENERIC_HARDIRQS bool default y Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -116,8 +116,6 @@ static u64 tb_to_ns_scale __read_mostly; static unsigned tb_to_ns_shift __read_mostly; static unsigned long boot_tb __read_mostly; -struct gettimeofday_struct do_gtod; - extern struct timezone sys_tz; static long timezone_offset; @@ -382,160 +380,6 @@ static __inline__ void timer_check_rtc(v } } -/* - * This version of gettimeofday has microsecond resolution. - */ -static inline void __do_gettimeofday(struct timeval *tv) -{ - unsigned long sec, usec; - u64 tb_ticks, xsec; - struct gettimeofday_vars *temp_varp; - u64 temp_tb_to_xs, temp_stamp_xsec; - - /* - * These calculations are faster (gets rid of divides) - * if done in units of 1/2^20 rather than microseconds. - * The conversion to microseconds at the end is done - * without a divide (and in fact, without a multiply) - */ - temp_varp = do_gtod.varp; - - /* Sampling the time base must be done after loading - * do_gtod.varp in order to avoid racing with update_gtod. 
- */ - data_barrier(temp_varp); - tb_ticks = get_tb() - temp_varp->tb_orig_stamp; - temp_tb_to_xs = temp_varp->tb_to_xs; - temp_stamp_xsec = temp_varp->stamp_xsec; - xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs); - sec = xsec / XSEC_PER_SEC; - usec = (unsigned long)xsec & (XSEC_PER_SEC - 1); - usec = SCALE_XSEC(usec, 1000000); - - tv->tv_sec = sec; - tv->tv_usec = usec; -} - -void do_gettimeofday(struct timeval *tv) -{ - if (__USE_RTC()) { - /* do this the old way */ - unsigned long flags, seq; - unsigned int sec, nsec, usec; - - do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - sec = xtime.tv_sec; - nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy); - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - usec = nsec / 1000; - while (usec >= 1000000) { - usec -= 1000000; - ++sec; - } - tv->tv_sec = sec; - tv->tv_usec = usec; - return; - } - __do_gettimeofday(tv); -} - -EXPORT_SYMBOL(do_gettimeofday); - -/* - * There are two copies of tb_to_xs and stamp_xsec so that no - * lock is needed to access and use these values in - * do_gettimeofday. We alternate the copies and as long as a - * reasonable time elapses between changes, there will never - * be inconsistent values. ntpd has a minimum of one minute - * between updates. - */ -static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, - u64 new_tb_to_xs) -{ - unsigned temp_idx; - struct gettimeofday_vars *temp_varp; - - temp_idx = (do_gtod.var_idx == 0); - temp_varp = &do_gtod.vars[temp_idx]; - - temp_varp->tb_to_xs = new_tb_to_xs; - temp_varp->tb_orig_stamp = new_tb_stamp; - temp_varp->stamp_xsec = new_stamp_xsec; - smp_mb(); - do_gtod.varp = temp_varp; - do_gtod.var_idx = temp_idx; - - /* - * tb_update_count is used to allow the userspace gettimeofday code - * to assure itself that it sees a consistent view of the tb_to_xs and - * stamp_xsec variables. It reads the tb_update_count, then reads - * tb_to_xs and stamp_xsec and then reads tb_update_count again. If - * the two values of tb_update_count match and are even then the - * tb_to_xs and stamp_xsec values are consistent. If not, then it - * loops back and reads them again until this criteria is met. - * We expect the caller to have done the first increment of - * vdso_data->tb_update_count already. - */ - vdso_data->tb_orig_stamp = new_tb_stamp; - vdso_data->stamp_xsec = new_stamp_xsec; - vdso_data->tb_to_xs = new_tb_to_xs; - vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; - vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; - smp_wmb(); - ++(vdso_data->tb_update_count); -} - -/* - * When the timebase - tb_orig_stamp gets too big, we do a manipulation - * between tb_orig_stamp and stamp_xsec. The goal here is to keep the - * difference tb - tb_orig_stamp small enough to always fit inside a - * 32 bits number. This is a requirement of our fast 32 bits userland - * implementation in the vdso. If we "miss" a call to this function - * (interrupt latency, CPU locked in a spinlock, ...) 
and we end up - * with a too big difference, then the vdso will fallback to calling - * the syscall - */ -static __inline__ void timer_recalc_offset(u64 cur_tb) -{ - unsigned long offset; - u64 new_stamp_xsec; - u64 tlen, t2x; - u64 tb, xsec_old, xsec_new; - struct gettimeofday_vars *varp; - - if (__USE_RTC()) - return; - tlen = current_tick_length(); - offset = cur_tb - do_gtod.varp->tb_orig_stamp; - if (tlen == last_tick_len && offset < 0x80000000u) - return; - if (tlen != last_tick_len) { - t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs); - last_tick_len = tlen; - } else - t2x = do_gtod.varp->tb_to_xs; - new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; - do_div(new_stamp_xsec, 1000000000); - new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; - - ++vdso_data->tb_update_count; - smp_mb(); - - /* - * Make sure time doesn't go backwards for userspace gettimeofday. - */ - tb = get_tb(); - varp = do_gtod.varp; - xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs) - + varp->stamp_xsec; - xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec; - if (xsec_new < xsec_old) - new_stamp_xsec += xsec_old - xsec_new; - - update_gtod(cur_tb, new_stamp_xsec, t2x); -} - #ifdef CONFIG_SMP unsigned long profile_pc(struct pt_regs *regs) { @@ -590,11 +434,7 @@ static int __init iSeries_tb_recal(void) tb_ticks_per_sec = new_tb_ticks_per_sec; calc_cputime_factors(); div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); - do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; tb_to_xs = divres.result_low; - do_gtod.varp->tb_to_xs = tb_to_xs; - vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; - vdso_data->tb_to_xs = tb_to_xs; } else { printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" @@ -760,71 +600,6 @@ unsigned long long sched_clock(void) return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; } -int do_settimeofday(struct timespec *tv) -{ - time_t wtm_sec, new_sec = tv->tv_sec; - long wtm_nsec, new_nsec = tv->tv_nsec; - unsigned long flags; - u64 new_xsec; - unsigned long tb_delta; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irqsave(&xtime_lock, flags); - - /* - * Updating the RTC is not the job of this code. If the time is - * stepped under NTP, the RTC will be updated after STA_UNSYNC - * is cleared. Tools like clock/hwclock either copy the RTC - * to the system time, in which case there is no point in writing - * to the RTC again, or write to the RTC but then they don't call - * settimeofday to perform this operation. - */ - - /* Make userspace gettimeofday spin until we're done. */ - ++vdso_data->tb_update_count; - smp_mb(); - - /* - * Subtract off the number of nanoseconds since the - * beginning of the last tick. - */ - tb_delta = tb_ticks_since(tb_last_jiffy); - tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ - new_nsec -= SCALE_XSEC(tb_delta, 1000000000); - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); - - set_normalized_timespec(&xtime, new_sec, new_nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - /* In case of a large backwards jump in time with NTP, we want the - * clock to be updated as soon as the PLL is again in lock. 
- */ - last_rtc_update = new_sec - 658; - - ntp_clear(); - - new_xsec = xtime.tv_nsec; - if (new_xsec != 0) { - new_xsec *= XSEC_PER_SEC; - do_div(new_xsec, NSEC_PER_SEC); - } - new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC; - update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); - - vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; - vdso_data->tz_dsttime = sys_tz.tz_dsttime; - - write_sequnlock_irqrestore(&xtime_lock, flags); - clock_was_set(); - return 0; -} - -EXPORT_SYMBOL(do_settimeofday); - static int __init get_freq(char *name, int cells, unsigned long *val) { struct device_node *cpu; @@ -992,20 +767,6 @@ void __init time_init(void) xtime.tv_sec = tm; xtime.tv_nsec = 0; - do_gtod.varp = &do_gtod.vars[0]; - do_gtod.var_idx = 0; - do_gtod.varp->tb_orig_stamp = tb_last_jiffy; - __get_cpu_var(last_jiffy) = tb_last_jiffy; - do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; - do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; - do_gtod.varp->tb_to_xs = tb_to_xs; - do_gtod.tb_to_us = tb_to_us; - - vdso_data->tb_orig_stamp = tb_last_jiffy; - vdso_data->tb_update_count = 0; - vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; - vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; - vdso_data->tb_to_xs = tb_to_xs; time_freq = 0; @@ -1018,7 +779,6 @@ void __init time_init(void) set_dec(tb_ticks_per_jiffy); } - #define FEBRUARY 2 #define STARTOFTIME 1970 #define SECDAY 86400L @@ -1163,3 +923,36 @@ void div128_by_32(u64 dividend_high, u64 dr->result_low = ((u64)y << 32) + z; } + + +/* powerpc clocksource code */ + +#include +static cycle_t timebase_read(void) +{ + return (cycle_t)get_tb(); +} + +struct clocksource clocksource_timebase = { + .name = "timebase", + .rating = 200, + .read = timebase_read, + .mask = (cycle_t)-1, + .mult = 0, + .shift = 22, +}; + + +/* XXX - this should be calculated or properly externed! */ +static int __init init_timebase_clocksource(void) +{ + if (__USE_RTC()) + return -ENODEV; + + clocksource_timebase.mult = clocksource_hz2mult(tb_ticks_per_sec, + clocksource_timebase.shift); + return clocksource_register(&clocksource_timebase); +} + +module_init(init_timebase_clocksource); + patches/jiffies-remove-unused-macros.patch0000664000077200007720000000264710655544570020263 0ustar mingomingoSubject: jiffies: remove unused macros From: Chris Wright The x86 hpet cleanups allow removal of some unused macros. Signed-off-by: Chris Wright Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/linux/jiffies.h | 6 ------ 1 file changed, 6 deletions(-) Index: linux/include/linux/jiffies.h =================================================================== --- linux.orig/include/linux/jiffies.h +++ linux/include/linux/jiffies.h @@ -36,8 +36,6 @@ /* LATCH is used in the interval timer and ftape setup. */ #define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ -#define LATCH_HPET ((HPET_TICK_RATE + HZ/2) / HZ) - /* Suppose we want to devide two numbers NOM and DEN: NOM/DEN, the we can * improve accuracy by shifting LSH bits, hence calculating: * (NOM << LSH) / DEN @@ -53,13 +51,9 @@ /* HZ is the requested value. 
ACTHZ is actual HZ ("<< 8" is for accuracy) */ #define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8)) -#define ACTHZ_HPET (SH_DIV (HPET_TICK_RATE, LATCH_HPET, 8)) - /* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */ #define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8)) -#define TICK_NSEC_HPET (SH_DIV(1000000UL * 1000, ACTHZ_HPET, 8)) - /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) patches/rt-mutex-core.patch0000664000077200007720000052531310655544573015304 0ustar mingomingo--- drivers/input/ff-memless.c | 1 fs/proc/array.c | 28 + include/linux/bit_spinlock.h | 4 include/linux/init_task.h | 3 include/linux/mutex.h | 57 ++ include/linux/plist.h | 4 include/linux/rt_lock.h | 341 ++++++++++++++++ include/linux/rtmutex.h | 4 include/linux/rwsem-spinlock.h | 35 - include/linux/rwsem.h | 108 ++++- include/linux/sched.h | 76 ++- include/linux/semaphore.h | 50 ++ include/linux/seqlock.h | 195 ++++++++- include/linux/spinlock.h | 804 +++++++++++++++++++++++++++++--------- include/linux/spinlock_api_smp.h | 91 ++-- include/linux/spinlock_api_up.h | 74 ++- include/linux/spinlock_types.h | 61 ++ include/linux/spinlock_types_up.h | 6 include/linux/spinlock_up.h | 8 kernel/Makefile | 6 kernel/fork.c | 7 kernel/futex.c | 4 kernel/hrtimer.c | 4 kernel/lockdep.c | 2 kernel/rt.c | 571 ++++++++++++++++++++++++++ kernel/rtmutex-debug.c | 113 +---- kernel/rtmutex.c | 433 ++++++++++++++++++-- kernel/rwsem.c | 44 +- kernel/sched.c | 93 +++- kernel/spinlock.c | 269 ++++++++---- lib/dec_and_lock.c | 4 lib/kernel_lock.c | 4 lib/locking-selftest.c | 6 lib/plist.c | 2 lib/rwsem-spinlock.c | 29 - lib/rwsem.c | 6 lib/semaphore-sleepers.c | 16 lib/spinlock_debug.c | 64 +-- 38 files changed, 2941 insertions(+), 686 deletions(-) Index: linux-rt-rebase.q/drivers/input/ff-memless.c =================================================================== --- linux-rt-rebase.q.orig/drivers/input/ff-memless.c +++ linux-rt-rebase.q/drivers/input/ff-memless.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include Index: linux-rt-rebase.q/fs/proc/array.c =================================================================== --- linux-rt-rebase.q.orig/fs/proc/array.c +++ linux-rt-rebase.q/fs/proc/array.c @@ -130,17 +130,19 @@ static inline char *task_name(struct tas */ static const char *task_state_array[] = { "R (running)", /* 0 */ - "S (sleeping)", /* 1 */ - "D (disk sleep)", /* 2 */ - "T (stopped)", /* 4 */ - "T (tracing stop)", /* 8 */ - "Z (zombie)", /* 16 */ - "X (dead)" /* 32 */ + "M (running-mutex)", /* 1 */ + "S (sleeping)", /* 2 */ + "D (disk sleep)", /* 4 */ + "T (stopped)", /* 8 */ + "T (tracing stop)", /* 16 */ + "Z (zombie)", /* 32 */ + "X (dead)" /* 64 */ }; static inline const char *get_task_state(struct task_struct *tsk) { unsigned int state = (tsk->state & (TASK_RUNNING | + TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE | TASK_STOPPED | @@ -298,6 +300,19 @@ static inline char *task_context_switch_ p->nivcsw); } +#define get_blocked_on(t) (-1) + +static char *show_blocked_on(struct task_struct *task, char *buffer) +{ + pid_t pid = get_blocked_on(task); + + if (pid < 0) + return buffer; + + return buffer + sprintf(buffer,"BlckOn: %d\n",pid); +} + + int proc_pid_status(struct task_struct *task, char *buffer) { char *orig = buffer; @@ -317,6 +332,7 @@ int proc_pid_status(struct task_struct * buffer = task_show_regs(task, buffer); #endif buffer = task_context_switch_counts(task, buffer); + buffer = 
show_blocked_on(task,buffer); return buffer - orig; } Index: linux-rt-rebase.q/include/linux/bit_spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/bit_spinlock.h +++ linux-rt-rebase.q/include/linux/bit_spinlock.h @@ -1,6 +1,8 @@ #ifndef __LINUX_BIT_SPINLOCK_H #define __LINUX_BIT_SPINLOCK_H +#if 0 + /* * bit-based spin_lock() * @@ -73,5 +75,7 @@ static inline int bit_spin_is_locked(int #endif } +#endif + #endif /* __LINUX_BIT_SPINLOCK_H */ Index: linux-rt-rebase.q/include/linux/init_task.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/init_task.h +++ linux-rt-rebase.q/include/linux/init_task.h @@ -9,6 +9,7 @@ #include #include #include +#include #define INIT_FDTABLE \ { \ @@ -163,7 +164,7 @@ extern struct group_info init_groups; .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ - .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .pi_lock = RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ Index: linux-rt-rebase.q/include/linux/mutex.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/mutex.h +++ linux-rt-rebase.q/include/linux/mutex.h @@ -12,11 +12,66 @@ #include #include +#include #include #include #include +#ifdef CONFIG_PREEMPT_RT + +#include + +struct mutex { + struct rt_mutex lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ + { \ + .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ + } + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void +_mutex_init(struct mutex *lock, char *name, struct lock_class_key *key); + +extern void __lockfunc _mutex_lock(struct mutex *lock); +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_trylock(struct mutex *lock); +extern void __lockfunc _mutex_unlock(struct mutex *lock); + +#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) +#define mutex_lock(l) _mutex_lock(l) +#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) +#define mutex_trylock(l) _mutex_trylock(l) +#define mutex_unlock(l) _mutex_unlock(l) +#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible_nested(l, s) +#else +# define mutex_lock_nested(l, s) _mutex_lock(l) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible(l) +#endif + +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + _mutex_init((mutex), #mutex, &__key); \ +} while (0) + +#else /* * Simple, straightforward mutexes with strict semantics: * @@ -140,3 +195,5 @@ extern int fastcall mutex_trylock(struct extern void fastcall mutex_unlock(struct mutex *lock); #endif + +#endif Index: linux-rt-rebase.q/include/linux/plist.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/plist.h +++ linux-rt-rebase.q/include/linux/plist.h @@ -81,7 +81,7 @@ struct plist_head { struct 
list_head prio_list; struct list_head node_list; #ifdef CONFIG_DEBUG_PI_LIST - spinlock_t *lock; + raw_spinlock_t *lock; #endif }; @@ -125,7 +125,7 @@ struct plist_node { * @lock: list spinlock, remembered for debugging */ static inline void -plist_head_init(struct plist_head *head, spinlock_t *lock) +plist_head_init(struct plist_head *head, raw_spinlock_t *lock) { INIT_LIST_HEAD(&head->prio_list); INIT_LIST_HEAD(&head->node_list); Index: linux-rt-rebase.q/include/linux/rt_lock.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/linux/rt_lock.h @@ -0,0 +1,341 @@ +#ifndef __LINUX_RT_LOCK_H +#define __LINUX_RT_LOCK_H + +/* + * Real-Time Preemption Support + * + * started by Ingo Molnar: + * + * Copyright (C) 2004, 2005 Red Hat, Inc., Ingo Molnar + * + * This file contains the main data structure definitions. + */ +#include +#include +#include + +#ifdef CONFIG_PREEMPT_RT +/* + * spinlocks - an RT mutex plus lock-break field: + */ +typedef struct { + struct rt_mutex lock; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __SPIN_LOCK_UNLOCKED(name) \ + (spinlock_t) { { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) \ + , .save_state = 1, .file = __FILE__, .line = __LINE__ }, SPIN_DEP_MAP_INIT(name) } +#else +# define __SPIN_LOCK_UNLOCKED(name) \ + (spinlock_t) { { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) }, SPIN_DEP_MAP_INIT(name) } +#endif +# define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(spin_old_style) +#else /* !PREEMPT_RT */ + typedef raw_spinlock_t spinlock_t; +# ifdef CONFIG_DEBUG_SPINLOCK +# define _SPIN_LOCK_UNLOCKED \ + { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + .magic = SPINLOCK_MAGIC, \ + .owner = SPINLOCK_OWNER_INIT, \ + .owner_cpu = -1 } +# else +# define _SPIN_LOCK_UNLOCKED \ + { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } +# endif +# define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED +# define __SPIN_LOCK_UNLOCKED(name) _SPIN_LOCK_UNLOCKED +#endif + +#define __DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) + +#ifdef CONFIG_PREEMPT_RT + +/* + * RW-semaphores are a spinlock plus a reader-depth count. + * + * Note that the semantics are different from the usual + * Linux rw-sems, in PREEMPT_RT mode we do not allow + * multiple readers to hold the lock at once, we only allow + * a read-lock owner to read-lock recursively. 
This is + * better for latency, makes the implementation inherently + * fair and makes it simpler as well: + */ +struct rw_semaphore { + struct rt_mutex lock; + int read_depth; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +/* + * rwlocks - an RW semaphore plus lock-break field: + */ +typedef struct { + struct rt_mutex lock; + int read_depth; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +# ifdef CONFIG_DEBUG_RT_MUTEXES +# define __RW_LOCK_UNLOCKED(name) (rwlock_t) \ + { .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name), \ + .save_state = 1, .file = __FILE__, .line = __LINE__ } } +# else +# define __RW_LOCK_UNLOCKED(name) (rwlock_t) \ + { .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) } } +# endif +#else /* !PREEMPT_RT */ + + typedef raw_rwlock_t rwlock_t; +# ifdef CONFIG_DEBUG_SPINLOCK +# define _RW_LOCK_UNLOCKED \ + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + .magic = RWLOCK_MAGIC, \ + .owner = SPINLOCK_OWNER_INIT, \ + .owner_cpu = -1 } +# else +# define _RW_LOCK_UNLOCKED \ + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } +# endif +# define __RW_LOCK_UNLOCKED(name) _RW_LOCK_UNLOCKED +#endif + +#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(rw_old_style) + +#define DEFINE_RWLOCK(name) \ + rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + +#ifdef CONFIG_PREEMPT_RT + +/* + * Semaphores - a spinlock plus the semaphore count: + */ +struct semaphore { + atomic_t count; + struct rt_mutex lock; +}; + +#define DECLARE_MUTEX(name) \ +struct semaphore name = \ + { .count = { 1 }, .lock = __RT_MUTEX_INITIALIZER(name.lock) } + +/* + * DECLARE_MUTEX_LOCKED() is deprecated: very hard to initialize properly + * and it also often signals abuse of semaphores. 
So we redirect it to + * compat semaphores: + */ +#define DECLARE_MUTEX_LOCKED COMPAT_DECLARE_MUTEX_LOCKED + +extern void fastcall +__sema_init(struct semaphore *sem, int val, char *name, char *file, int line); + +#define rt_sema_init(sem, val) \ + __sema_init(sem, val, #sem, __FILE__, __LINE__) + +extern void fastcall +__init_MUTEX(struct semaphore *sem, char *name, char *file, int line); +#define rt_init_MUTEX(sem) \ + __init_MUTEX(sem, #sem, __FILE__, __LINE__) + +extern void there_is_no_init_MUTEX_LOCKED_for_RT_semaphores(void); + +/* + * No locked initialization for RT semaphores + */ +#define rt_init_MUTEX_LOCKED(sem) \ + there_is_no_init_MUTEX_LOCKED_for_RT_semaphores() +extern void fastcall rt_down(struct semaphore *sem); +extern int fastcall rt_down_interruptible(struct semaphore *sem); +extern int fastcall rt_down_trylock(struct semaphore *sem); +extern void fastcall rt_up(struct semaphore *sem); + +#define rt_sem_is_locked(s) rt_mutex_is_locked(&(s)->lock) +#define rt_sema_count(s) atomic_read(&(s)->count) + +extern int __bad_func_type(void); + +#undef TYPE_EQUAL +#define TYPE_EQUAL(var, type) \ + __builtin_types_compatible_p(typeof(var), type *) + +#define PICK_FUNC_1ARG(type1, type2, func1, func2, arg) \ +do { \ + if (TYPE_EQUAL((arg), type1)) \ + func1((type1 *)(arg)); \ + else if (TYPE_EQUAL((arg), type2)) \ + func2((type2 *)(arg)); \ + else __bad_func_type(); \ +} while (0) + +#define PICK_FUNC_1ARG_RET(type1, type2, func1, func2, arg) \ +({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((arg), type1)) \ + __ret = func1((type1 *)(arg)); \ + else if (TYPE_EQUAL((arg), type2)) \ + __ret = func2((type2 *)(arg)); \ + else __ret = __bad_func_type(); \ + \ + __ret; \ +}) + +#define PICK_FUNC_2ARG(type1, type2, func1, func2, arg0, arg1) \ +do { \ + if (TYPE_EQUAL((arg0), type1)) \ + func1((type1 *)(arg0), arg1); \ + else if (TYPE_EQUAL((arg0), type2)) \ + func2((type2 *)(arg0), arg1); \ + else __bad_func_type(); \ +} while (0) + +#define sema_init(sem, val) \ + PICK_FUNC_2ARG(struct compat_semaphore, struct semaphore, \ + compat_sema_init, rt_sema_init, sem, val) + +#define init_MUTEX(sem) \ + PICK_FUNC_1ARG(struct compat_semaphore, struct semaphore, \ + compat_init_MUTEX, rt_init_MUTEX, sem) + +#define init_MUTEX_LOCKED(sem) \ + PICK_FUNC_1ARG(struct compat_semaphore, struct semaphore, \ + compat_init_MUTEX_LOCKED, rt_init_MUTEX_LOCKED, sem) + +#define down(sem) \ + PICK_FUNC_1ARG(struct compat_semaphore, struct semaphore, \ + compat_down, rt_down, sem) + +#define down_interruptible(sem) \ + PICK_FUNC_1ARG_RET(struct compat_semaphore, struct semaphore, \ + compat_down_interruptible, rt_down_interruptible, sem) + +#define down_trylock(sem) \ + PICK_FUNC_1ARG_RET(struct compat_semaphore, struct semaphore, \ + compat_down_trylock, rt_down_trylock, sem) + +#define up(sem) \ + PICK_FUNC_1ARG(struct compat_semaphore, struct semaphore, \ + compat_up, rt_up, sem) + +#define sem_is_locked(sem) \ + PICK_FUNC_1ARG_RET(struct compat_semaphore, struct semaphore, \ + compat_sem_is_locked, rt_sem_is_locked, sem) + +#define sema_count(sem) \ + PICK_FUNC_1ARG_RET(struct compat_semaphore, struct semaphore, \ + compat_sema_count, rt_sema_count, sem) + +/* + * rwsems: + */ + +#define __RWSEM_INITIALIZER(name) \ + { .lock = __RT_MUTEX_INITIALIZER(name.lock) } + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +extern void fastcall __rt_rwsem_init(struct rw_semaphore *rwsem, char *name, + struct lock_class_key *key); + +# define rt_init_rwsem(sem) \ 
+do { \ + static struct lock_class_key __key; \ + \ + __rt_rwsem_init((sem), #sem, &__key); \ +} while (0) + +extern void fastcall rt_down_write(struct rw_semaphore *rwsem); +extern void fastcall +rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); +extern void fastcall +rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); +extern void fastcall rt_down_read(struct rw_semaphore *rwsem); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void fastcall rt_down_read_non_owner(struct rw_semaphore *rwsem); +#else +# define rt_down_read_non_owner(rwsem) rt_down_read(rwsem) +#endif +extern int fastcall rt_down_write_trylock(struct rw_semaphore *rwsem); +extern int fastcall rt_down_read_trylock(struct rw_semaphore *rwsem); +extern void fastcall rt_up_read(struct rw_semaphore *rwsem); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void fastcall rt_up_read_non_owner(struct rw_semaphore *rwsem); +#else +# define rt_up_read_non_owner(rwsem) rt_up_read(rwsem) +#endif +extern void fastcall rt_up_write(struct rw_semaphore *rwsem); +extern void fastcall rt_downgrade_write(struct rw_semaphore *rwsem); + +# define rt_rwsem_is_locked(rws) (rt_mutex_is_locked(&(rws)->lock)) + +#define init_rwsem(rwsem) \ + PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_init_rwsem, rt_init_rwsem, rwsem) + +#define down_read(rwsem) \ + PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_down_read, rt_down_read, rwsem) + +#define down_read_non_owner(rwsem) \ + PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_down_read_non_owner, rt_down_read_non_owner, rwsem) + +#define down_read_trylock(rwsem) \ + PICK_FUNC_1ARG_RET(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_down_read_trylock, rt_down_read_trylock, rwsem) + +#define down_write(rwsem) \ + PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_down_write, rt_down_write, rwsem) + +#define down_read_nested(rwsem, subclass) \ + PICK_FUNC_2ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_down_read_nested, rt_down_read_nested, rwsem, subclass) + + +#define down_write_nested(rwsem, subclass) \ + PICK_FUNC_2ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_down_write_nested, rt_down_write_nested, rwsem, subclass) + +#define down_write_trylock(rwsem) \ + PICK_FUNC_1ARG_RET(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_down_write_trylock, rt_down_write_trylock, rwsem) + +#define up_read(rwsem) \ + PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_up_read, rt_up_read, rwsem) + +#define up_read_non_owner(rwsem) \ + PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_up_read_non_owner, rt_up_read_non_owner, rwsem) + +#define up_write(rwsem) \ + PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_up_write, rt_up_write, rwsem) + +#define downgrade_write(rwsem) \ + PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_downgrade_write, rt_downgrade_write, rwsem) + +#define rwsem_is_locked(rwsem) \ + PICK_FUNC_1ARG_RET(struct compat_rw_semaphore, struct rw_semaphore, \ + compat_rwsem_is_locked, rt_rwsem_is_locked, rwsem) + +#endif /* CONFIG_PREEMPT_RT */ + +#endif + Index: linux-rt-rebase.q/include/linux/rtmutex.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/rtmutex.h +++ linux-rt-rebase.q/include/linux/rtmutex.h @@ -24,7 +24,7 @@ * @owner: the mutex owner */ struct rt_mutex { - 
spinlock_t wait_lock; + raw_spinlock_t wait_lock; struct plist_head wait_list; struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES @@ -63,7 +63,7 @@ struct hrtimer_sleeper; #endif #define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + { .wait_lock = RAW_SPIN_LOCK_UNLOCKED(mutexname) \ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ , .owner = NULL \ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} Index: linux-rt-rebase.q/include/linux/rwsem-spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/rwsem-spinlock.h +++ linux-rt-rebase.q/include/linux/rwsem-spinlock.h @@ -28,7 +28,7 @@ struct rwsem_waiter; * - if activity is -1 then there is one active writer * - if wait_list is not empty, then there are processes waiting for the semaphore */ -struct rw_semaphore { +struct compat_rw_semaphore { __s32 activity; spinlock_t wait_lock; struct list_head wait_list; @@ -43,33 +43,32 @@ struct rw_semaphore { # define __RWSEM_DEP_MAP_INIT(lockname) #endif -#define __RWSEM_INITIALIZER(name) \ -{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } +#define __COMPAT_RWSEM_INITIALIZER(name) \ +{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define COMPAT_DECLARE_RWSEM(name) \ + struct compat_rw_semaphore name = __COMPAT_RWSEM_INITIALIZER(name) -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, +extern void __compat_init_rwsem(struct compat_rw_semaphore *sem, const char *name, struct lock_class_key *key); -#define init_rwsem(sem) \ +#define compat_init_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __compat_init_rwsem((sem), #sem, &__key); \ } while (0) -extern void FASTCALL(__down_read(struct rw_semaphore *sem)); -extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem)); -extern void FASTCALL(__down_write(struct rw_semaphore *sem)); -extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass)); -extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem)); -extern void FASTCALL(__up_read(struct rw_semaphore *sem)); -extern void FASTCALL(__up_write(struct rw_semaphore *sem)); -extern void FASTCALL(__downgrade_write(struct rw_semaphore *sem)); +extern void FASTCALL(__down_read(struct compat_rw_semaphore *sem)); +extern int FASTCALL(__down_read_trylock(struct compat_rw_semaphore *sem)); +extern void FASTCALL(__down_write(struct compat_rw_semaphore *sem)); +extern void FASTCALL(__down_write_nested(struct compat_rw_semaphore *sem, int subclass)); +extern int FASTCALL(__down_write_trylock(struct compat_rw_semaphore *sem)); +extern void FASTCALL(__up_read(struct compat_rw_semaphore *sem)); +extern void FASTCALL(__up_write(struct compat_rw_semaphore *sem)); +extern void FASTCALL(__downgrade_write(struct compat_rw_semaphore *sem)); -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline int compat_rwsem_is_locked(struct compat_rw_semaphore *sem) { return (sem->activity != 0); } Index: linux-rt-rebase.q/include/linux/rwsem.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/rwsem.h +++ linux-rt-rebase.q/include/linux/rwsem.h @@ -9,6 +9,10 @@ #include +#ifdef CONFIG_PREEMPT_RT +# include +#endif + #ifdef 
__KERNEL__ #include @@ -16,48 +20,59 @@ #include #include -struct rw_semaphore; +#ifndef CONFIG_PREEMPT_RT +/* + * On !PREEMPT_RT all rw-semaphores are compat: + */ +#define compat_rw_semaphore rw_semaphore +#endif + +struct compat_rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -#include /* use a generic implementation */ +# include /* use a generic implementation */ +# ifndef CONFIG_PREEMPT_RT +# define __RWSEM_INITIALIZER __COMPAT_RWSEM_INITIALIZER +# define DECLARE_RWSEM COMPAT_DECLARE_RWSEM +# endif #else -#include /* use an arch-specific implementation */ +# include /* use an arch-specific implementation */ #endif /* * lock for reading */ -extern void down_read(struct rw_semaphore *sem); +extern void compat_down_read(struct compat_rw_semaphore *sem); /* * trylock for reading -- returns 1 if successful, 0 if contention */ -extern int down_read_trylock(struct rw_semaphore *sem); +extern int compat_down_read_trylock(struct compat_rw_semaphore *sem); /* * lock for writing */ -extern void down_write(struct rw_semaphore *sem); +extern void compat_down_write(struct compat_rw_semaphore *sem); /* * trylock for writing -- returns 1 if successful, 0 if contention */ -extern int down_write_trylock(struct rw_semaphore *sem); +extern int compat_down_write_trylock(struct compat_rw_semaphore *sem); /* * release a read lock */ -extern void up_read(struct rw_semaphore *sem); +extern void compat_up_read(struct compat_rw_semaphore *sem); /* * release a write lock */ -extern void up_write(struct rw_semaphore *sem); +extern void compat_up_write(struct compat_rw_semaphore *sem); /* * downgrade write lock to read lock */ -extern void downgrade_write(struct rw_semaphore *sem); +extern void compat_downgrade_write(struct compat_rw_semaphore *sem); #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -73,22 +88,79 @@ extern void downgrade_write(struct rw_se * lockdep_set_class() at lock initialization time. * See Documentation/lockdep-design.txt for more details.) */ -extern void down_read_nested(struct rw_semaphore *sem, int subclass); -extern void down_write_nested(struct rw_semaphore *sem, int subclass); +extern void +compat_down_read_nested(struct compat_rw_semaphore *sem, int subclass); +extern void +compat_down_write_nested(struct compat_rw_semaphore *sem, int subclass); /* * Take/release a lock when not the owner will release it. * * [ This API should be avoided as much as possible - the * proper abstraction for this case is completions. ] */ -extern void down_read_non_owner(struct rw_semaphore *sem); -extern void up_read_non_owner(struct rw_semaphore *sem); +extern void +compat_down_read_non_owner(struct compat_rw_semaphore *sem); +extern void +compat_up_read_non_owner(struct compat_rw_semaphore *sem); #else -# define down_read_nested(sem, subclass) down_read(sem) -# define down_write_nested(sem, subclass) down_write(sem) -# define down_read_non_owner(sem) down_read(sem) -# define up_read_non_owner(sem) up_read(sem) +# define compat_down_read_nested(sem, subclass) compat_down_read(sem) +# define compat_down_write_nested(sem, subclass) compat_down_write(sem) +# define compat_down_read_non_owner(sem) compat_down_read(sem) +# define compat_up_read_non_owner(sem) compat_up_read(sem) #endif +#ifndef CONFIG_PREEMPT_RT + +#define DECLARE_RWSEM COMPAT_DECLARE_RWSEM + +/* + * NOTE, lockdep: this has to be a macro, so that separate class-keys + * get generated by the compiler, if the same function does multiple + * init_rwsem() calls to different rwsems. 
+ */ +#define init_rwsem(rwsem) compat_init_rwsem(rwsem) + +static inline void down_read(struct compat_rw_semaphore *rwsem) +{ + compat_down_read(rwsem); +} +static inline int down_read_trylock(struct compat_rw_semaphore *rwsem) +{ + return compat_down_read_trylock(rwsem); +} +static inline void down_write(struct compat_rw_semaphore *rwsem) +{ + compat_down_write(rwsem); +} +static inline int down_write_trylock(struct compat_rw_semaphore *rwsem) +{ + return compat_down_write_trylock(rwsem); +} +static inline void up_read(struct compat_rw_semaphore *rwsem) +{ + compat_up_read(rwsem); +} +static inline void up_write(struct compat_rw_semaphore *rwsem) +{ + compat_up_write(rwsem); +} +static inline void downgrade_write(struct compat_rw_semaphore *rwsem) +{ + compat_downgrade_write(rwsem); +} +static inline int rwsem_is_locked(struct compat_rw_semaphore *sem) +{ + return compat_rwsem_is_locked(sem); +} +# define down_read_nested(sem, subclass) \ + compat_down_read_nested(sem, subclass) +# define down_write_nested(sem, subclass) \ + compat_down_write_nested(sem, subclass) +# define down_read_non_owner(sem) \ + compat_down_read_non_owner(sem) +# define up_read_non_owner(sem) \ + compat_up_read_non_owner(sem) +#endif /* !CONFIG_PREEMPT_RT */ + #endif /* __KERNEL__ */ #endif /* _LINUX_RWSEM_H */ Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -176,16 +176,17 @@ print_cfs_rq(struct seq_file *m, int cpu * mistake. */ #define TASK_RUNNING 0 -#define TASK_INTERRUPTIBLE 1 -#define TASK_UNINTERRUPTIBLE 2 -#define TASK_STOPPED 4 -#define TASK_TRACED 8 +#define TASK_RUNNING_MUTEX 1 +#define TASK_INTERRUPTIBLE 2 +#define TASK_UNINTERRUPTIBLE 4 +#define TASK_STOPPED 8 +#define TASK_TRACED 16 /* in tsk->exit_state */ -#define EXIT_ZOMBIE 16 -#define EXIT_DEAD 32 +#define EXIT_ZOMBIE 32 +#define EXIT_DEAD 64 /* in tsk->state again */ -#define TASK_NONINTERACTIVE 64 -#define TASK_DEAD 128 +#define TASK_NONINTERACTIVE 128 +#define TASK_DEAD 256 #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) @@ -293,6 +294,10 @@ static inline void touch_all_softlockup_ } #endif +#ifdef CONFIG_PREEMPT_BKL +extern struct semaphore kernel_sem; +#endif + #if defined(CONFIG_PREEMPT_TRACE) || defined(CONFIG_EVENT_TRACE) extern void print_traces(struct task_struct *task); #else @@ -1221,7 +1226,7 @@ struct task_struct { spinlock_t alloc_lock; /* Protection of the PI data structures: */ - spinlock_t pi_lock; + raw_spinlock_t pi_lock; #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task */ @@ -1257,13 +1262,32 @@ struct task_struct { unsigned int lockdep_recursion; #endif -#define MAX_PREEMPT_TRACE 16 +#define MAX_PREEMPT_TRACE 25 #ifdef CONFIG_PREEMPT_TRACE unsigned long preempt_trace_eip[MAX_PREEMPT_TRACE]; unsigned long preempt_trace_parent_eip[MAX_PREEMPT_TRACE]; #endif +#define MAX_LOCK_STACK MAX_PREEMPT_TRACE +#ifdef CONFIG_DEBUG_PREEMPT + int lock_count; +# ifdef CONFIG_PREEMPT_RT + struct rt_mutex *owned_lock[MAX_LOCK_STACK]; +# endif +#endif +#ifdef CONFIG_DETECT_SOFTLOCKUP + unsigned long softlockup_count; /* Count to keep track how long the + * thread is in the kernel without + * sleeping. 
+ */ +#endif + /* realtime bits */ + +#ifdef CONFIG_DEBUG_RT_MUTEXES + void *last_kernel_lock; +#endif + /* journalling filesystem info */ void *journal_info; @@ -1444,6 +1468,7 @@ static inline void put_task_struct(struc #define PF_STARTING 0x00000002 /* being created */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ +#define PF_NOSCHED 0x00000010 /* Userspace does not expect scheduling */ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ #define PF_DUMPCORE 0x00000200 /* dumped core */ @@ -1564,6 +1589,7 @@ extern struct task_struct *curr_task(int extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); +void __yield(void); /* * The default (Linux) execution domain. @@ -1610,6 +1636,9 @@ extern void do_timer(unsigned long ticks extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state)); extern int FASTCALL(wake_up_process(struct task_struct * tsk)); +extern int FASTCALL(wake_up_process_mutex(struct task_struct * tsk)); +extern int FASTCALL(wake_up_process_sync(struct task_struct * tsk)); +extern int FASTCALL(wake_up_process_mutex_sync(struct task_struct * tsk)); extern void FASTCALL(wake_up_new_task(struct task_struct * tsk, unsigned long clone_flags)); #ifdef CONFIG_SMP @@ -1914,7 +1943,22 @@ static inline int need_resched_delayed(v * cond_resched_softirq() will enable bhs before scheduling. */ extern int cond_resched(void); -extern int cond_resched_lock(spinlock_t * lock); +extern int __cond_resched_raw_spinlock(raw_spinlock_t *lock); +extern int __cond_resched_spinlock(spinlock_t *spinlock); + +#define cond_resched_lock(lock) \ +({ \ + int __ret; \ + \ + if (TYPE_EQUAL((lock), raw_spinlock_t)) \ + __ret = __cond_resched_raw_spinlock((raw_spinlock_t *)lock);\ + else if (TYPE_EQUAL(lock, spinlock_t)) \ + __ret = __cond_resched_spinlock((spinlock_t *)lock); \ + else __ret = __bad_spinlock_type(); \ + \ + __ret; \ +}) + extern int cond_resched_softirq(void); extern int cond_resched_softirq_context(void); extern int cond_resched_hardirq_context(void); @@ -1923,12 +1967,18 @@ extern int cond_resched_hardirq_context( * Does a critical section need to be broken due to another * task waiting?: */ -#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) -# define need_lockbreak(lock) ((lock)->break_lock) +#if (defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)) || defined(CONFIG_PREEMPT_RT) +# define need_lockbreak(lock) ({ int __need = ((lock)->break_lock); if (__need) (lock)->break_lock = 0; __need; }) #else # define need_lockbreak(lock) 0 #endif +#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) +# define need_lockbreak_raw(lock) ({ int __need = ((lock)->break_lock); if (__need) (lock)->break_lock = 0; __need; }) +#else +# define need_lockbreak_raw(lock) 0 +#endif + /* * Does a critical section need to be broken due to another * task waiting or preemption being signalled: Index: linux-rt-rebase.q/include/linux/semaphore.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/linux/semaphore.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SEMAPHORE_H +#define _LINUX_SEMAPHORE_H + +#ifdef CONFIG_PREEMPT_RT +# include +#else + +#define DECLARE_MUTEX COMPAT_DECLARE_MUTEX +#define DECLARE_MUTEX_LOCKED COMPAT_DECLARE_MUTEX_LOCKED + +static inline void sema_init(struct compat_semaphore *sem, int val) +{ + compat_sema_init(sem, val); +} +static inline void init_MUTEX(struct 
compat_semaphore *sem) +{ + compat_init_MUTEX(sem); +} +static inline void init_MUTEX_LOCKED(struct compat_semaphore *sem) +{ + compat_init_MUTEX_LOCKED(sem); +} +static inline void down(struct compat_semaphore *sem) +{ + compat_down(sem); +} +static inline int down_interruptible(struct compat_semaphore *sem) +{ + return compat_down_interruptible(sem); +} +static inline int down_trylock(struct compat_semaphore *sem) +{ + return compat_down_trylock(sem); +} +static inline void up(struct compat_semaphore *sem) +{ + compat_up(sem); +} +static inline int sem_is_locked(struct compat_semaphore *sem) +{ + return compat_sem_is_locked(sem); +} +static inline int sema_count(struct compat_semaphore *sem) +{ + return compat_sema_count(sem); +} + +#endif /* CONFIG_PREEMPT_RT */ + +#endif /* _LINUX_SEMAPHORE_H */ Index: linux-rt-rebase.q/include/linux/seqlock.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/seqlock.h +++ linux-rt-rebase.q/include/linux/seqlock.h @@ -32,46 +32,72 @@ typedef struct { unsigned sequence; spinlock_t lock; -} seqlock_t; +} __seqlock_t; + +typedef struct { + unsigned sequence; + raw_spinlock_t lock; +} __raw_seqlock_t; + +#define seqlock_need_resched(seq) lock_need_resched(&(seq)->lock) + +#ifdef CONFIG_PREEMPT_RT +typedef __seqlock_t seqlock_t; +#else +typedef __raw_seqlock_t seqlock_t; +#endif + +typedef __raw_seqlock_t raw_seqlock_t; /* * These macros triggered gcc-3.x compile-time problems. We think these are * OK now. Be cautious. */ -#define __SEQLOCK_UNLOCKED(lockname) \ - { 0, __SPIN_LOCK_UNLOCKED(lockname) } +#define __RAW_SEQLOCK_UNLOCKED(lockname) \ + { 0, RAW_SPIN_LOCK_UNLOCKED(lockname) } + +#ifdef CONFIG_PREEMPT_RT +# define __SEQLOCK_UNLOCKED(lockname) { 0, __SPIN_LOCK_UNLOCKED(lockname) } +#else +# define __SEQLOCK_UNLOCKED(lockname) __RAW_SEQLOCK_UNLOCKED(lockname) +#endif #define SEQLOCK_UNLOCKED \ __SEQLOCK_UNLOCKED(old_style_seqlock_init) -#define seqlock_init(x) \ - do { \ - (x)->sequence = 0; \ - spin_lock_init(&(x)->lock); \ - } while (0) +#define raw_seqlock_init(x) \ + do { *(x) = (raw_seqlock_t) __RAW_SEQLOCK_UNLOCKED(x); spin_lock_init(&(x)->lock); } while (0) + +#define seqlock_init(x) \ + do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); spin_lock_init(&(x)->lock); } while (0) #define DEFINE_SEQLOCK(x) \ seqlock_t x = __SEQLOCK_UNLOCKED(x) +#define DEFINE_RAW_SEQLOCK(name) \ + raw_seqlock_t name __cacheline_aligned_in_smp = \ + __RAW_SEQLOCK_UNLOCKED(name) + + /* Lock out other writers and update the count. * Acts like a normal spin_lock/unlock. * Don't need preempt_disable() because that is in the spin_lock already. */ -static inline void write_seqlock(seqlock_t *sl) +static inline void __write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); ++sl->sequence; smp_wmb(); } -static inline void write_sequnlock(seqlock_t *sl) +static inline void __write_sequnlock(seqlock_t *sl) { smp_wmb(); sl->sequence++; spin_unlock(&sl->lock); } -static inline int write_tryseqlock(seqlock_t *sl) +static inline int __write_tryseqlock(seqlock_t *sl) { int ret = spin_trylock(&sl->lock); @@ -83,7 +109,7 @@ static inline int write_tryseqlock(seqlo } /* Start of read calculation -- fetch last complete writer token */ -static __always_inline unsigned read_seqbegin(const seqlock_t *sl) +static __always_inline unsigned __read_seqbegin(const seqlock_t *sl) { unsigned ret = sl->sequence; smp_rmb(); @@ -98,12 +124,118 @@ static __always_inline unsigned read_seq * * Using xor saves one conditional branch. 
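 *
 * Worked example (editor's illustration, not part of the patch), for
 * the check (iv & 1) | (sl->sequence ^ iv):
 *
 *	iv = 4, sequence = 4:  0 | (4 ^ 4) = 0  -> section was stable
 *	iv = 5, sequence = 5:  1 | 0       = 1  -> writer held the lock at start
 *	iv = 4, sequence = 6:  0 | (4 ^ 6) = 2  -> data changed, retry
 *
 * so a single OR replaces separate tests for "started on an odd
 * sequence" and "sequence changed".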
*/ -static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv) +static inline int __read_seqretry(seqlock_t *sl, unsigned iv) +{ + int ret; + + smp_rmb(); + ret = (iv & 1) | (sl->sequence ^ iv); + /* + * If invalid then serialize with the writer, to make sure we + * are not livelocking it: + */ + if (unlikely(ret)) { + unsigned long flags; + spin_lock_irqsave(&sl->lock, flags); + spin_unlock_irqrestore(&sl->lock, flags); + } + return ret; +} + +static __always_inline void __write_seqlock_raw(raw_seqlock_t *sl) +{ + spin_lock(&sl->lock); + ++sl->sequence; + smp_wmb(); +} + +static __always_inline void __write_sequnlock_raw(raw_seqlock_t *sl) +{ + smp_wmb(); + sl->sequence++; + spin_unlock(&sl->lock); +} + +static __always_inline int __write_tryseqlock_raw(raw_seqlock_t *sl) +{ + int ret = spin_trylock(&sl->lock); + + if (ret) { + ++sl->sequence; + smp_wmb(); + } + return ret; +} + +static __always_inline unsigned __read_seqbegin_raw(const raw_seqlock_t *sl) +{ + unsigned ret = sl->sequence; + smp_rmb(); + return ret; +} + +static __always_inline int __read_seqretry_raw(const raw_seqlock_t *sl, unsigned iv) { smp_rmb(); return (iv & 1) | (sl->sequence ^ iv); } +extern int __bad_seqlock_type(void); + +#define PICK_SEQOP(op, lock) \ +do { \ + if (TYPE_EQUAL((lock), raw_seqlock_t)) \ + op##_raw((raw_seqlock_t *)(lock)); \ + else if (TYPE_EQUAL((lock), seqlock_t)) \ + op((seqlock_t *)(lock)); \ + else __bad_seqlock_type(); \ +} while (0) + +#define PICK_SEQOP_RET(op, lock) \ +({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((lock), raw_seqlock_t)) \ + __ret = op##_raw((raw_seqlock_t *)(lock)); \ + else if (TYPE_EQUAL((lock), seqlock_t)) \ + __ret = op((seqlock_t *)(lock)); \ + else __ret = __bad_seqlock_type(); \ + \ + __ret; \ +}) + +#define PICK_SEQOP_CONST_RET(op, lock) \ +({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((lock), raw_seqlock_t)) \ + __ret = op##_raw((const raw_seqlock_t *)(lock));\ + else if (TYPE_EQUAL((lock), seqlock_t)) \ + __ret = op((seqlock_t *)(lock)); \ + else __ret = __bad_seqlock_type(); \ + \ + __ret; \ +}) + +#define PICK_SEQOP2_CONST_RET(op, lock, arg) \ + ({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((lock), raw_seqlock_t)) \ + __ret = op##_raw((const raw_seqlock_t *)(lock), (arg)); \ + else if (TYPE_EQUAL((lock), seqlock_t)) \ + __ret = op((seqlock_t *)(lock), (arg)); \ + else __ret = __bad_seqlock_type(); \ + \ + __ret; \ +}) + + +#define write_seqlock(sl) PICK_SEQOP(__write_seqlock, sl) +#define write_sequnlock(sl) PICK_SEQOP(__write_sequnlock, sl) +#define write_tryseqlock(sl) PICK_SEQOP_RET(__write_tryseqlock, sl) +#define read_seqbegin(sl) PICK_SEQOP_CONST_RET(__read_seqbegin, sl) +#define read_seqretry(sl, iv) PICK_SEQOP2_CONST_RET(__read_seqretry, sl, iv) /* * Version using sequence counter only. @@ -155,30 +287,51 @@ static inline void write_seqcount_end(se s->sequence++; } +#define PICK_IRQOP(op, lock) \ +do { \ + if (TYPE_EQUAL((lock), raw_seqlock_t)) \ + op(); \ + else if (TYPE_EQUAL((lock), seqlock_t)) \ + { /* nothing */ } \ + else __bad_seqlock_type(); \ +} while (0) + +#define PICK_IRQOP2(op, arg, lock) \ +do { \ + if (TYPE_EQUAL((lock), raw_seqlock_t)) \ + op(arg); \ + else if (TYPE_EQUAL(lock, seqlock_t)) \ + { /* nothing */ } \ + else __bad_seqlock_type(); \ +} while (0) + + + /* * Possible sw/hw IRQ protected versions of the interfaces. 
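 *
 * Editor's note (illustrative, not part of the patch): the PICK_IRQOP*()
 * helpers above only perform the irq/bh operation for raw_seqlock_t;
 * for a PREEMPT_RT seqlock_t, whose underlying spinlock_t may sleep,
 * they expand to nothing. So, for made-up locks 'raw_sl' and 'rt_sl':
 *
 *	write_seqlock_irqsave(&raw_sl, flags);	disables irqs, then spins
 *	write_seqlock_irqsave(&rt_sl, flags);	only takes the sleeping lock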
*/ #define write_seqlock_irqsave(lock, flags) \ - do { local_irq_save(flags); write_seqlock(lock); } while (0) + do { PICK_IRQOP2(local_irq_save, flags, lock); write_seqlock(lock); } while (0) #define write_seqlock_irq(lock) \ - do { local_irq_disable(); write_seqlock(lock); } while (0) + do { PICK_IRQOP(local_irq_disable, lock); write_seqlock(lock); } while (0) #define write_seqlock_bh(lock) \ - do { local_bh_disable(); write_seqlock(lock); } while (0) + do { PICK_IRQOP(local_bh_disable, lock); write_seqlock(lock); } while (0) #define write_sequnlock_irqrestore(lock, flags) \ - do { write_sequnlock(lock); local_irq_restore(flags); } while(0) + do { write_sequnlock(lock); PICK_IRQOP2(local_irq_restore, flags, lock); preempt_check_resched(); } while(0) #define write_sequnlock_irq(lock) \ - do { write_sequnlock(lock); local_irq_enable(); } while(0) + do { write_sequnlock(lock); PICK_IRQOP(local_irq_enable, lock); preempt_check_resched(); } while(0) #define write_sequnlock_bh(lock) \ - do { write_sequnlock(lock); local_bh_enable(); } while(0) + do { write_sequnlock(lock); PICK_IRQOP(local_bh_enable, lock); } while(0) #define read_seqbegin_irqsave(lock, flags) \ - ({ local_irq_save(flags); read_seqbegin(lock); }) + ({ PICK_IRQOP2(local_irq_save, flags, lock); read_seqbegin(lock); }) #define read_seqretry_irqrestore(lock, iv, flags) \ ({ \ int ret = read_seqretry(lock, iv); \ - local_irq_restore(flags); \ + PICK_IRQOP2(local_irq_restore, flags, lock); \ + preempt_check_resched(); \ ret; \ }) Index: linux-rt-rebase.q/include/linux/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/spinlock.h +++ linux-rt-rebase.q/include/linux/spinlock.h @@ -44,6 +44,42 @@ * builds the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. 
+ * + * + * Public types and naming conventions: + * ------------------------------------ + * spinlock_t: type: sleep-lock + * raw_spinlock_t: type: spin-lock (debug) + * + * spin_lock([raw_]spinlock_t): API: acquire lock, both types + * + * + * Internal types and naming conventions: + * ------------------------------------- + * __raw_spinlock_t: type: lowlevel spin-lock + * + * _spin_lock(struct rt_mutex): API: acquire sleep-lock + * __spin_lock(raw_spinlock_t): API: acquire spin-lock (highlevel) + * _raw_spin_lock(raw_spinlock_t): API: acquire spin-lock (debug) + * __raw_spin_lock(__raw_spinlock_t): API: acquire spin-lock (lowlevel) + * + * + * spin_lock(raw_spinlock_t) translates into the following chain of + * calls/inlines/macros, if spin-lock debugging is enabled: + * + * spin_lock() [include/linux/spinlock.h] + * -> __spin_lock() [kernel/spinlock.c] + * -> _raw_spin_lock() [lib/spinlock_debug.c] + * -> __raw_spin_lock() [include/asm/spinlock.h] + * + * spin_lock(spinlock_t) translates into the following chain of + * calls/inlines/macros: + * + * spin_lock() [include/linux/spinlock.h] + * -> _spin_lock() [include/linux/spinlock.h] + * -> rt_spin_lock() [kernel/rtmutex.c] + * -> rt_spin_lock_fastlock() [kernel/rtmutex.c] + * -> rt_spin_lock_slowlock() [kernel/rtmutex.c] */ #include @@ -51,29 +87,14 @@ #include #include #include +#include #include #include +#include #include /* - * Must define these before including other files, inline functions need them - */ -#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME - -#define LOCK_SECTION_START(extra) \ - ".subsection 1\n\t" \ - extra \ - ".ifndef " LOCK_SECTION_NAME "\n\t" \ - LOCK_SECTION_NAME ":\n\t" \ - ".endif\n" - -#define LOCK_SECTION_END \ - ".previous\n\t" - -#define __lockfunc fastcall __attribute__((section(".spinlock.text"))) - -/* * Pull the raw_spinlock_t and raw_rwlock_t definitions: */ #include @@ -89,42 +110,10 @@ extern int __lockfunc generic__raw_read_ # include #endif -#ifdef CONFIG_DEBUG_SPINLOCK - extern void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key); -# define spin_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __spin_lock_init((lock), #lock, &__key); \ -} while (0) - -#else -# define spin_lock_init(lock) \ - do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) -#endif - -#ifdef CONFIG_DEBUG_SPINLOCK - extern void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key); -# define rwlock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __rwlock_init((lock), #lock, &__key); \ -} while (0) -#else -# define rwlock_init(lock) \ - do { *(lock) = RW_LOCK_UNLOCKED; } while (0) -#endif - -#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) - -/** - * spin_unlock_wait - wait until the spinlock gets unlocked - * @lock: the spinlock in question. 
+/* + * Pull the RT types: */ -#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) +#include /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -136,16 +125,16 @@ do { \ #endif #ifdef CONFIG_DEBUG_SPINLOCK - extern void _raw_spin_lock(spinlock_t *lock); -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) - extern int _raw_spin_trylock(spinlock_t *lock); - extern void _raw_spin_unlock(spinlock_t *lock); - extern void _raw_read_lock(rwlock_t *lock); - extern int _raw_read_trylock(rwlock_t *lock); - extern void _raw_read_unlock(rwlock_t *lock); - extern void _raw_write_lock(rwlock_t *lock); - extern int _raw_write_trylock(rwlock_t *lock); - extern void _raw_write_unlock(rwlock_t *lock); + extern __lockfunc void _raw_spin_lock(raw_spinlock_t *lock); +# define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) + extern __lockfunc int _raw_spin_trylock(raw_spinlock_t *lock); + extern __lockfunc void _raw_spin_unlock(raw_spinlock_t *lock); + extern __lockfunc void _raw_read_lock(raw_rwlock_t *lock); + extern __lockfunc int _raw_read_trylock(raw_rwlock_t *lock); + extern __lockfunc void _raw_read_unlock(raw_rwlock_t *lock); + extern __lockfunc void _raw_write_lock(raw_rwlock_t *lock); + extern __lockfunc int _raw_write_trylock(raw_rwlock_t *lock); + extern __lockfunc void _raw_write_unlock(raw_rwlock_t *lock); #else # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) # define _raw_spin_lock_flags(lock, flags) \ @@ -160,148 +149,590 @@ do { \ # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) #endif -#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) -#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) +extern int __bad_spinlock_type(void); +extern int __bad_rwlock_type(void); + +extern void +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); + +extern void __lockfunc rt_spin_lock(spinlock_t *lock); +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void __lockfunc rt_spin_unlock(spinlock_t *lock); +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); +extern int __lockfunc +rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); +extern int __lockfunc rt_spin_trylock(spinlock_t *lock); +extern int _atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); + +/* + * lockdep-less calls, for derived types like rwlock: + * (for trylock they can use rt_mutex_trylock() directly. 
+ */ +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); + +#ifdef CONFIG_PREEMPT_RT +# define _spin_lock(l) rt_spin_lock(l) +# define _spin_lock_nested(l, s) rt_spin_lock_nested(l, s) +# define _spin_lock_bh(l) rt_spin_lock(l) +# define _spin_lock_irq(l) rt_spin_lock(l) +# define _spin_unlock(l) rt_spin_unlock(l) +# define _spin_unlock_no_resched(l) rt_spin_unlock(l) +# define _spin_unlock_bh(l) rt_spin_unlock(l) +# define _spin_unlock_irq(l) rt_spin_unlock(l) +# define _spin_unlock_irqrestore(l, f) rt_spin_unlock(l) +static inline unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +{ + rt_spin_lock(lock); + return 0; +} +static inline unsigned long __lockfunc +_spin_lock_irqsave_nested(spinlock_t *lock, int subclass) +{ + rt_spin_lock_nested(lock, subclass); + return 0; +} +#else +static inline unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +{ + return 0; +} +static inline unsigned long __lockfunc +_spin_lock_irqsave_nested(spinlock_t *lock, int subclass) +{ + return 0; +} +# define _spin_lock(l) do { } while (0) +# define _spin_lock_nested(l, s) do { } while (0) +# define _spin_lock_bh(l) do { } while (0) +# define _spin_lock_irq(l) do { } while (0) +# define _spin_unlock(l) do { } while (0) +# define _spin_unlock_no_resched(l) do { } while (0) +# define _spin_unlock_bh(l) do { } while (0) +# define _spin_unlock_irq(l) do { } while (0) +# define _spin_unlock_irqrestore(l, f) do { } while (0) +#endif + +#define _spin_lock_init(sl, n, f, l) \ +do { \ + static struct lock_class_key __key; \ + \ + __rt_spin_lock_init(sl, n, &__key); \ +} while (0) + +# ifdef CONFIG_PREEMPT_RT +# define _spin_can_lock(l) (!rt_mutex_is_locked(&(l)->lock)) +# define _spin_is_locked(l) rt_mutex_is_locked(&(l)->lock) +# define _spin_unlock_wait(l) rt_spin_unlock_wait(l) + +# define _spin_trylock(l) rt_spin_trylock(l) +# define _spin_trylock_bh(l) rt_spin_trylock(l) +# define _spin_trylock_irq(l) rt_spin_trylock(l) +# define _spin_trylock_irqsave(l,f) rt_spin_trylock_irqsave(l, f) +# else + + extern int this_should_never_be_called_on_non_rt(spinlock_t *lock); +# define TSNBCONRT(l) this_should_never_be_called_on_non_rt(l) +# define _spin_can_lock(l) TSNBCONRT(l) +# define _spin_is_locked(l) TSNBCONRT(l) +# define _spin_unlock_wait(l) TSNBCONRT(l) + +# define _spin_trylock(l) TSNBCONRT(l) +# define _spin_trylock_bh(l) TSNBCONRT(l) +# define _spin_trylock_irq(l) TSNBCONRT(l) +# define _spin_trylock_irqsave(l,f) TSNBCONRT(l) +#endif + +#undef TYPE_EQUAL +#define TYPE_EQUAL(lock, type) \ + __builtin_types_compatible_p(typeof(lock), type *) + +#define PICK_OP(op, lock) \ +do { \ + if (TYPE_EQUAL((lock), raw_spinlock_t)) \ + __spin##op((raw_spinlock_t *)(lock)); \ + else if (TYPE_EQUAL(lock, spinlock_t)) \ + _spin##op((spinlock_t *)(lock)); \ + else __bad_spinlock_type(); \ +} while (0) + +#define PICK_OP_RET(op, lock...) 
\ +({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((lock), raw_spinlock_t)) \ + __ret = __spin##op((raw_spinlock_t *)(lock)); \ + else if (TYPE_EQUAL(lock, spinlock_t)) \ + __ret = _spin##op((spinlock_t *)(lock)); \ + else __ret = __bad_spinlock_type(); \ + \ + __ret; \ +}) + +#define PICK_OP2(op, lock, flags) \ +do { \ + if (TYPE_EQUAL((lock), raw_spinlock_t)) \ + __spin##op((raw_spinlock_t *)(lock), flags); \ + else if (TYPE_EQUAL(lock, spinlock_t)) \ + _spin##op((spinlock_t *)(lock), flags); \ + else __bad_spinlock_type(); \ +} while (0) + +#define PICK_OP2_RET(op, lock, flags) \ +({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((lock), raw_spinlock_t)) \ + __ret = __spin##op((raw_spinlock_t *)(lock), flags); \ + else if (TYPE_EQUAL(lock, spinlock_t)) \ + __ret = _spin##op((spinlock_t *)(lock), flags); \ + else __bad_spinlock_type(); \ + \ + __ret; \ +}) + +extern void __lockfunc rt_write_lock(rwlock_t *rwlock); +extern void __lockfunc rt_read_lock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); +extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); +extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); +extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); +extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); +extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); +extern void +__rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); + +#define _rwlock_init(rwl, n, f, l) \ +do { \ + static struct lock_class_key __key; \ + \ + __rt_rwlock_init(rwl, n, &__key); \ +} while (0) + +#ifdef CONFIG_PREEMPT_RT +# define rt_read_can_lock(rwl) (!rt_mutex_is_locked(&(rwl)->lock)) +# define rt_write_can_lock(rwl) (!rt_mutex_is_locked(&(rwl)->lock)) +#else + extern int rt_rwlock_can_lock_never_call_on_non_rt(rwlock_t *rwlock); +# define rt_read_can_lock(rwl) rt_rwlock_can_lock_never_call_on_non_rt(rwl) +# define rt_write_can_lock(rwl) rt_rwlock_can_lock_never_call_on_non_rt(rwl) +#endif + +# define _read_can_lock(rwl) rt_read_can_lock(rwl) +# define _write_can_lock(rwl) rt_write_can_lock(rwl) + +# define _read_trylock(rwl) rt_read_trylock(rwl) +# define _write_trylock(rwl) rt_write_trylock(rwl) +# define _write_trylock_irqsave(rwl, flags) \ + rt_write_trylock_irqsave(rwl, flags) + +# define _read_lock(rwl) rt_read_lock(rwl) +# define _write_lock(rwl) rt_write_lock(rwl) +# define _read_unlock(rwl) rt_read_unlock(rwl) +# define _write_unlock(rwl) rt_write_unlock(rwl) + +# define _read_lock_bh(rwl) rt_read_lock(rwl) +# define _write_lock_bh(rwl) rt_write_lock(rwl) +# define _read_unlock_bh(rwl) rt_read_unlock(rwl) +# define _write_unlock_bh(rwl) rt_write_unlock(rwl) + +# define _read_lock_irq(rwl) rt_read_lock(rwl) +# define _write_lock_irq(rwl) rt_write_lock(rwl) +# define _read_unlock_irq(rwl) rt_read_unlock(rwl) +# define _write_unlock_irq(rwl) rt_write_unlock(rwl) + +# define _read_lock_irqsave(rwl) rt_read_lock_irqsave(rwl) +# define _write_lock_irqsave(rwl) rt_write_lock_irqsave(rwl) + +# define _read_unlock_irqrestore(rwl, f) rt_read_unlock(rwl) +# define _write_unlock_irqrestore(rwl, f) rt_write_unlock(rwl) + +#define __PICK_RW_OP(optype, op, lock) \ +do { \ + if (TYPE_EQUAL((lock), raw_rwlock_t)) \ + __##optype##op((raw_rwlock_t *)(lock)); \ + else if (TYPE_EQUAL(lock, rwlock_t)) \ + ##op((rwlock_t *)(lock)); \ + else __bad_rwlock_type(); \ +} while (0) + +#define PICK_RW_OP(optype, op, lock) \ +do { \ + if (TYPE_EQUAL((lock), raw_rwlock_t)) \ + __##optype##op((raw_rwlock_t *)(lock)); \ + else 
if (TYPE_EQUAL(lock, rwlock_t)) \ + _##optype##op((rwlock_t *)(lock)); \ + else __bad_rwlock_type(); \ +} while (0) + +#define __PICK_RW_OP_RET(optype, op, lock...) \ +({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((lock), raw_rwlock_t)) \ + __ret = __##optype##op((raw_rwlock_t *)(lock)); \ + else if (TYPE_EQUAL(lock, rwlock_t)) \ + __ret = _##optype##op((rwlock_t *)(lock)); \ + else __ret = __bad_rwlock_type(); \ + \ + __ret; \ +}) + +#define PICK_RW_OP_RET(optype, op, lock...) \ +({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((lock), raw_rwlock_t)) \ + __ret = __##optype##op((raw_rwlock_t *)(lock)); \ + else if (TYPE_EQUAL(lock, rwlock_t)) \ + __ret = _##optype##op((rwlock_t *)(lock)); \ + else __ret = __bad_rwlock_type(); \ + \ + __ret; \ +}) + +#define PICK_RW_OP2(optype, op, lock, flags) \ +do { \ + if (TYPE_EQUAL((lock), raw_rwlock_t)) \ + __##optype##op((raw_rwlock_t *)(lock), flags); \ + else if (TYPE_EQUAL(lock, rwlock_t)) \ + _##optype##op((rwlock_t *)(lock), flags); \ + else __bad_rwlock_type(); \ +} while (0) + +#define PICK_RW_OP2_RET(optype, op, lock, flags) \ +({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL((lock), raw_rwlock_t)) \ + __ret = __##optype##op((raw_rwlock_t *)(lock), flags); \ + else if (TYPE_EQUAL(lock, rwlock_t)) \ + __ret = _##optype##op((rwlock_t *)(lock), flags); \ + else __bad_rwlock_type(); \ + \ + __ret; \ +}) + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define _raw_spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_spin_lock_init((lock), #lock, &__key); \ +} while (0) + +#else +#define __raw_spin_lock_init(lock) \ + do { *(lock) = RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) +# define _raw_spin_lock_init(lock) __raw_spin_lock_init(lock) +#endif + +#define PICK_OP_INIT(op, lock) \ +do { \ + if (TYPE_EQUAL((lock), raw_spinlock_t)) \ + _raw_spin##op((raw_spinlock_t *)(lock)); \ + else if (TYPE_EQUAL(lock, spinlock_t)) \ + _spin##op((spinlock_t *)(lock), #lock, __FILE__, __LINE__); \ + else __bad_spinlock_type(); \ +} while (0) + + +#define spin_lock_init(lock) PICK_OP_INIT(_lock_init, lock) + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __raw_rwlock_init(raw_rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define _raw_rwlock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +#define __raw_rwlock_init(lock) \ + do { *(lock) = RAW_RW_LOCK_UNLOCKED(lock); } while (0) +# define _raw_rwlock_init(lock) __raw_rwlock_init(lock) +#endif + +#define __PICK_RW_OP_INIT(optype, op, lock) \ +do { \ + if (TYPE_EQUAL((lock), raw_rwlock_t)) \ + _raw_##optype##op((raw_rwlock_t *)(lock)); \ + else if (TYPE_EQUAL(lock, rwlock_t)) \ + _##optype##op((rwlock_t *)(lock), #lock, __FILE__, __LINE__);\ + else __bad_spinlock_type(); \ +} while (0) + +#define rwlock_init(lock) __PICK_RW_OP_INIT(rwlock, _init, lock) + +#define __spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) + +#define spin_is_locked(lock) PICK_OP_RET(_is_locked, lock) + +#define __spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) +#define spin_unlock_wait(lock) PICK_OP(_unlock_wait, lock) /* * Define the various spin_lock and rw_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various * methods are defined as nops in the case they are not required. 
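 *
 * Editor's sketch (not part of the patch): the PICK_OP*() machinery used
 * below dispatches at compile time via TYPE_EQUAL(), i.e.
 * __builtin_types_compatible_p(typeof(lock), type *), so a single API
 * covers both lock types:
 *
 *	spinlock_t s;		sleeping lock under PREEMPT_RT
 *	raw_spinlock_t r;	always a spinning lock
 *
 *	spin_lock(&s);		expands to _spin_lock()  (rtmutex based on -rt)
 *	spin_lock(&r);		expands to __spin_lock() (the real spinning lock)
 *
 * Any other pointer type falls through to __bad_spinlock_type().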
*/ -#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) -#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) -#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) +// #define spin_trylock(lock) _spin_trylock(lock) +#define spin_trylock(lock) __cond_lock(lock, PICK_OP_RET(_trylock, lock)) + +//#define read_trylock(lock) _read_trylock(lock) +#define read_trylock(lock) __cond_lock(lock, PICK_RW_OP_RET(read, _trylock, lock)) + +//#define write_trylock(lock) _write_trylock(lock) +#define write_trylock(lock) __cond_lock(lock, PICK_RW_OP_RET(write, _trylock, lock)) + +#define write_trylock_irqsave(lock, flags) \ + __cond_lock(lock, PICK_RW_OP2_RET(write, _trylock_irqsave, lock, &flags)) + +#define __spin_can_lock(lock) __raw_spin_can_lock(&(lock)->raw_lock) +#define __read_can_lock(lock) __raw_read_can_lock(&(lock)->raw_lock) +#define __write_can_lock(lock) __raw_write_can_lock(&(lock)->raw_lock) + +#define spin_can_lock(lock) \ + __cond_lock(lock, PICK_OP_RET(_can_lock, lock)) -#define spin_lock(lock) _spin_lock(lock) +#define read_can_lock(lock) \ + __cond_lock(lock, PICK_RW_OP_RET(read, _can_lock, lock)) + +#define write_can_lock(lock) \ + __cond_lock(lock, PICK_RW_OP_RET(write, _can_lock, lock)) + +// #define spin_lock(lock) _spin_lock(lock) +#define spin_lock(lock) PICK_OP(_lock, lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define spin_lock_nested(lock, subclass) PICK_OP2(_lock_nested, lock, subclass) #else -# define spin_lock_nested(lock, subclass) _spin_lock(lock) +# define spin_lock_nested(lock, subclass) spin_lock(lock) #endif -#define write_lock(lock) _write_lock(lock) -#define read_lock(lock) _read_lock(lock) +//#define write_lock(lock) _write_lock(lock) +#define write_lock(lock) PICK_RW_OP(write, _lock, lock) -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +// #define read_lock(lock) _read_lock(lock) +#define read_lock(lock) PICK_RW_OP(read, _lock, lock) -#define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) -#define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) -#define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) +# define spin_lock_irqsave(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + flags = PICK_OP_RET(_lock_irqsave, lock); \ +} while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - flags = _spin_lock_irqsave_nested(lock, subclass) +# define spin_lock_irqsave_nested(lock, flags, subclass) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + flags = PICK_OP2_RET(_lock_irqsave_nested, lock, subclass); \ +} while (0) #else -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - flags = _spin_lock_irqsave(lock) +# define spin_lock_irqsave_nested(lock, flags, subclass) \ + spin_lock_irqsave(lock, flags) #endif -#else +# define read_lock_irqsave(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + flags = PICK_RW_OP_RET(read, _lock_irqsave, lock); \ +} while (0) -#define spin_lock_irqsave(lock, flags) _spin_lock_irqsave(lock, flags) -#define read_lock_irqsave(lock, flags) _read_lock_irqsave(lock, flags) -#define write_lock_irqsave(lock, flags) _write_lock_irqsave(lock, flags) -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - spin_lock_irqsave(lock, flags) +# define write_lock_irqsave(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + flags = PICK_RW_OP_RET(write, _lock_irqsave, lock); \ +} while (0) -#endif +// #define 
spin_lock_irq(lock) _spin_lock_irq(lock) +// #define spin_lock_bh(lock) _spin_lock_bh(lock) +#define spin_lock_irq(lock) PICK_OP(_lock_irq, lock) +#define spin_lock_bh(lock) PICK_OP(_lock_bh, lock) + +// #define read_lock_irq(lock) _read_lock_irq(lock) +// #define read_lock_bh(lock) _read_lock_bh(lock) +#define read_lock_irq(lock) PICK_RW_OP(read, _lock_irq, lock) +#define read_lock_bh(lock) PICK_RW_OP(read, _lock_bh, lock) + +// #define write_lock_irq(lock) _write_lock_irq(lock) +// #define write_lock_bh(lock) _write_lock_bh(lock) +#define write_lock_irq(lock) PICK_RW_OP(write, _lock_irq, lock) +#define write_lock_bh(lock) PICK_RW_OP(write, _lock_bh, lock) + +// #define spin_unlock(lock) _spin_unlock(lock) +// #define write_unlock(lock) _write_unlock(lock) +// #define read_unlock(lock) _read_unlock(lock) +#define spin_unlock(lock) PICK_OP(_unlock, lock) +#define read_unlock(lock) PICK_RW_OP(read, _unlock, lock) +#define write_unlock(lock) PICK_RW_OP(write, _unlock, lock) + +// #define spin_unlock(lock) _spin_unlock_no_resched(lock) +#define spin_unlock_no_resched(lock) \ + PICK_OP(_unlock_no_resched, lock) + +//#define spin_unlock_irqrestore(lock, flags) +// _spin_unlock_irqrestore(lock, flags) +//#define spin_unlock_irq(lock) _spin_unlock_irq(lock) +//#define spin_unlock_bh(lock) _spin_unlock_bh(lock) +#define spin_unlock_irqrestore(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + PICK_OP2(_unlock_irqrestore, lock, flags); \ +} while (0) -#define spin_lock_irq(lock) _spin_lock_irq(lock) -#define spin_lock_bh(lock) _spin_lock_bh(lock) +#define spin_unlock_irq(lock) PICK_OP(_unlock_irq, lock) +#define spin_unlock_bh(lock) PICK_OP(_unlock_bh, lock) -#define read_lock_irq(lock) _read_lock_irq(lock) -#define read_lock_bh(lock) _read_lock_bh(lock) +// #define read_unlock_irqrestore(lock, flags) +// _read_unlock_irqrestore(lock, flags) +// #define read_unlock_irq(lock) _read_unlock_irq(lock) +// #define read_unlock_bh(lock) _read_unlock_bh(lock) +#define read_unlock_irqrestore(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + PICK_RW_OP2(read, _unlock_irqrestore, lock, flags); \ +} while (0) + +#define read_unlock_irq(lock) PICK_RW_OP(read, _unlock_irq, lock) +#define read_unlock_bh(lock) PICK_RW_OP(read, _unlock_bh, lock) + +// #define write_unlock_irqrestore(lock, flags) +// _write_unlock_irqrestore(lock, flags) +// #define write_unlock_irq(lock) _write_unlock_irq(lock) +// #define write_unlock_bh(lock) _write_unlock_bh(lock) +#define write_unlock_irqrestore(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + PICK_RW_OP2(write, _unlock_irqrestore, lock, flags); \ +} while (0) +#define write_unlock_irq(lock) PICK_RW_OP(write, _unlock_irq, lock) +#define write_unlock_bh(lock) PICK_RW_OP(write, _unlock_bh, lock) + +// #define spin_trylock_bh(lock) _spin_trylock_bh(lock) +#define spin_trylock_bh(lock) __cond_lock(lock, PICK_OP_RET(_trylock_bh, lock)) + +// #define spin_trylock_irq(lock) + +#define spin_trylock_irq(lock) __cond_lock(lock, PICK_OP_RET(_trylock_irq, lock)) + +// #define spin_trylock_irqsave(lock, flags) + +#define spin_trylock_irqsave(lock, flags) \ + __cond_lock(lock, PICK_OP2_RET(_trylock_irqsave, lock, &flags)) + +/* "lock on reference count zero" */ +#ifndef ATOMIC_DEC_AND_LOCK +# include + extern int __atomic_dec_and_spin_lock(atomic_t *atomic, raw_spinlock_t *lock); +#endif + +#define atomic_dec_and_lock(atomic, lock) \ +__cond_lock(lock, ({ \ + unsigned long __ret; \ + \ + if (TYPE_EQUAL(lock, raw_spinlock_t)) \ + __ret = 
__atomic_dec_and_spin_lock(atomic, \ + (raw_spinlock_t *)(lock)); \ + else if (TYPE_EQUAL(lock, spinlock_t)) \ + __ret = _atomic_dec_and_spin_lock(atomic, \ + (spinlock_t *)(lock)); \ + else __ret = __bad_spinlock_type(); \ + \ + __ret; \ +})) -#define write_lock_irq(lock) _write_lock_irq(lock) -#define write_lock_bh(lock) _write_lock_bh(lock) /* - * We inline the unlock functions in the nondebug case: + * bit-based spin_lock() + * + * Don't use this unless you really need to: spin_lock() and spin_unlock() + * are significantly faster. */ -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ - !defined(CONFIG_SMP) -# define spin_unlock(lock) _spin_unlock(lock) -# define read_unlock(lock) _read_unlock(lock) -# define write_unlock(lock) _write_unlock(lock) -# define spin_unlock_irq(lock) _spin_unlock_irq(lock) -# define read_unlock_irq(lock) _read_unlock_irq(lock) -# define write_unlock_irq(lock) _write_unlock_irq(lock) -#else -# define spin_unlock(lock) \ - do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define read_unlock(lock) \ - do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define write_unlock(lock) \ - do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define spin_unlock_irq(lock) \ -do { \ - __raw_spin_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) -# define read_unlock_irq(lock) \ -do { \ - __raw_read_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) -# define write_unlock_irq(lock) \ -do { \ - __raw_write_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) +static inline void bit_spin_lock(int bitnum, unsigned long *addr) +{ + /* + * Assuming the lock is uncontended, this never enters + * the body of the outer loop. If it is contended, then + * within the inner loop a non-atomic test is used to + * busywait with less bus contention for a good time to + * attempt to acquire the lock bit. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) + while (test_and_set_bit(bitnum, addr)) + while (test_bit(bitnum, addr)) + cpu_relax(); #endif + __acquire(bitlock); +} -#define spin_unlock_irqrestore(lock, flags) \ - _spin_unlock_irqrestore(lock, flags) -#define spin_unlock_bh(lock) _spin_unlock_bh(lock) - -#define read_unlock_irqrestore(lock, flags) \ - _read_unlock_irqrestore(lock, flags) -#define read_unlock_bh(lock) _read_unlock_bh(lock) - -#define write_unlock_irqrestore(lock, flags) \ - _write_unlock_irqrestore(lock, flags) -#define write_unlock_bh(lock) _write_unlock_bh(lock) - -#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) - -#define spin_trylock_irq(lock) \ -({ \ - local_irq_disable(); \ - spin_trylock(lock) ? \ - 1 : ({ local_irq_enable(); 0; }); \ -}) +/* + * Return true if it was acquired + */ +static inline int bit_spin_trylock(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) + if (test_and_set_bit(bitnum, addr)) + return 0; +#endif + __acquire(bitlock); + return 1; +} -#define spin_trylock_irqsave(lock, flags) \ -({ \ - local_irq_save(flags); \ - spin_trylock(lock) ? 
\ - 1 : ({ local_irq_restore(flags); 0; }); \ -}) +/* + * bit-based spin_unlock() + */ +static inline void bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) + BUG_ON(!test_bit(bitnum, addr)); + smp_mb__before_clear_bit(); + clear_bit(bitnum, addr); +#endif + __release(bitlock); +} -#define write_trylock_irqsave(lock, flags) \ -({ \ - local_irq_save(flags); \ - write_trylock(lock) ? \ - 1 : ({ local_irq_restore(flags); 0; }); \ -}) +/* + * Return true if the lock is held. + */ +static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) + return test_bit(bitnum, addr); +#else + return 1; +#endif +} -#define write_trylock_irqsave(lock, flags) \ -({ \ - local_irq_save(flags); \ - write_trylock(lock) ? \ - 1 : ({ local_irq_restore(flags); 0; }); \ -}) +/** + * __raw_spin_can_lock - would __raw_spin_trylock() succeed? + * @lock: the spinlock in question. + */ +#define __raw_spin_can_lock(lock) (!__raw_spin_is_locked(lock)) /* * Locks two spinlocks l1 and l2. * l1_first indicates if spinlock l1 should be taken first. */ -static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2, - bool l1_first) +static inline void +raw_double_spin_lock(raw_spinlock_t *l1, raw_spinlock_t *l2, bool l1_first) + __acquires(l1) + __acquires(l2) +{ + if (l1_first) { + spin_lock(l1); + spin_lock(l2); + } else { + spin_lock(l2); + spin_lock(l1); + } +} + +static inline void +double_spin_lock(spinlock_t *l1, spinlock_t *l2, bool l1_first) __acquires(l1) __acquires(l2) { @@ -314,13 +745,15 @@ static inline void double_spin_lock(spin } } + /* * Unlocks two spinlocks l1 and l2. * l1_taken_first indicates if spinlock l1 was taken first and therefore * should be released after spinlock l2. */ -static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2, - bool l1_taken_first) +static inline void +raw_double_spin_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2, + bool l1_taken_first) __releases(l1) __releases(l2) { @@ -333,24 +766,19 @@ static inline void double_spin_unlock(sp } } -/* - * Pull the atomic_t declaration: - * (asm-mips/atomic.h needs above definitions) - */ -#include -/** - * atomic_dec_and_lock - lock on reaching reference count zero - * @atomic: the atomic counter - * @lock: the spinlock in question - */ -extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); -#define atomic_dec_and_lock(atomic, lock) \ - __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) - -/** - * spin_can_lock - would spin_trylock() succeed? - * @lock: the spinlock in question. 
- */ -#define spin_can_lock(lock) (!spin_is_locked(lock)) +static inline void +double_spin_unlock(spinlock_t *l1, spinlock_t *l2, bool l1_taken_first) + __releases(l1) + __releases(l2) +{ + if (l1_taken_first) { + spin_unlock(l2); + spin_unlock(l1); + } else { + spin_unlock(l1); + spin_unlock(l2); + } +} #endif /* __LINUX_SPINLOCK_H */ + Index: linux-rt-rebase.q/include/linux/spinlock_api_smp.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/spinlock_api_smp.h +++ linux-rt-rebase.q/include/linux/spinlock_api_smp.h @@ -19,43 +19,58 @@ int in_lock_functions(unsigned long addr #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) -void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) - __acquires(lock); -void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) - __acquires(lock); -unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) - __acquires(lock); -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) - __acquires(lock); -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) - __acquires(lock); -int __lockfunc _spin_trylock(spinlock_t *lock); -int __lockfunc _read_trylock(rwlock_t *lock); -int __lockfunc _write_trylock(rwlock_t *lock); -int __lockfunc _spin_trylock_bh(spinlock_t *lock); -void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) - __releases(lock); -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) - __releases(lock); -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) - __releases(lock); +#define ACQUIRE_SPIN __acquires(lock) +#define ACQUIRE_RW __acquires(lock) +#define RELEASE_SPIN __releases(lock) +#define RELEASE_RW __releases(lock) + +void __lockfunc __spin_lock(raw_spinlock_t *lock) ACQUIRE_SPIN; +void __lockfunc __spin_lock_nested(raw_spinlock_t *lock, int subclass) + ACQUIRE_SPIN; +void __lockfunc __read_lock(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __write_lock(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __spin_lock_bh(raw_spinlock_t *lock) ACQUIRE_SPIN; +void __lockfunc __read_lock_bh(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __write_lock_bh(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __spin_lock_irq(raw_spinlock_t *lock) ACQUIRE_SPIN; +void __lockfunc 
__read_lock_irq(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __write_lock_irq(raw_rwlock_t *lock) ACQUIRE_RW; +unsigned long __lockfunc __spin_lock_irqsave(raw_spinlock_t *lock) + ACQUIRE_SPIN; +unsigned long __lockfunc +__spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) ACQUIRE_SPIN; +unsigned long __lockfunc __read_lock_irqsave(raw_rwlock_t *lock) + ACQUIRE_RW; +unsigned long __lockfunc __write_lock_irqsave(raw_rwlock_t *lock) + ACQUIRE_RW; +int __lockfunc __spin_trylock(raw_spinlock_t *lock); +int __lockfunc +__spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags); +int __lockfunc __read_trylock(raw_rwlock_t *lock); +int __lockfunc __write_trylock(raw_rwlock_t *lock); +int __lockfunc +__write_trylock_irqsave(raw_rwlock_t *lock, unsigned long *flags); +int __lockfunc __spin_trylock_bh(raw_spinlock_t *lock); +int __lockfunc __spin_trylock_irq(raw_spinlock_t *lock); +void __lockfunc __spin_unlock(raw_spinlock_t *lock) RELEASE_SPIN; +void __lockfunc __spin_unlock_no_resched(raw_spinlock_t *lock) + RELEASE_SPIN; +void __lockfunc __read_unlock(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __write_unlock(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __spin_unlock_bh(raw_spinlock_t *lock) RELEASE_SPIN; +void __lockfunc __read_unlock_bh(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __write_unlock_bh(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __spin_unlock_irq(raw_spinlock_t *lock) RELEASE_SPIN; +void __lockfunc __read_unlock_irq(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __write_unlock_irq(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc +__spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) + RELEASE_SPIN; +void __lockfunc +__read_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags) + RELEASE_RW; +void +__lockfunc __write_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags) + RELEASE_RW; #endif /* __LINUX_SPINLOCK_API_SMP_H */ Index: linux-rt-rebase.q/include/linux/spinlock_api_up.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/spinlock_api_up.h +++ linux-rt-rebase.q/include/linux/spinlock_api_up.h @@ -33,12 +33,22 @@ #define __LOCK_IRQ(lock) \ do { local_irq_disable(); __LOCK(lock); } while (0) -#define __LOCK_IRQSAVE(lock, flags) \ - do { local_irq_save(flags); __LOCK(lock); } while (0) +#define __LOCK_IRQSAVE(lock) \ + ({ unsigned long __flags; local_irq_save(__flags); __LOCK(lock); __flags; }) + +#define __TRYLOCK_IRQSAVE(lock, flags) \ + ({ local_irq_save(*(flags)); __LOCK(lock); 1; }) + +#define __spin_trylock_irqsave(lock, flags) __TRYLOCK_IRQSAVE(lock, flags) + +#define __write_trylock_irqsave(lock, flags) __TRYLOCK_IRQSAVE(lock, flags) #define __UNLOCK(lock) \ do { preempt_enable(); __release(lock); (void)(lock); } while (0) +#define __UNLOCK_NO_RESCHED(lock) \ + do { __preempt_enable_no_resched(); __release(lock); (void)(lock); } while (0) + #define __UNLOCK_BH(lock) \ do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) @@ -48,34 +58,36 @@ #define __UNLOCK_IRQRESTORE(lock, flags) \ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) -#define _spin_lock(lock) __LOCK(lock) -#define _spin_lock_nested(lock, subclass) __LOCK(lock) -#define _read_lock(lock) __LOCK(lock) -#define _write_lock(lock) __LOCK(lock) -#define _spin_lock_bh(lock) __LOCK_BH(lock) -#define _read_lock_bh(lock) __LOCK_BH(lock) -#define _write_lock_bh(lock) __LOCK_BH(lock) -#define _spin_lock_irq(lock) __LOCK_IRQ(lock) -#define 
_read_lock_irq(lock) __LOCK_IRQ(lock) -#define _write_lock_irq(lock) __LOCK_IRQ(lock) -#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) -#define _read_trylock(lock) ({ __LOCK(lock); 1; }) -#define _write_trylock(lock) ({ __LOCK(lock); 1; }) -#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) -#define _spin_unlock(lock) __UNLOCK(lock) -#define _read_unlock(lock) __UNLOCK(lock) -#define _write_unlock(lock) __UNLOCK(lock) -#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) -#define _write_unlock_bh(lock) __UNLOCK_BH(lock) -#define _read_unlock_bh(lock) __UNLOCK_BH(lock) -#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) -#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) -#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) +#define __spin_lock(lock) __LOCK(lock) +#define __spin_lock_nested(lock, subclass) __LOCK(lock) +#define __read_lock(lock) __LOCK(lock) +#define __write_lock(lock) __LOCK(lock) +#define __spin_lock_bh(lock) __LOCK_BH(lock) +#define __read_lock_bh(lock) __LOCK_BH(lock) +#define __write_lock_bh(lock) __LOCK_BH(lock) +#define __spin_lock_irq(lock) __LOCK_IRQ(lock) +#define __read_lock_irq(lock) __LOCK_IRQ(lock) +#define __write_lock_irq(lock) __LOCK_IRQ(lock) +#define __spin_lock_irqsave(lock) __LOCK_IRQSAVE(lock) +#define __read_lock_irqsave(lock) __LOCK_IRQSAVE(lock) +#define __write_lock_irqsave(lock) __LOCK_IRQSAVE(lock) +#define __spin_trylock(lock) ({ __LOCK(lock); 1; }) +#define __read_trylock(lock) ({ __LOCK(lock); 1; }) +#define __write_trylock(lock) ({ __LOCK(lock); 1; }) +#define __spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) +#define __spin_trylock_irq(lock) ({ __LOCK_IRQ(lock); 1; }) +#define __spin_unlock(lock) __UNLOCK(lock) +#define __spin_unlock_no_resched(lock) __UNLOCK_NO_RESCHED(lock) +#define __read_unlock(lock) __UNLOCK(lock) +#define __write_unlock(lock) __UNLOCK(lock) +#define __spin_unlock_bh(lock) __UNLOCK_BH(lock) +#define __write_unlock_bh(lock) __UNLOCK_BH(lock) +#define __read_unlock_bh(lock) __UNLOCK_BH(lock) +#define __spin_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define __read_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define __write_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define __spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) +#define __read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) +#define __write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) #endif /* __LINUX_SPINLOCK_API_UP_H */ Index: linux-rt-rebase.q/include/linux/spinlock_types.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/spinlock_types.h +++ linux-rt-rebase.q/include/linux/spinlock_types.h @@ -15,10 +15,27 @@ # include #endif +/* + * Must define these before including other files, inline functions need them + */ +#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME + +#define LOCK_SECTION_START(extra) \ + ".subsection 1\n\t" \ + extra \ + ".ifndef " LOCK_SECTION_NAME "\n\t" \ + LOCK_SECTION_NAME ":\n\t" \ + ".endif\n" + +#define LOCK_SECTION_END \ + ".previous\n\t" + +#define __lockfunc fastcall 
__attribute__((section(".spinlock.text"))) + #include typedef struct { - raw_spinlock_t raw_lock; + __raw_spinlock_t raw_lock; #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) unsigned int break_lock; #endif @@ -29,12 +46,12 @@ typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif -} spinlock_t; +} raw_spinlock_t; #define SPINLOCK_MAGIC 0xdead4ead typedef struct { - raw_rwlock_t raw_lock; + __raw_rwlock_t raw_lock; #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) unsigned int break_lock; #endif @@ -45,7 +62,7 @@ typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif -} rwlock_t; +} raw_rwlock_t; #define RWLOCK_MAGIC 0xdeaf1eed @@ -64,24 +81,24 @@ typedef struct { #endif #ifdef CONFIG_DEBUG_SPINLOCK -# define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ +# define _RAW_SPIN_LOCK_UNLOCKED(lockname) \ + { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ SPIN_DEP_MAP_INIT(lockname) } -#define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ +#define _RAW_RW_LOCK_UNLOCKED(lockname) \ + { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ RW_DEP_MAP_INIT(lockname) } #else -# define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ +# define _RAW_SPIN_LOCK_UNLOCKED(lockname) \ + { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ SPIN_DEP_MAP_INIT(lockname) } -#define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ +# define _RAW_RW_LOCK_UNLOCKED(lockname) \ + { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ RW_DEP_MAP_INIT(lockname) } #endif @@ -91,10 +108,22 @@ typedef struct { * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. 
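 *
 * Editor's usage sketch (not part of the patch): the RAW_* initializers
 * and DEFINE_RAW_SPINLOCK()/DEFINE_RAW_RWLOCK() added below declare
 * locks that keep spinning even on PREEMPT_RT, e.g.
 *
 *	static DEFINE_RAW_SPINLOCK(example_lock);	(made-up name)
 *
 * while plain spinlock_t/rwlock_t users are converted to sleeping,
 * rtmutex-backed locks.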
*/ -#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) -#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) -#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) +# define RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) _RAW_SPIN_LOCK_UNLOCKED(lockname) + +# define RAW_RW_LOCK_UNLOCKED(lockname) \ + (raw_rwlock_t) _RAW_RW_LOCK_UNLOCKED(lockname) + +#define DEFINE_RAW_SPINLOCK(name) \ + raw_spinlock_t name __cacheline_aligned_in_smp = \ + RAW_SPIN_LOCK_UNLOCKED(name) + +#define __DEFINE_RAW_SPINLOCK(name) \ + raw_spinlock_t name = RAW_SPIN_LOCK_UNLOCKED(name) + +#define DEFINE_RAW_RWLOCK(name) \ + raw_rwlock_t name __cacheline_aligned_in_smp = \ + RAW_RW_LOCK_UNLOCKED(name) #endif /* __LINUX_SPINLOCK_TYPES_H */ Index: linux-rt-rebase.q/include/linux/spinlock_types_up.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/spinlock_types_up.h +++ linux-rt-rebase.q/include/linux/spinlock_types_up.h @@ -16,13 +16,13 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } #else -typedef struct { } raw_spinlock_t; +typedef struct { } __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { } @@ -30,7 +30,7 @@ typedef struct { } raw_spinlock_t; typedef struct { /* no debug version on UP */ -} raw_rwlock_t; +} __raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { } Index: linux-rt-rebase.q/include/linux/spinlock_up.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/spinlock_up.h +++ linux-rt-rebase.q/include/linux/spinlock_up.h @@ -20,19 +20,19 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define __raw_spin_is_locked(x) ((x)->slock == 0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(__raw_spinlock_t *lock) { lock->slock = 0; } static inline void -__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +__raw_spin_lock_flags(__raw_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(__raw_spinlock_t *lock) { char oldval = lock->slock; @@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw return oldval > 0; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(__raw_spinlock_t *lock) { lock->slock = 1; } Index: linux-rt-rebase.q/kernel/Makefile =================================================================== --- linux-rt-rebase.q.orig/kernel/Makefile +++ linux-rt-rebase.q/kernel/Makefile @@ -7,13 +7,16 @@ obj-y = sched.o fork.o exec_domain.o sysctl.o capability.o ptrace.o timer.o user.o user_namespace.o \ signal.o sys.o kmod.o workqueue.o pid.o \ extable.o params.o posix-timers.o \ - kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ + kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \ hrtimer.o rwsem.o latency.o nsproxy.o srcu.o die_notifier.o \ utsname.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ +ifneq ($(CONFIG_PREEMPT_RT),y) +obj-y += mutex.o obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o +endif obj-$(CONFIG_LOCKDEP) += lockdep.o ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o @@ -25,6 +28,7 @@ endif obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o 
+obj-$(CONFIG_PREEMPT_RT) += rt.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += cpu.o spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o Index: linux-rt-rebase.q/kernel/fork.c =================================================================== --- linux-rt-rebase.q.orig/kernel/fork.c +++ linux-rt-rebase.q/kernel/fork.c @@ -945,6 +945,9 @@ static inline void rt_mutex_init_task(st #ifdef CONFIG_RT_MUTEXES plist_head_init(&p->pi_waiters, &p->pi_lock); p->pi_blocked_on = NULL; +# ifdef CONFIG_DEBUG_RT_MUTEXES + p->last_kernel_lock = NULL; +# endif #endif } @@ -1108,7 +1111,6 @@ static struct task_struct *copy_process( #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif - p->tgid = p->pid; if (clone_flags & CLONE_THREAD) p->tgid = current->tgid; @@ -1137,6 +1139,9 @@ static struct task_struct *copy_process( retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); if (retval) goto bad_fork_cleanup_namespaces; +#ifdef CONFIG_DEBUG_PREEMPT + p->lock_count = 0; +#endif p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* Index: linux-rt-rebase.q/kernel/futex.c =================================================================== --- linux-rt-rebase.q.orig/kernel/futex.c +++ linux-rt-rebase.q/kernel/futex.c @@ -2096,7 +2096,11 @@ static int __init init(void) } for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { +#ifdef CONFIG_PREEMPT_RT + plist_head_init(&futex_queues[i].chain, NULL); +#else plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock); +#endif spin_lock_init(&futex_queues[i].lock); } return 0; Index: linux-rt-rebase.q/kernel/hrtimer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/hrtimer.c +++ linux-rt-rebase.q/kernel/hrtimer.c @@ -1449,7 +1449,7 @@ static void migrate_hrtimers(int cpu) tick_cancel_sched_timer(cpu); local_irq_disable(); - double_spin_lock(&new_base->lock, &old_base->lock, + raw_double_spin_lock(&new_base->lock, &old_base->lock, smp_processor_id() < cpu); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { @@ -1457,7 +1457,7 @@ static void migrate_hrtimers(int cpu) &new_base->clock_base[i]); } - double_spin_unlock(&new_base->lock, &old_base->lock, + raw_double_spin_unlock(&new_base->lock, &old_base->lock, smp_processor_id() < cpu); local_irq_enable(); put_cpu_var(hrtimer_bases); Index: linux-rt-rebase.q/kernel/lockdep.c =================================================================== --- linux-rt-rebase.q.orig/kernel/lockdep.c +++ linux-rt-rebase.q/kernel/lockdep.c @@ -66,7 +66,7 @@ module_param(lock_stat, int, 0644); * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... 
*/ -static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static __raw_spinlock_t lockdep_lock = (__raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { Index: linux-rt-rebase.q/kernel/rt.c =================================================================== --- /dev/null +++ linux-rt-rebase.q/kernel/rt.c @@ -0,0 +1,571 @@ +/* + * kernel/rt.c + * + * Real-Time Preemption Support + * + * started by Ingo Molnar: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * + * historic credit for proving that Linux spinlocks can be implemented via + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow + * and others) who prototyped it on 2.4 and did lots of comparative + * research and analysis; TimeSys, for proving that you can implement a + * fully preemptible kernel via the use of IRQ threading and mutexes; + * Bill Huey for persuasively arguing on lkml that the mutex model is the + * right one; and to MontaVista, who ported pmutexes to 2.6. + * + * This code is a from-scratch implementation and is not based on pmutexes, + * but the idea of converting spinlocks to mutexes is used here too. + * + * lock debugging, locking tree, deadlock detection: + * + * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey + * Released under the General Public License (GPL). + * + * Includes portions of the generic R/W semaphore implementation from: + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). + * - Derived partially from idea by Andrea Arcangeli + * - Derived also from comments by Linus + * + * Pending ownership of locks and ownership stealing: + * + * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt + * + * (also by Steven Rostedt) + * - Converted single pi_lock to individual task locks. + * + * By Esben Nielsen: + * Doing priority inheritance with help of the scheduler. + * + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * - major rework based on Esben Nielsens initial patch + * - replaced thread_info references by task_struct refs + * - removed task->pending_owner dependency + * - BKL drop/reacquire for semaphore style locks to avoid deadlocks + * in the scheduler return path as discussed with Steven Rostedt + * + * Copyright (C) 2006, Kihon Technologies Inc. + * Steven Rostedt + * - debugged and patched Thomas Gleixner's rework. + * - added back the cmpxchg to the rework. + * - turned atomic require back on for SMP. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rtmutex_common.h" + +#ifdef CONFIG_PREEMPT_RT +/* + * Unlock these on crash: + */ +void zap_rt_locks(void) +{ + //trace_lock_init(); +} +#endif + +/* + * struct mutex functions + */ +void _mutex_init(struct mutex *lock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif + __rt_mutex_init(&lock->lock, name); +} +EXPORT_SYMBOL(_mutex_init); + +void __lockfunc _mutex_lock(struct mutex *lock) +{ + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock); + +int __lockfunc _mutex_lock_interruptible(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_lock_interruptible(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) +{ + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock_nested); + +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + ret = rt_mutex_lock_interruptible(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible_nested); +#endif + +int __lockfunc _mutex_trylock(struct mutex *lock) +{ + int ret = rt_mutex_trylock(&lock->lock); + + if (ret) + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(_mutex_trylock); + +void __lockfunc _mutex_unlock(struct mutex *lock) +{ + mutex_release(&lock->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_unlock); + +/* + * rwlock_t functions + */ +int __lockfunc rt_write_trylock(rwlock_t *rwlock) +{ + int ret = rt_mutex_trylock(&rwlock->lock); + + if (ret) + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(rt_write_trylock); + +int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) +{ + *flags = 0; + return rt_write_trylock(rwlock); +} + +int __lockfunc rt_read_trylock(rwlock_t *rwlock) +{ + struct rt_mutex *lock = &rwlock->lock; + unsigned long flags; + int ret; + + /* + * Read locks within the self-held write lock succeed. + */ + spin_lock_irqsave(&lock->wait_lock, flags); + if (rt_mutex_real_owner(lock) == current) { + spin_unlock_irqrestore(&lock->wait_lock, flags); + rwlock->read_depth++; + rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); + return 1; + } + spin_unlock_irqrestore(&lock->wait_lock, flags); + + ret = rt_mutex_trylock(lock); + if (ret) + rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(rt_read_trylock); + +void __lockfunc rt_write_lock(rwlock_t *rwlock) +{ + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + __rt_spin_lock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_write_lock); + +void __lockfunc rt_read_lock(rwlock_t *rwlock) +{ + unsigned long flags; + struct rt_mutex *lock = &rwlock->lock; + + rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); + /* + * Read locks within the write lock succeed. 
+ */ + spin_lock_irqsave(&lock->wait_lock, flags); + if (rt_mutex_real_owner(lock) == current) { + spin_unlock_irqrestore(&lock->wait_lock, flags); + rwlock->read_depth++; + return; + } + spin_unlock_irqrestore(&lock->wait_lock, flags); + __rt_spin_lock(lock); +} + +EXPORT_SYMBOL(rt_read_lock); + +void __lockfunc rt_write_unlock(rwlock_t *rwlock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + __rt_spin_unlock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_write_unlock); + +void __lockfunc rt_read_unlock(rwlock_t *rwlock) +{ + struct rt_mutex *lock = &rwlock->lock; + unsigned long flags; + + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + // TRACE_WARN_ON(lock->save_state != 1); + /* + * Read locks within the self-held write lock succeed. + */ + spin_lock_irqsave(&lock->wait_lock, flags); + if (rt_mutex_real_owner(lock) == current && rwlock->read_depth) { + spin_unlock_irqrestore(&lock->wait_lock, flags); + rwlock->read_depth--; + return; + } + spin_unlock_irqrestore(&lock->wait_lock, flags); + __rt_spin_unlock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_read_unlock); + +unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); + + return 0; +} +EXPORT_SYMBOL(rt_write_lock_irqsave); + +unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); + + return 0; +} +EXPORT_SYMBOL(rt_read_lock_irqsave); + +void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); + lockdep_init_map(&rwlock->dep_map, name, key, 0); +#endif + __rt_mutex_init(&rwlock->lock, name); + rwlock->read_depth = 0; +} +EXPORT_SYMBOL(__rt_rwlock_init); + +/* + * rw_semaphores + */ + +void fastcall rt_up_write(struct rw_semaphore *rwsem) +{ + rwsem_release(&rwsem->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_up_write); + +void fastcall rt_up_read(struct rw_semaphore *rwsem) +{ + unsigned long flags; + + rwsem_release(&rwsem->dep_map, 1, _RET_IP_); + /* + * Read locks within the self-held write lock succeed. + */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem->read_depth--; + return; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rt_mutex_unlock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_up_read); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void fastcall rt_up_read_non_owner(struct rw_semaphore *rwsem) +{ + unsigned long flags; + /* + * Read locks within the self-held write lock succeed. 
+ */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem->read_depth--; + return; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rt_mutex_unlock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_up_read_non_owner); +#endif + +/* + * downgrade a write lock into a read lock + * - just wake up any readers at the front of the queue + */ +void fastcall rt_downgrade_write(struct rw_semaphore *rwsem) +{ + BUG(); +} +EXPORT_SYMBOL(rt_downgrade_write); + +int fastcall rt_down_write_trylock(struct rw_semaphore *rwsem) +{ + int ret = rt_mutex_trylock(&rwsem->lock); + + if (ret) + rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(rt_down_write_trylock); + +void fastcall rt_down_write(struct rw_semaphore *rwsem) +{ + rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); + rt_mutex_lock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_down_write); + +void fastcall rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) +{ + rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); + rt_mutex_lock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_down_write_nested); + +int fastcall rt_down_read_trylock(struct rw_semaphore *rwsem) +{ + unsigned long flags; + int ret; + + /* + * Read locks within the self-held write lock succeed. + */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + if (rt_mutex_real_owner(&rwsem->lock) == current) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem_acquire_read(&rwsem->dep_map, 0, 1, _RET_IP_); + rwsem->read_depth++; + return 1; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + + ret = rt_mutex_trylock(&rwsem->lock); + if (ret) + rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(rt_down_read_trylock); + +static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) +{ + unsigned long flags; + + rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); + + /* + * Read locks within the write lock succeed. + */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + + if (rt_mutex_real_owner(&rwsem->lock) == current) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem->read_depth++; + return; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rt_mutex_lock(&rwsem->lock); +} + +void fastcall rt_down_read(struct rw_semaphore *rwsem) +{ + __rt_down_read(rwsem, 0); +} +EXPORT_SYMBOL(rt_down_read); + +void fastcall rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) +{ + __rt_down_read(rwsem, subclass); +} +EXPORT_SYMBOL(rt_down_read_nested); + + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +/* + * Same as rt_down_read() but no lockdep calls: + */ +void fastcall rt_down_read_non_owner(struct rw_semaphore *rwsem) +{ + unsigned long flags; + /* + * Read locks within the write lock succeed. 
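/*
 * Illustrative userspace sketch of the rule used by rt_read_lock(),
 * rt_read_unlock(), rt_down_read() and rt_up_read() above: reader/writer
 * locks collapse onto a single sleeping mutex, and the only special case is
 * that the task which already owns the lock for writing may take further
 * read locks, which are merely counted in ->read_depth.  The toy_* names
 * are invented, a pthread mutex stands in for the rt_mutex, and a
 * per-thread flag replaces the rt_mutex_real_owner() == current check.
 */
#include <pthread.h>

static _Thread_local int toy_holds_write;   /* this thread write-owns it? */

struct toy_rwlock {
        pthread_mutex_t lock;       /* the one underlying sleeping lock */
        int             read_depth; /* nested reads taken by the writer */
};

static void toy_write_lock(struct toy_rwlock *rw)
{
        pthread_mutex_lock(&rw->lock);
        toy_holds_write = 1;
}

static void toy_write_unlock(struct toy_rwlock *rw)
{
        toy_holds_write = 0;
        pthread_mutex_unlock(&rw->lock);
}

static void toy_read_lock(struct toy_rwlock *rw)
{
        if (toy_holds_write) {      /* read within self-held write lock */
                rw->read_depth++;   /* just count it, do not block */
                return;
        }
        pthread_mutex_lock(&rw->lock);
}

static void toy_read_unlock(struct toy_rwlock *rw)
{
        if (toy_holds_write && rw->read_depth) {
                rw->read_depth--;   /* unwind a nested read */
                return;
        }
        pthread_mutex_unlock(&rw->lock);
}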
+ */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + + if (rt_mutex_real_owner(&rwsem->lock) == current) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem->read_depth++; + return; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rt_mutex_lock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_down_read_non_owner); + +#endif + +void fastcall __rt_rwsem_init(struct rw_semaphore *rwsem, char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); + lockdep_init_map(&rwsem->dep_map, name, key, 0); +#endif + __rt_mutex_init(&rwsem->lock, name); + rwsem->read_depth = 0; +} +EXPORT_SYMBOL(__rt_rwsem_init); + +/* + * Semaphores + */ +/* + * Linux Semaphores implemented via RT-mutexes. + * + * In the down() variants we use the mutex as the semaphore blocking + * object: we always acquire it, decrease the counter and keep the lock + * locked if we did the 1->0 transition. The next down() will then block. + * + * In the up() path we atomically increase the counter and do the + * unlock if we were the one doing the 0->1 transition. + */ + +static inline void __down_complete(struct semaphore *sem) +{ + int count = atomic_dec_return(&sem->count); + + if (unlikely(count > 0)) + rt_mutex_unlock(&sem->lock); +} + +void fastcall rt_down(struct semaphore *sem) +{ + rt_mutex_lock(&sem->lock); + __down_complete(sem); +} +EXPORT_SYMBOL(rt_down); + +int fastcall rt_down_interruptible(struct semaphore *sem) +{ + int ret; + + ret = rt_mutex_lock_interruptible(&sem->lock, 0); + if (ret) + return ret; + __down_complete(sem); + return 0; +} +EXPORT_SYMBOL(rt_down_interruptible); + +/* + * try to down the semaphore, 0 on success and 1 on failure. (inverted) + */ +int fastcall rt_down_trylock(struct semaphore *sem) +{ + /* + * Here we are a tiny bit different from ordinary Linux semaphores, + * because we can get 'transient' locking-failures when say a + * process decreases the count from 9 to 8 and locks/releases the + * embedded mutex internally. 
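/*
 * Userspace sketch of the counting-semaphore-on-a-mutex scheme described in
 * the comment above: down() always acquires the blocking mutex and keeps it
 * held only if it performed the 1 -> 0 transition; up() releases it again if
 * it performed the 0 -> 1 transition.  C11 atomics and a pthread mutex stand
 * in for atomic_t and rt_mutex, and the toy_* names are invented.  Because a
 * POSIX mutex must be unlocked by its owner, the sketch additionally assumes
 * that the up() which does the 0 -> 1 transition runs in the thread that
 * currently holds the mutex.
 */
#include <pthread.h>
#include <stdatomic.h>

struct toy_sema {
        atomic_int      count;
        pthread_mutex_t lock;       /* the blocking object */
};

static void toy_sema_init(struct toy_sema *sem, int val)
{
        atomic_init(&sem->count, val);
        pthread_mutex_init(&sem->lock, NULL);
        if (val == 0)               /* start depleted: keep it locked */
                pthread_mutex_lock(&sem->lock);
}

static void toy_down(struct toy_sema *sem)
{
        pthread_mutex_lock(&sem->lock);
        /* keep the mutex locked only on the 1 -> 0 transition */
        if (atomic_fetch_sub(&sem->count, 1) - 1 > 0)
                pthread_mutex_unlock(&sem->lock);
}

static void toy_up(struct toy_sema *sem)
{
        /* whoever does the 0 -> 1 transition unblocks the next down() */
        if (atomic_fetch_add(&sem->count, 1) + 1 == 1)
                pthread_mutex_unlock(&sem->lock);
}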
It would be quite complex to remove + * these transient failures so lets try it the simple way first: + */ + if (rt_mutex_trylock(&sem->lock)) { + __down_complete(sem); + return 0; + } + return 1; +} +EXPORT_SYMBOL(rt_down_trylock); + +void fastcall rt_up(struct semaphore *sem) +{ + int count; + + /* + * Disable preemption to make sure a highprio trylock-er cannot + * preempt us here and get into an infinite loop: + */ + preempt_disable(); + count = atomic_inc_return(&sem->count); + /* + * If we did the 0 -> 1 transition then we are the ones to unlock it: + */ + if (likely(count == 1)) + rt_mutex_unlock(&sem->lock); + preempt_enable(); +} +EXPORT_SYMBOL(rt_up); + +void fastcall __sema_init(struct semaphore *sem, int val, + char *name, char *file, int line) +{ + atomic_set(&sem->count, val); + switch (val) { + case 0: + __rt_mutex_init(&sem->lock, name); + rt_mutex_lock(&sem->lock); + break; + default: + __rt_mutex_init(&sem->lock, name); + break; + } +} +EXPORT_SYMBOL(__sema_init); + +void fastcall __init_MUTEX(struct semaphore *sem, char *name, char *file, + int line) +{ + __sema_init(sem, 1, name, file, line); +} +EXPORT_SYMBOL(__init_MUTEX); + Index: linux-rt-rebase.q/kernel/rtmutex-debug.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rtmutex-debug.c +++ linux-rt-rebase.q/kernel/rtmutex-debug.c @@ -16,6 +16,7 @@ * * See rt.c in preempt-rt for proper credits and further information */ +#include #include #include #include @@ -29,66 +30,6 @@ #include "rtmutex_common.h" -# define TRACE_WARN_ON(x) WARN_ON(x) -# define TRACE_BUG_ON(x) BUG_ON(x) - -# define TRACE_OFF() \ -do { \ - if (rt_trace_on) { \ - rt_trace_on = 0; \ - console_verbose(); \ - if (spin_is_locked(¤t->pi_lock)) \ - spin_unlock(¤t->pi_lock); \ - } \ -} while (0) - -# define TRACE_OFF_NOLOCK() \ -do { \ - if (rt_trace_on) { \ - rt_trace_on = 0; \ - console_verbose(); \ - } \ -} while (0) - -# define TRACE_BUG_LOCKED() \ -do { \ - TRACE_OFF(); \ - BUG(); \ -} while (0) - -# define TRACE_WARN_ON_LOCKED(c) \ -do { \ - if (unlikely(c)) { \ - TRACE_OFF(); \ - WARN_ON(1); \ - } \ -} while (0) - -# define TRACE_BUG_ON_LOCKED(c) \ -do { \ - if (unlikely(c)) \ - TRACE_BUG_LOCKED(); \ -} while (0) - -#ifdef CONFIG_SMP -# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c) -#else -# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0) -#endif - -/* - * deadlock detection flag. 
We turn it off when we detect - * the first problem because we dont want to recurse back - * into the tracing code when doing error printk or - * executing a BUG(): - */ -int rt_trace_on = 1; - -void deadlock_trace_off(void) -{ - rt_trace_on = 0; -} - static void printk_task(struct task_struct *p) { if (p) @@ -116,8 +57,8 @@ static void printk_lock(struct rt_mutex void rt_mutex_debug_task_free(struct task_struct *task) { - WARN_ON(!plist_head_empty(&task->pi_waiters)); - WARN_ON(task->pi_blocked_on); + DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters)); + DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); } /* @@ -130,7 +71,7 @@ void debug_rt_mutex_deadlock(int detect, { struct task_struct *task; - if (!rt_trace_on || detect || !act_waiter) + if (!debug_locks || detect || !act_waiter) return; task = rt_mutex_owner(act_waiter->lock); @@ -144,14 +85,15 @@ void debug_rt_mutex_print_deadlock(struc { struct task_struct *task; - if (!waiter->deadlock_lock || !rt_trace_on) + if (!waiter->deadlock_lock || !debug_locks) return; task = find_task_by_pid(waiter->deadlock_task_pid); if (!task) return; - TRACE_OFF_NOLOCK(); + if (!debug_locks_off()) + return; printk("\n============================================\n"); printk( "[ BUG: circular locking deadlock detected! ]\n"); @@ -178,7 +120,6 @@ void debug_rt_mutex_print_deadlock(struc printk("[ turning off deadlock detection." "Please report this trace. ]\n\n"); - local_irq_disable(); } void debug_rt_mutex_lock(struct rt_mutex *lock) @@ -187,7 +128,8 @@ void debug_rt_mutex_lock(struct rt_mutex void debug_rt_mutex_unlock(struct rt_mutex *lock) { - TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current); + if (debug_locks) + DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); } void @@ -197,7 +139,7 @@ debug_rt_mutex_proxy_lock(struct rt_mute void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) { - TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock)); + DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); } void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) @@ -209,9 +151,9 @@ void debug_rt_mutex_init_waiter(struct r void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) { - TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry)); - TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); - TRACE_WARN_ON(waiter->task); + DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry)); + DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); + DEBUG_LOCKS_WARN_ON(waiter->task); memset(waiter, 0x22, sizeof(*waiter)); } @@ -227,9 +169,36 @@ void debug_rt_mutex_init(struct rt_mutex void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) { +#ifdef CONFIG_DEBUG_PREEMPT + if (task->lock_count >= MAX_LOCK_STACK) { + if (!debug_locks_off()) + return; + printk("BUG: %s/%d: lock count overflow!\n", + task->comm, task->pid); + dump_stack(); + return; + } +#ifdef CONFIG_PREEMPT_RT + task->owned_lock[task->lock_count] = lock; +#endif + task->lock_count++; +#endif } void rt_mutex_deadlock_account_unlock(struct task_struct *task) { +#ifdef CONFIG_DEBUG_PREEMPT + if (!task->lock_count) { + if (!debug_locks_off()) + return; + printk("BUG: %s/%d: lock count underflow!\n", + task->comm, task->pid); + dump_stack(); + return; + } + task->lock_count--; +#ifdef CONFIG_PREEMPT_RT + task->owned_lock[task->lock_count] = NULL; +#endif +#endif } - Index: linux-rt-rebase.q/kernel/rtmutex.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rtmutex.c +++ linux-rt-rebase.q/kernel/rtmutex.c @@ -97,6 
+97,22 @@ static inline void mark_rt_mutex_waiters } #endif +int pi_initialized; + +/* + * we initialize the wait_list runtime. (Could be done build-time and/or + * boot-time.) + */ +static inline void init_lists(struct rt_mutex *lock) +{ + if (unlikely(!lock->wait_list.prio_list.prev)) { + plist_head_init(&lock->wait_list, &lock->wait_lock); +#ifdef CONFIG_DEBUG_RT_MUTEXES + pi_initialized++; +#endif + } +} + /* * Calculate task priority from the waiter list priority * @@ -253,13 +269,13 @@ static int rt_mutex_adjust_prio_chain(st plist_add(&waiter->list_entry, &lock->wait_list); /* Release the task */ - spin_unlock_irqrestore(&task->pi_lock, flags); + spin_unlock(&task->pi_lock); put_task_struct(task); /* Grab the next task */ task = rt_mutex_owner(lock); get_task_struct(task); - spin_lock_irqsave(&task->pi_lock, flags); + spin_lock(&task->pi_lock); if (waiter == rt_mutex_top_waiter(lock)) { /* Boost the owner */ @@ -277,10 +293,10 @@ static int rt_mutex_adjust_prio_chain(st __rt_mutex_adjust_prio(task); } - spin_unlock_irqrestore(&task->pi_lock, flags); + spin_unlock(&task->pi_lock); top_waiter = rt_mutex_top_waiter(lock); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); if (!detect_deadlock && waiter != top_waiter) goto out_put_task; @@ -304,7 +320,6 @@ static inline int try_to_steal_lock(stru { struct task_struct *pendowner = rt_mutex_owner(lock); struct rt_mutex_waiter *next; - unsigned long flags; if (!rt_mutex_owner_pending(lock)) return 0; @@ -312,9 +327,9 @@ static inline int try_to_steal_lock(stru if (pendowner == current) return 1; - spin_lock_irqsave(&pendowner->pi_lock, flags); + spin_lock(&pendowner->pi_lock); if (current->prio >= pendowner->prio) { - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + spin_unlock(&pendowner->pi_lock); return 0; } @@ -324,7 +339,7 @@ static inline int try_to_steal_lock(stru * priority. 
*/ if (likely(!rt_mutex_has_waiters(lock))) { - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + spin_unlock(&pendowner->pi_lock); return 1; } @@ -332,7 +347,7 @@ static inline int try_to_steal_lock(stru next = rt_mutex_top_waiter(lock); plist_del(&next->pi_list_entry, &pendowner->pi_waiters); __rt_mutex_adjust_prio(pendowner); - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + spin_unlock(&pendowner->pi_lock); /* * We are going to steal the lock and a waiter was @@ -349,10 +364,10 @@ static inline int try_to_steal_lock(stru * might be current: */ if (likely(next->task != current)) { - spin_lock_irqsave(¤t->pi_lock, flags); + spin_lock(¤t->pi_lock); plist_add(&next->pi_list_entry, ¤t->pi_waiters); __rt_mutex_adjust_prio(current); - spin_unlock_irqrestore(¤t->pi_lock, flags); + spin_unlock(¤t->pi_lock); } return 1; } @@ -411,14 +426,13 @@ static int try_to_take_rt_mutex(struct r */ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, - int detect_deadlock) + int detect_deadlock, unsigned long flags) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; - unsigned long flags; int chain_walk = 0, res; - spin_lock_irqsave(¤t->pi_lock, flags); + spin_lock(¤t->pi_lock); __rt_mutex_adjust_prio(current); waiter->task = current; waiter->lock = lock; @@ -432,17 +446,17 @@ static int task_blocks_on_rt_mutex(struc current->pi_blocked_on = waiter; - spin_unlock_irqrestore(¤t->pi_lock, flags); + spin_unlock(¤t->pi_lock); if (waiter == rt_mutex_top_waiter(lock)) { - spin_lock_irqsave(&owner->pi_lock, flags); + spin_lock(&owner->pi_lock); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; - spin_unlock_irqrestore(&owner->pi_lock, flags); + spin_unlock(&owner->pi_lock); } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) chain_walk = 1; @@ -457,12 +471,12 @@ static int task_blocks_on_rt_mutex(struc */ get_task_struct(owner); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, current); - spin_lock(&lock->wait_lock); + spin_lock_irq(&lock->wait_lock); return res; } @@ -475,13 +489,12 @@ static int task_blocks_on_rt_mutex(struc * * Called with lock->wait_lock held. */ -static void wakeup_next_waiter(struct rt_mutex *lock) +static void wakeup_next_waiter(struct rt_mutex *lock, int savestate) { struct rt_mutex_waiter *waiter; struct task_struct *pendowner; - unsigned long flags; - spin_lock_irqsave(¤t->pi_lock, flags); + spin_lock(¤t->pi_lock); waiter = rt_mutex_top_waiter(lock); plist_del(&waiter->list_entry, &lock->wait_list); @@ -498,7 +511,7 @@ static void wakeup_next_waiter(struct rt rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING); - spin_unlock_irqrestore(¤t->pi_lock, flags); + spin_unlock(¤t->pi_lock); /* * Clear the pi_blocked_on variable and enqueue a possible @@ -507,7 +520,7 @@ static void wakeup_next_waiter(struct rt * waiter with higher priority than pending-owner->normal_prio * is blocked on the unboosted (pending) owner. 
*/ - spin_lock_irqsave(&pendowner->pi_lock, flags); + spin_lock(&pendowner->pi_lock); WARN_ON(!pendowner->pi_blocked_on); WARN_ON(pendowner->pi_blocked_on != waiter); @@ -521,9 +534,12 @@ static void wakeup_next_waiter(struct rt next = rt_mutex_top_waiter(lock); plist_add(&next->pi_list_entry, &pendowner->pi_waiters); } - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + spin_unlock(&pendowner->pi_lock); - wake_up_process(pendowner); + if (savestate) + wake_up_process_mutex(pendowner); + else + wake_up_process(pendowner); } /* @@ -532,22 +548,22 @@ static void wakeup_next_waiter(struct rt * Must be called with lock->wait_lock held */ static void remove_waiter(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter) + struct rt_mutex_waiter *waiter, + unsigned long flags) { int first = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); - unsigned long flags; int chain_walk = 0; - spin_lock_irqsave(¤t->pi_lock, flags); + spin_lock(¤t->pi_lock); plist_del(&waiter->list_entry, &lock->wait_list); waiter->task = NULL; current->pi_blocked_on = NULL; - spin_unlock_irqrestore(¤t->pi_lock, flags); + spin_unlock(¤t->pi_lock); if (first && owner != current) { - spin_lock_irqsave(&owner->pi_lock, flags); + spin_lock(&owner->pi_lock); plist_del(&waiter->pi_list_entry, &owner->pi_waiters); @@ -562,7 +578,7 @@ static void remove_waiter(struct rt_mute if (owner->pi_blocked_on) chain_walk = 1; - spin_unlock_irqrestore(&owner->pi_lock, flags); + spin_unlock(&owner->pi_lock); } WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); @@ -573,11 +589,11 @@ static void remove_waiter(struct rt_mute /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(owner); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); - spin_lock(&lock->wait_lock); + spin_lock_irq(&lock->wait_lock); } /* @@ -598,14 +614,307 @@ void rt_mutex_adjust_pi(struct task_stru return; } - spin_unlock_irqrestore(&task->pi_lock, flags); - /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); + spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); } /* + * preemptible spin_lock functions: + */ + +#ifdef CONFIG_PREEMPT_RT + +static inline void +rt_spin_lock_fastlock(struct rt_mutex *lock, + void fastcall (*slowfn)(struct rt_mutex *lock)) +{ + if (likely(rt_mutex_cmpxchg(lock, NULL, current))) + rt_mutex_deadlock_account_lock(lock, current); + else + slowfn(lock); +} + +static inline void +rt_spin_lock_fastunlock(struct rt_mutex *lock, + void fastcall (*slowfn)(struct rt_mutex *lock)) +{ + if (likely(rt_mutex_cmpxchg(lock, current, NULL))) + rt_mutex_deadlock_account_unlock(current); + else + slowfn(lock); +} + +/* + * Slow path lock function spin_lock style: this variant is very + * careful not to miss any non-lock wakeups. + * + * The wakeup side uses wake_up_process_mutex, which, combined with + * the xchg code of this function is a transparent sleep/wakeup + * mechanism nested within any existing sleep/wakeup mechanism. This + * enables the seemless use of arbitrary (blocking) spinlocks within + * sleep/wakeup event loops. 
+ */ +static void fastcall noinline __sched +rt_spin_lock_slowlock(struct rt_mutex *lock) +{ + struct rt_mutex_waiter waiter; + unsigned long saved_state, state, flags; + + debug_rt_mutex_init_waiter(&waiter); + waiter.task = NULL; + + spin_lock_irqsave(&lock->wait_lock, flags); + init_lists(lock); + + /* Try to acquire the lock again: */ + if (try_to_take_rt_mutex(lock)) { + spin_unlock_irqrestore(&lock->wait_lock, flags); + return; + } + + BUG_ON(rt_mutex_owner(lock) == current); + + /* + * Here we save whatever state the task was in originally, + * we'll restore it at the end of the function and we'll take + * any intermediate wakeup into account as well, independently + * of the lock sleep/wakeup mechanism. When we get a real + * wakeup the task->state is TASK_RUNNING and we change + * saved_state accordingly. If we did not get a real wakeup + * then we return with the saved state. + */ + saved_state = xchg(¤t->state, TASK_UNINTERRUPTIBLE); + + for (;;) { + unsigned long saved_flags; + int saved_lock_depth = current->lock_depth; + + /* Try to acquire the lock */ + if (try_to_take_rt_mutex(lock)) + break; + /* + * waiter.task is NULL the first time we come here and + * when we have been woken up by the previous owner + * but the lock got stolen by an higher prio task. + */ + if (!waiter.task) { + task_blocks_on_rt_mutex(lock, &waiter, 0, flags); + /* Wakeup during boost ? */ + if (unlikely(!waiter.task)) + continue; + } + + /* + * Prevent schedule() to drop BKL, while waiting for + * the lock ! We restore lock_depth when we come back. + */ + saved_flags = current->flags & PF_NOSCHED; + current->lock_depth = -1; + current->flags &= ~PF_NOSCHED; + spin_unlock_irqrestore(&lock->wait_lock, flags); + + debug_rt_mutex_print_deadlock(&waiter); + + schedule_rt_mutex(lock); + + spin_lock_irqsave(&lock->wait_lock, flags); + current->flags |= saved_flags; + current->lock_depth = saved_lock_depth; + state = xchg(¤t->state, TASK_UNINTERRUPTIBLE); + if (unlikely(state == TASK_RUNNING)) + saved_state = TASK_RUNNING; + } + + state = xchg(¤t->state, saved_state); + if (unlikely(state == TASK_RUNNING)) + current->state = TASK_RUNNING; + + /* + * Extremely rare case, if we got woken up by a non-mutex wakeup, + * and we managed to steal the lock despite us not being the + * highest-prio waiter (due to SCHED_OTHER changing prio), then we + * can end up with a non-NULL waiter.task: + */ + if (unlikely(waiter.task)) + remove_waiter(lock, &waiter, flags); + /* + * try_to_take_rt_mutex() sets the waiter bit + * unconditionally. 
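/*
 * Sketch of the state bookkeeping described in the comment above, reduced to
 * a single atomic word: the slow path saves whatever state the task had,
 * sleeps uninterruptibly for the lock, and if a *real* wakeup (one that sets
 * the task RUNNING) slips in while it waits, that wakeup is folded into the
 * saved state so it is not lost when the state is restored.  The toy_* names
 * are invented; trylock()/wait_for_lock() are caller-supplied stand-ins for
 * try_to_take_rt_mutex() and schedule_rt_mutex(); waiter lists and priority
 * inheritance are not modelled at all.
 */
#include <stdatomic.h>

enum { TOY_RUNNING, TOY_INTERRUPTIBLE, TOY_UNINTERRUPTIBLE };

static _Atomic int toy_task_state = TOY_RUNNING;    /* current->state */

static void toy_real_wakeup(void)   /* the "real" wakeup side */
{
        atomic_store(&toy_task_state, TOY_RUNNING);
}

static void toy_lock_slowpath(int (*trylock)(void), void (*wait_for_lock)(void))
{
        int saved_state, state;

        /* park for the lock, remembering what the task was doing before */
        saved_state = atomic_exchange(&toy_task_state, TOY_UNINTERRUPTIBLE);

        while (!trylock()) {
                wait_for_lock();    /* schedule() in the kernel version */
                /* re-park; a real wakeup seen here must survive the wait */
                state = atomic_exchange(&toy_task_state, TOY_UNINTERRUPTIBLE);
                if (state == TOY_RUNNING)
                        saved_state = TOY_RUNNING;
        }

        /* restore the original state, unless a real wakeup arrived last */
        state = atomic_exchange(&toy_task_state, saved_state);
        if (state == TOY_RUNNING)
                atomic_store(&toy_task_state, TOY_RUNNING);
}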
We might have to fix that up: + */ + fixup_rt_mutex_waiters(lock); + + spin_unlock_irqrestore(&lock->wait_lock, flags); + + debug_rt_mutex_free_waiter(&waiter); +} + +/* + * Slow path to release a rt_mutex spin_lock style + */ +static void fastcall noinline __sched +rt_spin_lock_slowunlock(struct rt_mutex *lock) +{ + unsigned long flags; + + spin_lock_irqsave(&lock->wait_lock, flags); + + debug_rt_mutex_unlock(lock); + + rt_mutex_deadlock_account_unlock(current); + + if (!rt_mutex_has_waiters(lock)) { + lock->owner = NULL; + spin_unlock_irqrestore(&lock->wait_lock, flags); + return; + } + + wakeup_next_waiter(lock, 1); + + spin_unlock_irqrestore(&lock->wait_lock, flags); + + /* Undo pi boosting.when necessary */ + rt_mutex_adjust_prio(current); +} + +void __lockfunc rt_spin_lock(spinlock_t *lock) +{ + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); +} +EXPORT_SYMBOL(rt_spin_lock); + +void __lockfunc __rt_spin_lock(struct rt_mutex *lock) +{ + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); +} +EXPORT_SYMBOL(__rt_spin_lock); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) +{ + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); +} +EXPORT_SYMBOL(rt_spin_lock_nested); + +#endif + +void __lockfunc rt_spin_unlock(spinlock_t *lock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + spin_release(&lock->dep_map, 1, _RET_IP_); + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(rt_spin_unlock); + +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) +{ + rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(__rt_spin_unlock); + +/* + * Wait for the lock to get unlocked: instead of polling for an unlock + * (like raw spinlocks do), we lock and unlock, to force the kernel to + * schedule if there's contention: + */ +void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) +{ + spin_lock(lock); + spin_unlock(lock); +} +EXPORT_SYMBOL(rt_spin_unlock_wait); + +int __lockfunc rt_spin_trylock(spinlock_t *lock) +{ + int ret = rt_mutex_trylock(&lock->lock); + + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock); + +int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) +{ + int ret; + + *flags = 0; + ret = rt_mutex_trylock(&lock->lock); + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock_irqsave); + +int _atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) +{ + /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ + if (atomic_add_unless(atomic, -1, 1)) + return 0; + rt_spin_lock(lock); + if (atomic_dec_and_test(atomic)) + return 1; + rt_spin_unlock(lock); + return 0; +} +EXPORT_SYMBOL(_atomic_dec_and_spin_lock); + +void +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif + __rt_mutex_init(&lock->lock, name); +} +EXPORT_SYMBOL(__rt_spin_lock_init); + +#endif + +#ifdef CONFIG_PREEMPT_BKL + +static inline int rt_release_bkl(struct rt_mutex *lock, unsigned long flags) +{ + int saved_lock_depth = current->lock_depth; + + current->lock_depth = -1; + /* + * try_to_take_lock set the waiters, make sure it's + * still correct. + */ + fixup_rt_mutex_waiters(lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); + + up(&kernel_sem); + + spin_lock_irq(&lock->wait_lock); + + return saved_lock_depth; +} + +static inline void rt_reacquire_bkl(int saved_lock_depth) +{ + down(&kernel_sem); + current->lock_depth = saved_lock_depth; +} + +#else +# define rt_release_bkl(lock, flags) (-1) +# define rt_reacquire_bkl(depth) do { } while (0) +#endif + +/* * Slow path lock function: */ static int __sched @@ -613,20 +922,29 @@ rt_mutex_slowlock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, int detect_deadlock) { + int ret = 0, saved_lock_depth = -1; struct rt_mutex_waiter waiter; - int ret = 0; + unsigned long flags; debug_rt_mutex_init_waiter(&waiter); waiter.task = NULL; - spin_lock(&lock->wait_lock); + spin_lock_irqsave(&lock->wait_lock, flags); + init_lists(lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock)) { - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); return 0; } + /* + * We drop the BKL here before we go into the wait loop to avoid a + * possible deadlock in the scheduler. 
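/*
 * The _atomic_dec_and_spin_lock() helper added earlier in this hunk is the
 * usual "drop a reference, and take the lock only if this was the last
 * reference" primitive.  A userspace rendering of the same logic, with C11
 * atomics and a pthread mutex standing in for atomic_t and spinlock_t (the
 * toy_* name is invented):
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool toy_dec_and_lock(atomic_int *counter, pthread_mutex_t *lock)
{
        int old = atomic_load(counter);

        /* fast path: decrement without the lock unless the count is 1 */
        while (old != 1) {
                if (atomic_compare_exchange_weak(counter, &old, old - 1))
                        return false;   /* other references remain */
        }

        /* slow path: the count may hit zero, so decide under the lock */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(counter, 1) == 1)
                return true;            /* last reference: lock stays held */
        pthread_mutex_unlock(lock);
        return false;
}

/*
 * Typical use: when toy_dec_and_lock() returns true, the caller unlinks and
 * frees the object while still holding the lock, then drops the lock.
 */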
+ */ + if (unlikely(current->lock_depth >= 0)) + saved_lock_depth = rt_release_bkl(lock, flags); + set_current_state(state); /* Setup the timer, when timeout != NULL */ @@ -635,6 +953,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, HRTIMER_MODE_ABS); for (;;) { + unsigned long saved_flags; + /* Try to acquire the lock: */ if (try_to_take_rt_mutex(lock)) break; @@ -660,7 +980,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, */ if (!waiter.task) { ret = task_blocks_on_rt_mutex(lock, &waiter, - detect_deadlock); + detect_deadlock, flags); /* * If we got woken up by the owner then start loop * all over without going into schedule to try @@ -679,22 +999,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, if (unlikely(ret)) break; } + saved_flags = current->flags & PF_NOSCHED; + current->flags &= ~PF_NOSCHED; - spin_unlock(&lock->wait_lock); + spin_unlock_irq(&lock->wait_lock); debug_rt_mutex_print_deadlock(&waiter); if (waiter.task) schedule_rt_mutex(lock); - spin_lock(&lock->wait_lock); + spin_lock_irq(&lock->wait_lock); + + current->flags |= saved_flags; set_current_state(state); } set_current_state(TASK_RUNNING); if (unlikely(waiter.task)) - remove_waiter(lock, &waiter); + remove_waiter(lock, &waiter, flags); /* * try_to_take_rt_mutex() sets the waiter bit @@ -702,7 +1026,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, */ fixup_rt_mutex_waiters(lock); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); /* Remove pending timer: */ if (unlikely(timeout)) @@ -716,6 +1040,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, if (unlikely(ret)) rt_mutex_adjust_prio(current); + /* Must we reaquire the BKL? */ + if (unlikely(saved_lock_depth >= 0)) + rt_reacquire_bkl(saved_lock_depth); + debug_rt_mutex_free_waiter(&waiter); return ret; @@ -727,12 +1055,15 @@ rt_mutex_slowlock(struct rt_mutex *lock, static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) { + unsigned long flags; int ret = 0; - spin_lock(&lock->wait_lock); + spin_lock_irqsave(&lock->wait_lock, flags); if (likely(rt_mutex_owner(lock) != current)) { + init_lists(lock); + ret = try_to_take_rt_mutex(lock); /* * try_to_take_rt_mutex() sets the lock waiters @@ -741,7 +1072,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo fixup_rt_mutex_waiters(lock); } - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); return ret; } @@ -752,7 +1083,9 @@ rt_mutex_slowtrylock(struct rt_mutex *lo static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) { - spin_lock(&lock->wait_lock); + unsigned long flags; + + spin_lock_irqsave(&lock->wait_lock, flags); debug_rt_mutex_unlock(lock); @@ -760,13 +1093,13 @@ rt_mutex_slowunlock(struct rt_mutex *loc if (!rt_mutex_has_waiters(lock)) { lock->owner = NULL; - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); return; } - wakeup_next_waiter(lock); + wakeup_next_waiter(lock, 0); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); /* Undo pi boosting if necessary: */ rt_mutex_adjust_prio(current); Index: linux-rt-rebase.q/kernel/rwsem.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rwsem.c +++ linux-rt-rebase.q/kernel/rwsem.c @@ -15,7 +15,7 @@ /* * lock for reading */ -void down_read(struct rw_semaphore *sem) +void compat_down_read(struct compat_rw_semaphore *sem) { might_sleep(); rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); @@ -23,12 +23,12 @@ void down_read(struct rw_semaphore *sem) LOCK_CONTENDED(sem, __down_read_trylock, __down_read); } 
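/*
 * Sketch of the BKL handling above: before sleeping for the rtmutex the task
 * drops the outer kernel lock it may still hold, and takes it back once the
 * rtmutex has been acquired.  The deadlock is avoided because no task ever
 * waits for the inner lock while still holding the outer one, so the two
 * locks cannot form a cycle.  Two pthread mutexes and a caller-maintained
 * flag model this; the toy_* names are invented, and the real code keys off
 * current->lock_depth and kernel_sem instead.
 */
#include <pthread.h>

static void toy_lock_inner(pthread_mutex_t *inner, pthread_mutex_t *outer,
                           int *outer_held)
{
        int had_outer = *outer_held;

        if (had_outer) {                /* cf. rt_release_bkl() */
                *outer_held = 0;
                pthread_mutex_unlock(outer);
        }

        pthread_mutex_lock(inner);      /* may block for a long time */

        if (had_outer) {                /* cf. rt_reacquire_bkl() */
                pthread_mutex_lock(outer);
                *outer_held = 1;
        }
}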
-EXPORT_SYMBOL(down_read); +EXPORT_SYMBOL(compat_down_read); /* * trylock for reading -- returns 1 if successful, 0 if contention */ -int down_read_trylock(struct rw_semaphore *sem) +int compat_down_read_trylock(struct compat_rw_semaphore *sem) { int ret = __down_read_trylock(sem); @@ -37,12 +37,12 @@ int down_read_trylock(struct rw_semaphor return ret; } -EXPORT_SYMBOL(down_read_trylock); +EXPORT_SYMBOL(compat_down_read_trylock); /* * lock for writing */ -void down_write(struct rw_semaphore *sem) +void compat_down_write(struct compat_rw_semaphore *sem) { might_sleep(); rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); @@ -50,12 +50,12 @@ void down_write(struct rw_semaphore *sem LOCK_CONTENDED(sem, __down_write_trylock, __down_write); } -EXPORT_SYMBOL(down_write); +EXPORT_SYMBOL(compat_down_write); /* * trylock for writing -- returns 1 if successful, 0 if contention */ -int down_write_trylock(struct rw_semaphore *sem) +int compat_down_write_trylock(struct compat_rw_semaphore *sem) { int ret = __down_write_trylock(sem); @@ -64,36 +64,36 @@ int down_write_trylock(struct rw_semapho return ret; } -EXPORT_SYMBOL(down_write_trylock); +EXPORT_SYMBOL(compat_down_write_trylock); /* * release a read lock */ -void up_read(struct rw_semaphore *sem) +void compat_up_read(struct compat_rw_semaphore *sem) { rwsem_release(&sem->dep_map, 1, _RET_IP_); __up_read(sem); } -EXPORT_SYMBOL(up_read); +EXPORT_SYMBOL(compat_up_read); /* * release a write lock */ -void up_write(struct rw_semaphore *sem) +void compat_up_write(struct compat_rw_semaphore *sem) { rwsem_release(&sem->dep_map, 1, _RET_IP_); __up_write(sem); } -EXPORT_SYMBOL(up_write); +EXPORT_SYMBOL(compat_up_write); /* * downgrade write lock to read lock */ -void downgrade_write(struct rw_semaphore *sem) +void compat_downgrade_write(struct compat_rw_semaphore *sem) { /* * lockdep: a downgraded write will live on as a write @@ -102,11 +102,11 @@ void downgrade_write(struct rw_semaphore __downgrade_write(sem); } -EXPORT_SYMBOL(downgrade_write); +EXPORT_SYMBOL(compat_downgrade_write); #ifdef CONFIG_DEBUG_LOCK_ALLOC -void down_read_nested(struct rw_semaphore *sem, int subclass) +void compat_down_read_nested(struct compat_rw_semaphore *sem, int subclass) { might_sleep(); rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); @@ -114,18 +114,18 @@ void down_read_nested(struct rw_semaphor LOCK_CONTENDED(sem, __down_read_trylock, __down_read); } -EXPORT_SYMBOL(down_read_nested); +EXPORT_SYMBOL(compat_down_read_nested); -void down_read_non_owner(struct rw_semaphore *sem) +void compat_down_read_non_owner(struct compat_rw_semaphore *sem) { might_sleep(); __down_read(sem); } -EXPORT_SYMBOL(down_read_non_owner); +EXPORT_SYMBOL(compat_down_read_non_owner); -void down_write_nested(struct rw_semaphore *sem, int subclass) +void compat_down_write_nested(struct compat_rw_semaphore *sem, int subclass) { might_sleep(); rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); @@ -133,14 +133,14 @@ void down_write_nested(struct rw_semapho LOCK_CONTENDED(sem, __down_write_trylock, __down_write); } -EXPORT_SYMBOL(down_write_nested); +EXPORT_SYMBOL(compat_down_write_nested); -void up_read_non_owner(struct rw_semaphore *sem) +void compat_up_read_non_owner(struct compat_rw_semaphore *sem) { __up_read(sem); } -EXPORT_SYMBOL(up_read_non_owner); +EXPORT_SYMBOL(compat_up_read_non_owner); #endif Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -1404,7 
+1404,8 @@ static inline int wake_idle(int cpu, str * * returns failure only if the task is already active. */ -static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) +static int +try_to_wake_up(struct task_struct *p, unsigned int state, int sync, int mutex) { int cpu, this_cpu, success = 0; unsigned long flags; @@ -1546,17 +1547,51 @@ out: int fastcall wake_up_process(struct task_struct *p) { - int ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | - TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); + int ret; + + ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | + TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | + TASK_UNINTERRUPTIBLE, 0, 0); mcount(); return ret; } EXPORT_SYMBOL(wake_up_process); -int fastcall wake_up_state(struct task_struct *p, unsigned int state) +int fastcall wake_up_process_sync(struct task_struct * p) +{ + int ret; + + ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | + TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | + TASK_UNINTERRUPTIBLE, 1, 0); + mcount(); + return ret; +} +EXPORT_SYMBOL(wake_up_process_sync); + +int fastcall wake_up_process_mutex(struct task_struct * p) +{ + int ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | + TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | + TASK_UNINTERRUPTIBLE, 0, 1); + mcount(); + return ret; +} +EXPORT_SYMBOL(wake_up_process_mutex); + +int fastcall wake_up_process_mutex_sync(struct task_struct * p) { - int ret = try_to_wake_up(p, state, 0); + int ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | + TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | + TASK_UNINTERRUPTIBLE, 1, 1); + mcount(); + return ret; +} +EXPORT_SYMBOL(wake_up_process_mutex_sync); +int fastcall wake_up_state(struct task_struct *p, unsigned int state) +{ + int ret = try_to_wake_up(p, state | TASK_RUNNING_MUTEX, 0, 0); mcount(); return ret; } @@ -3550,7 +3585,8 @@ need_resched: int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key) { - return try_to_wake_up(curr->private, mode, sync); + return try_to_wake_up(curr->private, mode | TASK_RUNNING_MUTEX, + sync, 0); } EXPORT_SYMBOL(default_wake_function); @@ -3591,8 +3627,9 @@ void fastcall __wake_up(wait_queue_head_ unsigned long flags; spin_lock_irqsave(&q->lock, flags); - __wake_up_common(q, mode, nr_exclusive, 0, key); + __wake_up_common(q, mode, nr_exclusive, 1, key); spin_unlock_irqrestore(&q->lock, flags); + preempt_check_resched_delayed(); } EXPORT_SYMBOL(__wake_up); @@ -3642,8 +3679,9 @@ void fastcall complete(struct completion spin_lock_irqsave(&x->wait.lock, flags); x->done++; __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, - 1, 0, NULL); + 1, 1, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); + preempt_check_resched_delayed(); } EXPORT_SYMBOL(complete); @@ -3654,11 +3692,18 @@ void fastcall complete_all(struct comple spin_lock_irqsave(&x->wait.lock, flags); x->done += UINT_MAX/2; __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, - 0, 0, NULL); + 0, 1, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); + preempt_check_resched_delayed(); } EXPORT_SYMBOL(complete_all); +unsigned int fastcall completion_done(struct completion *x) +{ + return x->done; +} +EXPORT_SYMBOL(completion_done); + void fastcall __sched wait_for_completion(struct completion *x) { might_sleep(); @@ -4499,10 +4544,7 @@ asmlinkage long sys_sched_yield(void) * Since we are going to call schedule() anyway, there's * no need to preempt or enable interrupts: */ - __release(rq->lock); - spin_release(&rq->lock.dep_map, 1, _THIS_IP_); - 
_raw_spin_unlock(&rq->lock); - preempt_enable_no_resched(); + spin_unlock_no_resched(&rq->lock); schedule(); @@ -4545,7 +4587,7 @@ EXPORT_SYMBOL(cond_resched); * operations here to prevent schedule() from being called twice (once via * spin_unlock(), once by hand). */ -int cond_resched_lock(spinlock_t *lock) +int __cond_resched_raw_spinlock(raw_spinlock_t *lock) { int ret = 0; @@ -4556,24 +4598,23 @@ int cond_resched_lock(spinlock_t *lock) spin_lock(lock); } if (need_resched() && system_state == SYSTEM_RUNNING) { - spin_release(&lock->dep_map, 1, _THIS_IP_); - _raw_spin_unlock(lock); - preempt_enable_no_resched(); + spin_unlock_no_resched(lock); __cond_resched(); ret = 1; spin_lock(lock); } return ret; } -EXPORT_SYMBOL(cond_resched_lock); +EXPORT_SYMBOL(__cond_resched_raw_spinlock); /* * Voluntarily preempt a process context that has softirqs disabled: */ int __sched cond_resched_softirq(void) { +#ifndef CONFIG_PREEMPT_RT WARN_ON_ONCE(!in_softirq()); - +#endif if (need_resched() && system_state == SYSTEM_RUNNING) { local_bh_enable(); __cond_resched(); @@ -4757,7 +4798,7 @@ out_unlock: return retval; } -static const char stat_nam[] = "RSDTtZX"; +static const char stat_nam[] = "RMSDTtZX"; static void show_task(struct task_struct *p) { @@ -4765,19 +4806,23 @@ static void show_task(struct task_struct unsigned state; state = p->state ? __ffs(p->state) + 1 : 0; - printk("%-13.13s %c", p->comm, - state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); + printk("%-13.13s %c [%p]", p->comm, + state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?', p); #if BITS_PER_LONG == 32 - if (state == TASK_RUNNING) + if (0 && (state == TASK_RUNNING)) printk(" running "); else printk(" %08lx ", thread_saved_pc(p)); #else - if (state == TASK_RUNNING) + if (0 && (state == TASK_RUNNING)) printk(" running task "); else printk(" %016lx ", thread_saved_pc(p)); #endif + if (task_curr(p)) + printk("[curr] "); + else if (p->se.on_rq) + printk("[on rq #%d] ", task_cpu(p)); #ifdef CONFIG_DEBUG_STACK_USAGE { unsigned long *n = end_of_stack(p); Index: linux-rt-rebase.q/kernel/spinlock.c =================================================================== --- linux-rt-rebase.q.orig/kernel/spinlock.c +++ linux-rt-rebase.q/kernel/spinlock.c @@ -21,7 +21,7 @@ #include #include -int __lockfunc _spin_trylock(spinlock_t *lock) +int __lockfunc __spin_trylock(raw_spinlock_t *lock) { preempt_disable(); if (_raw_spin_trylock(lock)) { @@ -32,9 +32,46 @@ int __lockfunc _spin_trylock(spinlock_t preempt_enable(); return 0; } -EXPORT_SYMBOL(_spin_trylock); +EXPORT_SYMBOL(__spin_trylock); -int __lockfunc _read_trylock(rwlock_t *lock) +int __lockfunc __spin_trylock_irq(raw_spinlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + + if (_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + + __preempt_enable_no_resched(); + local_irq_enable(); + preempt_check_resched(); + + return 0; +} +EXPORT_SYMBOL(__spin_trylock_irq); + +int __lockfunc __spin_trylock_irqsave(raw_spinlock_t *lock, + unsigned long *flags) +{ + local_irq_save(*flags); + preempt_disable(); + + if (_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + + __preempt_enable_no_resched(); + local_irq_restore(*flags); + preempt_check_resched(); + + return 0; +} +EXPORT_SYMBOL(__spin_trylock_irqsave); + +int __lockfunc __read_trylock(raw_rwlock_t *lock) { preempt_disable(); if (_raw_read_trylock(lock)) { @@ -45,9 +82,9 @@ int __lockfunc _read_trylock(rwlock_t *l preempt_enable(); return 0; } 
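/*
 * The __cond_resched_raw_spinlock() change above keeps the "drop the lock,
 * reschedule exactly once, retake the lock" shape, using an unlock variant
 * that does not itself trigger a reschedule.  A rough userspace analogue:
 * the toy_* name is invented, the need_resched decision is passed in by the
 * caller, and sched_yield() stands in for the single deliberate reschedule.
 */
#include <pthread.h>
#include <sched.h>

static int toy_cond_resched_lock(pthread_mutex_t *lock, int need_resched)
{
        if (!need_resched)
                return 0;

        pthread_mutex_unlock(lock);     /* cf. spin_unlock_no_resched() */
        sched_yield();                  /* the one controlled reschedule */
        pthread_mutex_lock(lock);
        return 1;
}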
-EXPORT_SYMBOL(_read_trylock); +EXPORT_SYMBOL(__read_trylock); -int __lockfunc _write_trylock(rwlock_t *lock) +int __lockfunc __write_trylock(raw_rwlock_t *lock) { preempt_disable(); if (_raw_write_trylock(lock)) { @@ -58,7 +95,21 @@ int __lockfunc _write_trylock(rwlock_t * preempt_enable(); return 0; } -EXPORT_SYMBOL(_write_trylock); +EXPORT_SYMBOL(__write_trylock); + +int __lockfunc __write_trylock_irqsave(raw_rwlock_t *lock, unsigned long *flags) +{ + int ret; + + local_irq_save(*flags); + ret = __write_trylock(lock); + if (ret) + return ret; + + local_irq_restore(*flags); + return 0; +} +EXPORT_SYMBOL(__write_trylock_irqsave); /* * If lockdep is enabled then we use the non-preemption spin-ops @@ -66,17 +117,17 @@ EXPORT_SYMBOL(_write_trylock); * not re-enabled during lock-acquire (which the preempt-spin-ops do): */ #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \ - defined(CONFIG_DEBUG_LOCK_ALLOC) + defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_PREEMPT_RT) -void __lockfunc _read_lock(rwlock_t *lock) +void __lockfunc __read_lock(raw_rwlock_t *lock) { preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); } -EXPORT_SYMBOL(_read_lock); +EXPORT_SYMBOL(__read_lock); -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +unsigned long __lockfunc __spin_lock_irqsave(raw_spinlock_t *lock) { unsigned long flags; @@ -95,27 +146,27 @@ unsigned long __lockfunc _spin_lock_irqs #endif return flags; } -EXPORT_SYMBOL(_spin_lock_irqsave); +EXPORT_SYMBOL(__spin_lock_irqsave); -void __lockfunc _spin_lock_irq(spinlock_t *lock) +void __lockfunc __spin_lock_irq(raw_spinlock_t *lock) { local_irq_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock_irq); +EXPORT_SYMBOL(__spin_lock_irq); -void __lockfunc _spin_lock_bh(spinlock_t *lock) +void __lockfunc __spin_lock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock_bh); +EXPORT_SYMBOL(__spin_lock_bh); -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) +unsigned long __lockfunc __read_lock_irqsave(raw_rwlock_t *lock) { unsigned long flags; @@ -125,27 +176,27 @@ unsigned long __lockfunc _read_lock_irqs LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); return flags; } -EXPORT_SYMBOL(_read_lock_irqsave); +EXPORT_SYMBOL(__read_lock_irqsave); -void __lockfunc _read_lock_irq(rwlock_t *lock) +void __lockfunc __read_lock_irq(raw_rwlock_t *lock) { local_irq_disable(); preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); } -EXPORT_SYMBOL(_read_lock_irq); +EXPORT_SYMBOL(__read_lock_irq); -void __lockfunc _read_lock_bh(rwlock_t *lock) +void __lockfunc __read_lock_bh(raw_rwlock_t *lock) { local_bh_disable(); preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); } -EXPORT_SYMBOL(_read_lock_bh); +EXPORT_SYMBOL(__read_lock_bh); -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) +unsigned long __lockfunc __write_lock_irqsave(raw_rwlock_t *lock) { unsigned long flags; @@ -155,43 +206,43 @@ unsigned long __lockfunc _write_lock_irq LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); return flags; } -EXPORT_SYMBOL(_write_lock_irqsave); 
+EXPORT_SYMBOL(__write_lock_irqsave); -void __lockfunc _write_lock_irq(rwlock_t *lock) +void __lockfunc __write_lock_irq(raw_rwlock_t *lock) { local_irq_disable(); preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); } -EXPORT_SYMBOL(_write_lock_irq); +EXPORT_SYMBOL(__write_lock_irq); -void __lockfunc _write_lock_bh(rwlock_t *lock) +void __lockfunc __write_lock_bh(raw_rwlock_t *lock) { local_bh_disable(); preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); } -EXPORT_SYMBOL(_write_lock_bh); +EXPORT_SYMBOL(__write_lock_bh); -void __lockfunc _spin_lock(spinlock_t *lock) +void __lockfunc __spin_lock(raw_spinlock_t *lock) { preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock); +EXPORT_SYMBOL(__spin_lock); -void __lockfunc _write_lock(rwlock_t *lock) +void __lockfunc __write_lock(raw_rwlock_t *lock) { preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); } -EXPORT_SYMBOL(_write_lock); +EXPORT_SYMBOL(__write_lock); #else /* CONFIG_PREEMPT: */ @@ -204,7 +255,7 @@ EXPORT_SYMBOL(_write_lock); */ #define BUILD_LOCK_OPS(op, locktype) \ -void __lockfunc _##op##_lock(locktype##_t *lock) \ +void __lockfunc __##op##_lock(locktype##_t *lock) \ { \ for (;;) { \ preempt_disable(); \ @@ -214,15 +265,16 @@ void __lockfunc _##op##_lock(locktype##_ \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ - while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + while (!__raw_##op##_can_lock(&(lock)->raw_lock) && \ + (lock)->break_lock) \ + __raw_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ } \ \ -EXPORT_SYMBOL(_##op##_lock); \ +EXPORT_SYMBOL(__##op##_lock); \ \ -unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ +unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ { \ unsigned long flags; \ \ @@ -236,23 +288,24 @@ unsigned long __lockfunc _##op##_lock_ir \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ - while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + while (!__raw_##op##_can_lock(&(lock)->raw_lock) && \ + (lock)->break_lock) \ + __raw_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ return flags; \ } \ \ -EXPORT_SYMBOL(_##op##_lock_irqsave); \ +EXPORT_SYMBOL(__##op##_lock_irqsave); \ \ -void __lockfunc _##op##_lock_irq(locktype##_t *lock) \ +void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ { \ - _##op##_lock_irqsave(lock); \ + __##op##_lock_irqsave(lock); \ } \ \ -EXPORT_SYMBOL(_##op##_lock_irq); \ +EXPORT_SYMBOL(__##op##_lock_irq); \ \ -void __lockfunc _##op##_lock_bh(locktype##_t *lock) \ +void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ { \ unsigned long flags; \ \ @@ -261,39 +314,40 @@ void __lockfunc _##op##_lock_bh(locktype /* irq-disabling. 
We use the generic preemption-aware */ \ /* function: */ \ /**/ \ - flags = _##op##_lock_irqsave(lock); \ + flags = __##op##_lock_irqsave(lock); \ local_bh_disable(); \ local_irq_restore(flags); \ } \ \ -EXPORT_SYMBOL(_##op##_lock_bh) +EXPORT_SYMBOL(__##op##_lock_bh) /* * Build preemption-friendly versions of the following * lock-spinning functions: * - * _[spin|read|write]_lock() - * _[spin|read|write]_lock_irq() - * _[spin|read|write]_lock_irqsave() - * _[spin|read|write]_lock_bh() + * __[spin|read|write]_lock() + * __[spin|read|write]_lock_irq() + * __[spin|read|write]_lock_irqsave() + * __[spin|read|write]_lock_bh() */ -BUILD_LOCK_OPS(spin, spinlock); -BUILD_LOCK_OPS(read, rwlock); -BUILD_LOCK_OPS(write, rwlock); +BUILD_LOCK_OPS(spin, raw_spinlock); +BUILD_LOCK_OPS(read, raw_rwlock); +BUILD_LOCK_OPS(write, raw_rwlock); #endif /* CONFIG_PREEMPT */ #ifdef CONFIG_DEBUG_LOCK_ALLOC -void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) +void __lockfunc __spin_lock_nested(raw_spinlock_t *lock, int subclass) { preempt_disable(); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } +EXPORT_SYMBOL(__spin_lock_nested); -EXPORT_SYMBOL(_spin_lock_nested); -unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) +unsigned long __lockfunc +__spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) { unsigned long flags; @@ -312,117 +366,130 @@ unsigned long __lockfunc _spin_lock_irqs #endif return flags; } - -EXPORT_SYMBOL(_spin_lock_irqsave_nested); +EXPORT_SYMBOL(__spin_lock_irqsave_nested); #endif -void __lockfunc _spin_unlock(spinlock_t *lock) +void __lockfunc __spin_unlock(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); preempt_enable(); } -EXPORT_SYMBOL(_spin_unlock); +EXPORT_SYMBOL(__spin_unlock); + +void __lockfunc __spin_unlock_no_resched(raw_spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + _raw_spin_unlock(lock); + __preempt_enable_no_resched(); +} +/* not exported */ -void __lockfunc _write_unlock(rwlock_t *lock) +void __lockfunc __write_unlock(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); preempt_enable(); } -EXPORT_SYMBOL(_write_unlock); +EXPORT_SYMBOL(__write_unlock); -void __lockfunc _read_unlock(rwlock_t *lock) +void __lockfunc __read_unlock(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); preempt_enable(); } -EXPORT_SYMBOL(_read_unlock); +EXPORT_SYMBOL(__read_unlock); -void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +void __lockfunc __spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); + __preempt_enable_no_resched(); local_irq_restore(flags); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_spin_unlock_irqrestore); +EXPORT_SYMBOL(__spin_unlock_irqrestore); -void __lockfunc _spin_unlock_irq(spinlock_t *lock) +void __lockfunc __spin_unlock_irq(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); + __preempt_enable_no_resched(); local_irq_enable(); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_spin_unlock_irq); +EXPORT_SYMBOL(__spin_unlock_irq); -void __lockfunc _spin_unlock_bh(spinlock_t *lock) +void __lockfunc __spin_unlock_bh(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); - preempt_enable_no_resched(); + 
__preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -EXPORT_SYMBOL(_spin_unlock_bh); +EXPORT_SYMBOL(__spin_unlock_bh); -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +void __lockfunc __read_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); + __preempt_enable_no_resched(); local_irq_restore(flags); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_read_unlock_irqrestore); +EXPORT_SYMBOL(__read_unlock_irqrestore); -void __lockfunc _read_unlock_irq(rwlock_t *lock) +void __lockfunc __read_unlock_irq(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); + __preempt_enable_no_resched(); local_irq_enable(); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_read_unlock_irq); +EXPORT_SYMBOL(__read_unlock_irq); -void __lockfunc _read_unlock_bh(rwlock_t *lock) +void __lockfunc __read_unlock_bh(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -EXPORT_SYMBOL(_read_unlock_bh); +EXPORT_SYMBOL(__read_unlock_bh); -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +void __lockfunc __write_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); + __preempt_enable_no_resched(); local_irq_restore(flags); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_write_unlock_irqrestore); +EXPORT_SYMBOL(__write_unlock_irqrestore); -void __lockfunc _write_unlock_irq(rwlock_t *lock) +void __lockfunc __write_unlock_irq(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); + __preempt_enable_no_resched(); local_irq_enable(); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_write_unlock_irq); +EXPORT_SYMBOL(__write_unlock_irq); -void __lockfunc _write_unlock_bh(rwlock_t *lock) +void __lockfunc __write_unlock_bh(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -EXPORT_SYMBOL(_write_unlock_bh); +EXPORT_SYMBOL(__write_unlock_bh); -int __lockfunc _spin_trylock_bh(spinlock_t *lock) +int __lockfunc __spin_trylock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); @@ -431,18 +498,30 @@ int __lockfunc _spin_trylock_bh(spinlock return 1; } - preempt_enable_no_resched(); + __preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); + return 0; } -EXPORT_SYMBOL(_spin_trylock_bh); +EXPORT_SYMBOL(__spin_trylock_bh); -int in_lock_functions(unsigned long addr) +int notrace in_lock_functions(unsigned long addr) { /* Linker adds these: start and end of __lockfunc functions */ extern char __lock_text_start[], __lock_text_end[]; return addr >= (unsigned long)__lock_text_start - && addr < (unsigned long)__lock_text_end; + && addr < (unsigned long)__lock_text_end; } EXPORT_SYMBOL(in_lock_functions); + +void notrace __debug_atomic_dec_and_test(atomic_t *v) +{ + static int warn_once = 1; + + if (!atomic_read(v) && warn_once) { + warn_once = 0; + printk("BUG: atomic counter underflow!\n"); + WARN_ON(1); + } +} Index: linux-rt-rebase.q/lib/dec_and_lock.c 
=================================================================== --- linux-rt-rebase.q.orig/lib/dec_and_lock.c +++ linux-rt-rebase.q/lib/dec_and_lock.c @@ -17,7 +17,7 @@ * because the spin-lock and the decrement must be * "atomic". */ -int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) +int __atomic_dec_and_spin_lock(atomic_t *atomic, raw_spinlock_t *lock) { #ifdef CONFIG_SMP /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ @@ -32,4 +32,4 @@ int _atomic_dec_and_lock(atomic_t *atomi return 0; } -EXPORT_SYMBOL(_atomic_dec_and_lock); +EXPORT_SYMBOL(__atomic_dec_and_spin_lock); Index: linux-rt-rebase.q/lib/kernel_lock.c =================================================================== --- linux-rt-rebase.q.orig/lib/kernel_lock.c +++ linux-rt-rebase.q/lib/kernel_lock.c @@ -24,7 +24,7 @@ * * Don't use in new code. */ -static DECLARE_MUTEX(kernel_sem); +DECLARE_MUTEX(kernel_sem); /* * Re-acquire the kernel semaphore. @@ -44,7 +44,7 @@ int __lockfunc __reacquire_kernel_lock(v BUG_ON(saved_lock_depth < 0); task->lock_depth = -1; - preempt_enable_no_resched(); + __preempt_enable_no_resched(); down(&kernel_sem); Index: linux-rt-rebase.q/lib/locking-selftest.c =================================================================== --- linux-rt-rebase.q.orig/lib/locking-selftest.c +++ linux-rt-rebase.q/lib/locking-selftest.c @@ -940,6 +940,9 @@ static void dotest(void (*testcase_fn)(v { unsigned long saved_preempt_count = preempt_count(); int expected_failure = 0; +#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_DEBUG_RT_MUTEXES) + int saved_lock_count = current->lock_count; +#endif WARN_ON(irqs_disabled()); @@ -989,6 +992,9 @@ static void dotest(void (*testcase_fn)(v #endif reset_locks(); +#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_DEBUG_RT_MUTEXES) + current->lock_count = saved_lock_count; +#endif } static inline void print_testname(const char *testname) Index: linux-rt-rebase.q/lib/plist.c =================================================================== --- linux-rt-rebase.q.orig/lib/plist.c +++ linux-rt-rebase.q/lib/plist.c @@ -53,7 +53,9 @@ static void plist_check_list(struct list static void plist_check_head(struct plist_head *head) { +#ifndef CONFIG_PREEMPT_RT WARN_ON(!head->lock); +#endif if (head->lock) WARN_ON_SMP(!spin_is_locked(head->lock)); plist_check_list(&head->prio_list); Index: linux-rt-rebase.q/lib/rwsem-spinlock.c =================================================================== --- linux-rt-rebase.q.orig/lib/rwsem-spinlock.c +++ linux-rt-rebase.q/lib/rwsem-spinlock.c @@ -20,7 +20,7 @@ struct rwsem_waiter { /* * initialise the semaphore */ -void __init_rwsem(struct rw_semaphore *sem, const char *name, +void __compat_init_rwsem(struct compat_rw_semaphore *sem, const char *name, struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -44,8 +44,8 @@ void __init_rwsem(struct rw_semaphore *s * - woken process blocks are discarded from the list after having task zeroed * - writers are only woken if wakewrite is non-zero */ -static inline struct rw_semaphore * -__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) +static inline struct compat_rw_semaphore * +__rwsem_do_wake(struct compat_rw_semaphore *sem, int wakewrite) { struct rwsem_waiter *waiter; struct task_struct *tsk; @@ -103,8 +103,8 @@ __rwsem_do_wake(struct rw_semaphore *sem /* * wake a single writer */ -static inline struct rw_semaphore * -__rwsem_wake_one_writer(struct rw_semaphore *sem) +static inline struct compat_rw_semaphore * +__rwsem_wake_one_writer(struct 
compat_rw_semaphore *sem) { struct rwsem_waiter *waiter; struct task_struct *tsk; @@ -125,7 +125,7 @@ __rwsem_wake_one_writer(struct rw_semaph /* * get a read lock on the semaphore */ -void fastcall __sched __down_read(struct rw_semaphore *sem) +void fastcall __sched __down_read(struct compat_rw_semaphore *sem) { struct rwsem_waiter waiter; struct task_struct *tsk; @@ -168,7 +168,7 @@ void fastcall __sched __down_read(struct /* * trylock for reading -- returns 1 if successful, 0 if contention */ -int fastcall __down_read_trylock(struct rw_semaphore *sem) +int fastcall __down_read_trylock(struct compat_rw_semaphore *sem) { unsigned long flags; int ret = 0; @@ -191,7 +191,8 @@ int fastcall __down_read_trylock(struct * get a write lock on the semaphore * - we increment the waiting count anyway to indicate an exclusive lock */ -void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass) +void fastcall __sched +__down_write_nested(struct compat_rw_semaphore *sem, int subclass) { struct rwsem_waiter waiter; struct task_struct *tsk; @@ -231,7 +232,7 @@ void fastcall __sched __down_write_neste ; } -void fastcall __sched __down_write(struct rw_semaphore *sem) +void fastcall __sched __down_write(struct compat_rw_semaphore *sem) { __down_write_nested(sem, 0); } @@ -239,7 +240,7 @@ void fastcall __sched __down_write(struc /* * trylock for writing -- returns 1 if successful, 0 if contention */ -int fastcall __down_write_trylock(struct rw_semaphore *sem) +int fastcall __down_write_trylock(struct compat_rw_semaphore *sem) { unsigned long flags; int ret = 0; @@ -260,7 +261,7 @@ int fastcall __down_write_trylock(struct /* * release a read lock on the semaphore */ -void fastcall __up_read(struct rw_semaphore *sem) +void fastcall __up_read(struct compat_rw_semaphore *sem) { unsigned long flags; @@ -275,7 +276,7 @@ void fastcall __up_read(struct rw_semaph /* * release a write lock on the semaphore */ -void fastcall __up_write(struct rw_semaphore *sem) +void fastcall __up_write(struct compat_rw_semaphore *sem) { unsigned long flags; @@ -292,7 +293,7 @@ void fastcall __up_write(struct rw_semap * downgrade a write lock into a read lock * - just wake up any readers at the front of the queue */ -void fastcall __downgrade_write(struct rw_semaphore *sem) +void fastcall __downgrade_write(struct compat_rw_semaphore *sem) { unsigned long flags; @@ -305,7 +306,7 @@ void fastcall __downgrade_write(struct r spin_unlock_irqrestore(&sem->wait_lock, flags); } -EXPORT_SYMBOL(__init_rwsem); +EXPORT_SYMBOL(__compat_init_rwsem); EXPORT_SYMBOL(__down_read); EXPORT_SYMBOL(__down_read_trylock); EXPORT_SYMBOL(__down_write_nested); Index: linux-rt-rebase.q/lib/rwsem.c =================================================================== --- linux-rt-rebase.q.orig/lib/rwsem.c +++ linux-rt-rebase.q/lib/rwsem.c @@ -11,8 +11,8 @@ /* * Initialize an rwsem: */ -void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key) +void __compat_init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -26,7 +26,7 @@ void __init_rwsem(struct rw_semaphore *s INIT_LIST_HEAD(&sem->wait_list); } -EXPORT_SYMBOL(__init_rwsem); +EXPORT_SYMBOL(__compat_init_rwsem); struct rwsem_waiter { struct list_head list; Index: linux-rt-rebase.q/lib/semaphore-sleepers.c =================================================================== --- linux-rt-rebase.q.orig/lib/semaphore-sleepers.c +++ linux-rt-rebase.q/lib/semaphore-sleepers.c @@ -15,6 +15,7 @@ 
#include #include #include +#include #include /* @@ -48,12 +49,12 @@ * we cannot lose wakeup events. */ -fastcall void __up(struct semaphore *sem) +fastcall void __compat_up(struct compat_semaphore *sem) { wake_up(&sem->wait); } -fastcall void __sched __down(struct semaphore * sem) +fastcall void __sched __compat_down(struct compat_semaphore * sem) { struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); @@ -90,7 +91,7 @@ fastcall void __sched __down(struct sema tsk->state = TASK_RUNNING; } -fastcall int __sched __down_interruptible(struct semaphore * sem) +fastcall int __sched __compat_down_interruptible(struct compat_semaphore * sem) { int retval = 0; struct task_struct *tsk = current; @@ -153,7 +154,7 @@ fastcall int __sched __down_interruptibl * single "cmpxchg" without failure cases, * but then it wouldn't work on a 386. */ -fastcall int __down_trylock(struct semaphore * sem) +fastcall int __compat_down_trylock(struct compat_semaphore * sem) { int sleepers; unsigned long flags; @@ -174,3 +175,10 @@ fastcall int __down_trylock(struct semap spin_unlock_irqrestore(&sem->wait.lock, flags); return 1; } + +int fastcall compat_sem_is_locked(struct compat_semaphore *sem) +{ + return (int) atomic_read(&sem->count) < 0; +} + +EXPORT_SYMBOL(compat_sem_is_locked); Index: linux-rt-rebase.q/lib/spinlock_debug.c =================================================================== --- linux-rt-rebase.q.orig/lib/spinlock_debug.c +++ linux-rt-rebase.q/lib/spinlock_debug.c @@ -13,8 +13,8 @@ #include #include -void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key) +void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -23,16 +23,16 @@ void __spin_lock_init(spinlock_t *lock, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->raw_lock = (__raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; } -EXPORT_SYMBOL(__spin_lock_init); +EXPORT_SYMBOL(__raw_spin_lock_init); -void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key) +void __raw_rwlock_init(raw_rwlock_t *lock, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -41,15 +41,15 @@ void __rwlock_init(rwlock_t *lock, const debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; + lock->raw_lock = (__raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; lock->magic = RWLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; } -EXPORT_SYMBOL(__rwlock_init); +EXPORT_SYMBOL(__raw_rwlock_init); -static void spin_bug(spinlock_t *lock, const char *msg) +static void spin_bug(raw_spinlock_t *lock, const char *msg) { struct task_struct *owner = NULL; @@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, c #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) static inline void -debug_spin_lock_before(spinlock_t *lock) +debug_spin_lock_before(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); SPIN_BUG_ON(lock->owner == current, lock, "recursion"); @@ -81,13 +81,13 @@ debug_spin_lock_before(spinlock_t *lock) lock, "cpu recursion"); } -static inline void debug_spin_lock_after(spinlock_t *lock) +static inline void 
debug_spin_lock_after(raw_spinlock_t *lock) { lock->owner_cpu = raw_smp_processor_id(); lock->owner = current; } -static inline void debug_spin_unlock(spinlock_t *lock) +static inline void debug_spin_unlock(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); @@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spi lock->owner_cpu = -1; } -static void __spin_lock_debug(spinlock_t *lock) +static void __spin_lock_debug(raw_spinlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t } } -void _raw_spin_lock(spinlock_t *lock) +void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) { debug_spin_lock_before(lock); if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) @@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock) debug_spin_lock_after(lock); } -int _raw_spin_trylock(spinlock_t *lock) +int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) { int ret = __raw_spin_trylock(&lock->raw_lock); @@ -148,13 +148,13 @@ int _raw_spin_trylock(spinlock_t *lock) return ret; } -void _raw_spin_unlock(spinlock_t *lock) +void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) { debug_spin_unlock(lock); __raw_spin_unlock(&lock->raw_lock); } -static void rwlock_bug(rwlock_t *lock, const char *msg) +static void rwlock_bug(raw_rwlock_t *lock, const char *msg) { if (!debug_locks_off()) return; @@ -167,8 +167,8 @@ static void rwlock_bug(rwlock_t *lock, c #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) -#if 0 /* __write_lock_debug() can lock up - maybe this can too? */ -static void __read_lock_debug(rwlock_t *lock) +#if 1 /* __write_lock_debug() can lock up - maybe this can too? */ +static void __raw_read_lock_debug(raw_rwlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -193,13 +193,13 @@ static void __read_lock_debug(rwlock_t * } #endif -void _raw_read_lock(rwlock_t *lock) +void __lockfunc _raw_read_lock(raw_rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_lock(&lock->raw_lock); + __raw_read_lock_debug(lock); } -int _raw_read_trylock(rwlock_t *lock) +int __lockfunc _raw_read_trylock(raw_rwlock_t *lock) { int ret = __raw_read_trylock(&lock->raw_lock); @@ -212,13 +212,13 @@ int _raw_read_trylock(rwlock_t *lock) return ret; } -void _raw_read_unlock(rwlock_t *lock) +void __lockfunc _raw_read_unlock(raw_rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); __raw_read_unlock(&lock->raw_lock); } -static inline void debug_write_lock_before(rwlock_t *lock) +static inline void debug_write_lock_before(raw_rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); RWLOCK_BUG_ON(lock->owner == current, lock, "recursion"); @@ -226,13 +226,13 @@ static inline void debug_write_lock_befo lock, "cpu recursion"); } -static inline void debug_write_lock_after(rwlock_t *lock) +static inline void debug_write_lock_after(raw_rwlock_t *lock) { lock->owner_cpu = raw_smp_processor_id(); lock->owner = current; } -static inline void debug_write_unlock(rwlock_t *lock) +static inline void debug_write_unlock(raw_rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); @@ -242,8 +242,8 @@ static inline void debug_write_unlock(rw lock->owner_cpu = -1; } -#if 0 /* This can cause lockups */ -static void __write_lock_debug(rwlock_t *lock) +#if 1 /* This can cause lockups */ 
+static void __raw_write_lock_debug(raw_rwlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -268,14 +268,14 @@ static void __write_lock_debug(rwlock_t } #endif -void _raw_write_lock(rwlock_t *lock) +void __lockfunc _raw_write_lock(raw_rwlock_t *lock) { debug_write_lock_before(lock); - __raw_write_lock(&lock->raw_lock); + __raw_write_lock_debug(lock); debug_write_lock_after(lock); } -int _raw_write_trylock(rwlock_t *lock) +int __lockfunc _raw_write_trylock(raw_rwlock_t *lock) { int ret = __raw_write_trylock(&lock->raw_lock); @@ -290,7 +290,7 @@ int _raw_write_trylock(rwlock_t *lock) return ret; } -void _raw_write_unlock(rwlock_t *lock) +void __lockfunc _raw_write_unlock(raw_rwlock_t *lock) { debug_write_unlock(lock); __raw_write_unlock(&lock->raw_lock); patches/pause-on-oops-head-tail.patch0000664000077200007720000000747710655544576017135 0ustar mingomingoSubject: [patch] introduce pause_on_oops_head/tail boot options From: Ingo Molnar if a system crashes with hard to debug oopses which scroll off the screen then it's useful to stop the crash right after the register info or right after the callback printout. Signed-off-by: Ingo Molnar --- arch/i386/kernel/traps.c | 6 +++++ arch/x86_64/kernel/traps.c | 2 + include/linux/kernel.h | 4 +++ kernel/panic.c | 49 ++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 60 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/i386/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/traps.c +++ linux-rt-rebase.q/arch/i386/kernel/traps.c @@ -257,8 +257,14 @@ static void show_stack_log_lvl(struct ta printk("\n%s ", log_lvl); printk("%08lx ", *stack++); } + + pause_on_oops_head(); + printk("\n%sCall Trace:\n", log_lvl); show_trace_log_lvl(task, regs, esp, log_lvl); + + pause_on_oops_tail(); + debug_show_held_locks(task); } Index: linux-rt-rebase.q/arch/x86_64/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/traps.c +++ linux-rt-rebase.q/arch/x86_64/kernel/traps.c @@ -348,9 +348,11 @@ static struct stacktrace_ops print_trace void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack) { + pause_on_oops_head(); printk("\nCall Trace:\n"); dump_trace(tsk, regs, stack, &print_trace_ops, NULL); printk("\n"); + pause_on_oops_tail(); print_traces(tsk); } Index: linux-rt-rebase.q/include/linux/kernel.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/kernel.h +++ linux-rt-rebase.q/include/linux/kernel.h @@ -202,6 +202,10 @@ extern void wake_up_klogd(void); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; extern int panic_on_oops; + +extern void pause_on_oops_head(void); +extern void pause_on_oops_tail(void); + extern int panic_on_unrecovered_nmi; extern int tainted; extern const char *print_tainted(void); Index: linux-rt-rebase.q/kernel/panic.c =================================================================== --- linux-rt-rebase.q.orig/kernel/panic.c +++ linux-rt-rebase.q/kernel/panic.c @@ -26,7 +26,38 @@ static int pause_on_oops; static int pause_on_oops_flag; static DEFINE_SPINLOCK(pause_on_oops_lock); -int panic_timeout; +/* + * Debugging helper: freeze all console output after printing the + * first oops's head (or tail): + */ +static int pause_on_oops_head_flag = 0; +static int pause_on_oops_tail_flag = 0; + +static void 
pause_on_oops_loop(int flag) +{ + switch (flag) { + default: + break; + case 1: + for (;;) + local_irq_disable(); + case 2: + for (;;) + local_irq_enable(); + } +} + +void pause_on_oops_head(void) +{ + pause_on_oops_loop(pause_on_oops_head_flag); +} + +void pause_on_oops_tail(void) +{ + pause_on_oops_loop(pause_on_oops_tail_flag); +} + +int panic_timeout __read_mostly; ATOMIC_NOTIFIER_HEAD(panic_notifier_list); @@ -191,6 +222,22 @@ static int __init pause_on_oops_setup(ch } __setup("pause_on_oops=", pause_on_oops_setup); +static int __init pause_on_oops_head_setup(char *str) +{ + pause_on_oops_head_flag = simple_strtoul(str, NULL, 0); + printk(KERN_INFO "pause_on_oops_head: %d\n", pause_on_oops_head_flag); + return 1; +} +__setup("pause_on_oops_head=", pause_on_oops_head_setup); + +static int __init pause_on_oops_tail_setup(char *str) +{ + pause_on_oops_tail_flag = simple_strtoul(str, NULL, 0); + printk(KERN_INFO "pause_on_oops_tail: %d\n", pause_on_oops_tail_flag); + return 1; +} +__setup("pause_on_oops_tail=", pause_on_oops_tail_setup); + static void spin_msec(int msecs) { int i; patches/preempt-irqs-core.patch0000664000077200007720000006433110655544573016145 0ustar mingomingo--- include/linux/interrupt.h | 19 ++ include/linux/irq.h | 26 +++- include/linux/sched.h | 14 ++ init/main.c | 5 kernel/irq/autoprobe.c | 1 kernel/irq/chip.c | 33 ++++- kernel/irq/handle.c | 37 +++++ kernel/irq/internals.h | 4 kernel/irq/manage.c | 292 +++++++++++++++++++++++++++++++++++++++++++++- kernel/irq/proc.c | 129 ++++++++++++++------ kernel/irq/spurious.c | 11 + kernel/sched.c | 23 +++ 12 files changed, 539 insertions(+), 55 deletions(-) Index: linux-rt-rebase.q/include/linux/interrupt.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/interrupt.h +++ linux-rt-rebase.q/include/linux/interrupt.h @@ -50,10 +50,12 @@ #define IRQF_SAMPLE_RANDOM 0x00000040 #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 -#define IRQF_TIMER 0x00000200 +#define __IRQF_TIMER 0x00000200 #define IRQF_PERCPU 0x00000400 #define IRQF_NOBALANCING 0x00000800 #define IRQF_IRQPOLL 0x00001000 +#define IRQF_NODELAY 0x00002000 +#define IRQF_TIMER (__IRQF_TIMER | IRQF_NODELAY) /* * Migration helpers. Scheduled for removal in 9/2007 @@ -87,7 +89,7 @@ struct irqaction { void *dev_id; struct irqaction *next; int irq; - struct proc_dir_entry *dir; + struct proc_dir_entry *dir, *threaded; }; extern irqreturn_t no_action(int cpl, void *dev_id); @@ -209,6 +211,7 @@ static inline int disable_irq_wake(unsig #ifndef __ARCH_SET_SOFTIRQ_PENDING #define set_softirq_pending(x) (local_softirq_pending() = (x)) +// FIXME: PREEMPT_RT: set_bit()? 
#define or_softirq_pending(x) (local_softirq_pending() |= (x)) #endif @@ -284,12 +287,18 @@ struct softirq_action void *data; }; -#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) -#define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr) - asmlinkage void do_softirq(void); extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); extern void softirq_init(void); + +#ifdef CONFIG_PREEMPT_HARDIRQS +# define __raise_softirq_irqoff(nr) raise_softirq_irqoff(nr) +# define __do_raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) +#else +# define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) +# define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr) +#endif + extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); extern void FASTCALL(raise_softirq(unsigned int nr)); extern void wakeup_irqd(void); Index: linux-rt-rebase.q/include/linux/irq.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/irq.h +++ linux-rt-rebase.q/include/linux/irq.h @@ -19,10 +19,12 @@ #include #include #include +#include #include #include #include +#include struct irq_desc; typedef void fastcall (*irq_flow_handler_t)(unsigned int irq, @@ -61,6 +63,7 @@ typedef void fastcall (*irq_flow_handler #define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */ #define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ #define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ +#define IRQ_NODELAY 0x40000000 /* IRQ must run immediately */ #ifdef CONFIG_IRQ_PER_CPU # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) @@ -141,6 +144,9 @@ struct irq_chip { * @irq_count: stats field to detect stalled irqs * @irqs_unhandled: stats field for spurious unhandled interrupts * @last_unhandled: aging timer for unhandled count + * @thread: Thread pointer for threaded preemptible irq handling + * @wait_for_handler: Waitqueue to wait for a running preemptible handler + * @cycles: Timestamp for stats and debugging * @lock: locking for SMP * @affinity: IRQ affinity on SMP * @cpu: cpu index useful for balancing @@ -163,6 +169,9 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned int irqs_unhandled; unsigned long last_unhandled; /* Aging timer for unhandled count */ + struct task_struct *thread; + wait_queue_head_t wait_for_handler; + cycles_t timestamp; spinlock_t lock; #ifdef CONFIG_SMP cpumask_t affinity; @@ -387,7 +396,22 @@ extern int set_irq_msi(unsigned int irq, #define get_irq_data(irq) (irq_desc[irq].handler_data) #define get_irq_msi(irq) (irq_desc[irq].msi_desc) -#endif /* CONFIG_GENERIC_HARDIRQS */ +/* Early initialization of irqs */ +extern void early_init_hardirqs(void); +extern cycles_t irq_timestamp(unsigned int irq); + +#if defined(CONFIG_PREEMPT_HARDIRQS) +extern void init_hardirqs(void); +#else +static inline void init_hardirqs(void) { } +#endif + +#else /* end GENERIC HARDIRQS */ + +static inline void early_init_hardirqs(void) { } +static inline void init_hardirqs(void) { } + +#endif /* !CONFIG_GENERIC_HARDIRQS */ #endif /* !CONFIG_S390 */ Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -94,6 +94,11 @@ extern int softirq_preemption; #else # define softirq_preemption 0 #endif +#ifdef CONFIG_PREEMPT_HARDIRQS +extern 
int hardirq_preemption; +#else +# define hardirq_preemption 0 +#endif struct exec_domain; struct futex_pi_state; @@ -1452,6 +1457,7 @@ static inline void put_task_struct(struc #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ #define PF_SOFTIRQ 0x04000000 /* softirq context */ +#define PF_HARDIRQ 0x08000000 /* hardirq context */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ @@ -1884,6 +1890,7 @@ extern int cond_resched(void); extern int cond_resched_lock(spinlock_t * lock); extern int cond_resched_softirq(void); extern int cond_resched_softirq_context(void); +extern int cond_resched_hardirq_context(void); /* * Does a critical section need to be broken due to another @@ -1909,6 +1916,13 @@ static inline int softirq_need_resched(v return 0; } +static inline int hardirq_need_resched(void) +{ + if (hardirq_preemption && (current->flags & PF_HARDIRQ)) + return need_resched(); + return 0; +} + /* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. Index: linux-rt-rebase.q/init/main.c =================================================================== --- linux-rt-rebase.q.orig/init/main.c +++ linux-rt-rebase.q/init/main.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -555,8 +556,10 @@ asmlinkage void __init start_kernel(void * fragile until we cpu_idle() for the first time. */ preempt_disable(); + build_all_zonelists(); page_alloc_init(); + early_init_hardirqs(); printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line); parse_early_param(); parse_args("Booting kernel", static_command_line, __start___param, @@ -834,6 +837,8 @@ static int __init kernel_init(void * unu smp_prepare_cpus(max_cpus); + init_hardirqs(); + do_pre_smp_initcalls(); smp_init(); Index: linux-rt-rebase.q/kernel/irq/autoprobe.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/autoprobe.c +++ linux-rt-rebase.q/kernel/irq/autoprobe.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include Index: linux-rt-rebase.q/kernel/irq/chip.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/chip.c +++ linux-rt-rebase.q/kernel/irq/chip.c @@ -269,8 +269,10 @@ static inline void mask_ack_irq(struct i if (desc->chip->mask_ack) desc->chip->mask_ack(irq); else { - desc->chip->mask(irq); - desc->chip->ack(irq); + if (desc->chip->mask) + desc->chip->mask(irq); + if (desc->chip->ack) + desc->chip->ack(irq); } } @@ -310,6 +312,11 @@ handle_simple_irq(unsigned int irq, stru desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING); desc->status |= IRQ_INPROGRESS; + /* + * hardirq redirection to the irqd process context: + */ + if (redirect_hardirq(desc)) + goto out_unlock; spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); @@ -358,6 +365,11 @@ handle_level_irq(unsigned int irq, struc } desc->status |= IRQ_INPROGRESS; + /* + * hardirq redirection to the irqd process context: + */ + if (redirect_hardirq(desc)) + goto out_unlock; desc->status &= ~IRQ_PENDING; spin_unlock(&desc->lock); @@ -411,6 +423,16 @@ handle_fasteoi_irq(unsigned int irq, str } desc->status |= IRQ_INPROGRESS; + + /* + * In the threaded case we fall back to a mask+eoi sequence: + */ + if 
(redirect_hardirq(desc)) { + if (desc->chip->mask) + desc->chip->mask(irq); + goto out; + } + desc->status &= ~IRQ_PENDING; spin_unlock(&desc->lock); @@ -422,7 +444,6 @@ handle_fasteoi_irq(unsigned int irq, str desc->status &= ~IRQ_INPROGRESS; out: desc->chip->eoi(irq); - spin_unlock(&desc->lock); } @@ -471,6 +492,12 @@ handle_edge_irq(unsigned int irq, struct /* Mark the IRQ currently in progress.*/ desc->status |= IRQ_INPROGRESS; + /* + * hardirq redirection to the irqd process context: + */ + if (redirect_hardirq(desc)) + goto out_unlock; + do { struct irqaction *action = desc->action; irqreturn_t action_ret; Index: linux-rt-rebase.q/kernel/irq/handle.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/handle.c +++ linux-rt-rebase.q/kernel/irq/handle.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -133,24 +134,54 @@ irqreturn_t handle_IRQ_event(unsigned in handle_dynamic_tick(action); - if (!(action->flags & IRQF_DISABLED)) - local_irq_enable_in_hardirq(); + /* + * Unconditionally enable interrupts for threaded + * IRQ handlers: + */ + if (!hardirq_count() || !(action->flags & IRQF_DISABLED)) + local_irq_enable(); do { + unsigned int preempt_count = preempt_count(); + ret = action->handler(irq, action->dev_id); + if (preempt_count() != preempt_count) { + print_symbol("BUG: unbalanced irq-handler preempt count in %s!\n", (unsigned long) action->handler); + printk("entered with %08x, exited with %08x.\n", preempt_count, preempt_count()); + dump_stack(); + preempt_count() = preempt_count; + } if (ret == IRQ_HANDLED) status |= action->flags; retval |= ret; action = action->next; } while (action); - if (status & IRQF_SAMPLE_RANDOM) + if (status & IRQF_SAMPLE_RANDOM) { + local_irq_enable(); add_interrupt_randomness(irq); + } local_irq_disable(); return retval; } +int redirect_hardirq(struct irq_desc *desc) +{ + /* + * Direct execution: + */ + if (!hardirq_preemption || (desc->status & IRQ_NODELAY) || + !desc->thread) + return 0; + + BUG_ON(!irqs_disabled()); + if (desc->thread && desc->thread->state != TASK_RUNNING) + wake_up_process(desc->thread); + + return 1; +} + #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ /** * __do_IRQ - original all in one highlevel IRQ handler Index: linux-rt-rebase.q/kernel/irq/internals.h =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/internals.h +++ linux-rt-rebase.q/kernel/irq/internals.h @@ -10,6 +10,10 @@ extern void irq_chip_set_defaults(struct /* Set default handler: */ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc); +extern int redirect_hardirq(struct irq_desc *desc); + +void recalculate_desc_flags(struct irq_desc *desc); + #ifdef CONFIG_PROC_FS extern void register_irq_proc(unsigned int irq); extern void register_handler_proc(unsigned int irq, struct irqaction *action); Index: linux-rt-rebase.q/kernel/irq/manage.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/manage.c +++ linux-rt-rebase.q/kernel/irq/manage.c @@ -8,8 +8,10 @@ */ #include -#include #include +#include +#include +#include #include #include "internals.h" @@ -33,8 +35,12 @@ void synchronize_irq(unsigned int irq) if (irq >= NR_IRQS) return; - while (desc->status & IRQ_INPROGRESS) - cpu_relax(); + if (hardirq_preemption && !(desc->status & IRQ_NODELAY)) + wait_event(desc->wait_for_handler, + !(desc->status & IRQ_INPROGRESS)); + else + while (desc->status & IRQ_INPROGRESS) + 
cpu_relax(); } EXPORT_SYMBOL(synchronize_irq); @@ -218,6 +224,21 @@ int set_irq_wake(unsigned int irq, unsig EXPORT_SYMBOL(set_irq_wake); /* + * If any action has IRQF_NODELAY then turn IRQ_NODELAY on: + */ +void recalculate_desc_flags(struct irq_desc *desc) +{ + struct irqaction *action; + + desc->status &= ~IRQ_NODELAY; + for (action = desc->action ; action; action = action->next) + if (action->flags & IRQF_NODELAY) + desc->status |= IRQ_NODELAY; +} + +static int start_irq_thread(int irq, struct irq_desc *desc); + +/* * Internal function that tells the architecture code whether a * particular irq has been exclusively allocated or is available * for driver use. @@ -282,6 +303,9 @@ int setup_irq(unsigned int irq, struct i rand_initialize_irq(irq); } + if (!(new->flags & IRQF_NODELAY)) + if (start_irq_thread(irq, desc)) + return -ENOMEM; /* * The following block of code has to be executed atomically */ @@ -325,6 +349,11 @@ int setup_irq(unsigned int irq, struct i if (!shared) { irq_chip_set_defaults(desc->chip); + /* + * Propagate any possible IRQF_NODELAY flag into IRQ_NODELAY: + */ + recalculate_desc_flags(desc); + #if defined(CONFIG_IRQ_PER_CPU) if (new->flags & IRQF_PERCPU) desc->status |= IRQ_PER_CPU; @@ -368,7 +397,7 @@ int setup_irq(unsigned int irq, struct i new->irq = irq; register_irq_proc(irq); - new->dir = NULL; + new->dir = new->threaded = NULL; register_handler_proc(irq, new); return 0; @@ -440,6 +469,7 @@ void free_irq(unsigned int irq, void *de else desc->chip->disable(irq); } + recalculate_desc_flags(desc); spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); @@ -563,3 +593,257 @@ int request_irq(unsigned int irq, irq_ha return retval; } EXPORT_SYMBOL(request_irq); + +#ifdef CONFIG_PREEMPT_HARDIRQS + +int hardirq_preemption = 1; + +EXPORT_SYMBOL(hardirq_preemption); + +static int __init hardirq_preempt_setup (char *str) +{ + if (!strncmp(str, "off", 3)) + hardirq_preemption = 0; + else + get_option(&str, &hardirq_preemption); + if (!hardirq_preemption) + printk("turning off hardirq preemption!\n"); + + return 1; +} + +__setup("hardirq-preempt=", hardirq_preempt_setup); + + +/* + * threaded simple handler + */ +static void thread_simple_irq(irq_desc_t *desc) +{ + struct irqaction *action = desc->action; + unsigned int irq = desc - irq_desc; + irqreturn_t action_ret; + + if (action && !desc->depth) { + spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + cond_resched_hardirq_context(); + spin_lock_irq(&desc->lock); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + } + desc->status &= ~IRQ_INPROGRESS; +} + +/* + * threaded level type irq handler + */ +static void thread_level_irq(irq_desc_t *desc) +{ + unsigned int irq = desc - irq_desc; + + thread_simple_irq(desc); + if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) + desc->chip->unmask(irq); +} + +/* + * threaded fasteoi type irq handler + */ +static void thread_fasteoi_irq(irq_desc_t *desc) +{ + unsigned int irq = desc - irq_desc; + + thread_simple_irq(desc); + if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) + desc->chip->unmask(irq); +} + +/* + * threaded edge type IRQ handler + */ +static void thread_edge_irq(irq_desc_t *desc) +{ + unsigned int irq = desc - irq_desc; + + do { + struct irqaction *action = desc->action; + irqreturn_t action_ret; + + if (unlikely(!action)) { + desc->status &= ~IRQ_INPROGRESS; + desc->chip->mask(irq); + return; + } + + /* + * When another irq arrived while we were handling + * one, we could have masked the irq. 
+ * Renable it, if it was not disabled in meantime. + */ + if (unlikely(((desc->status & (IRQ_PENDING | IRQ_MASKED)) == + (IRQ_PENDING | IRQ_MASKED)) && !desc->depth)) + desc->chip->unmask(irq); + + desc->status &= ~IRQ_PENDING; + spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + cond_resched_hardirq_context(); + spin_lock_irq(&desc->lock); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + } while ((desc->status & IRQ_PENDING) && !desc->depth); + + desc->status &= ~IRQ_INPROGRESS; +} + +/* + * threaded edge type IRQ handler + */ +static void thread_do_irq(irq_desc_t *desc) +{ + unsigned int irq = desc - irq_desc; + + do { + struct irqaction *action = desc->action; + irqreturn_t action_ret; + + if (unlikely(!action)) { + desc->status &= ~IRQ_INPROGRESS; + desc->chip->disable(irq); + return; + } + + desc->status &= ~IRQ_PENDING; + spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + cond_resched_hardirq_context(); + spin_lock_irq(&desc->lock); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + } while ((desc->status & IRQ_PENDING) && !desc->depth); + + desc->status &= ~IRQ_INPROGRESS; + desc->chip->end(irq); +} + +static void do_hardirq(struct irq_desc *desc) +{ + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + + if (!(desc->status & IRQ_INPROGRESS)) + goto out; + + if (desc->handle_irq == handle_simple_irq) + thread_simple_irq(desc); + else if (desc->handle_irq == handle_level_irq) + thread_level_irq(desc); + else if (desc->handle_irq == handle_fasteoi_irq) + thread_fasteoi_irq(desc); + else if (desc->handle_irq == handle_edge_irq) + thread_edge_irq(desc); + else + thread_do_irq(desc); + out: + spin_unlock_irqrestore(&desc->lock, flags); + + if (waitqueue_active(&desc->wait_for_handler)) + wake_up(&desc->wait_for_handler); +} + +extern asmlinkage void __do_softirq(void); + +static int do_irqd(void * __desc) +{ + struct sched_param param = { 0, }; + struct irq_desc *desc = __desc; + +#ifdef CONFIG_SMP + set_cpus_allowed(current, desc->affinity); +#endif + current->flags |= PF_NOFREEZE | PF_HARDIRQ; + + /* + * Set irq thread priority to SCHED_FIFO/50: + */ + param.sched_priority = MAX_USER_RT_PRIO/2; + + sys_sched_setscheduler(current->pid, SCHED_FIFO, ¶m); + + while (!kthread_should_stop()) { + local_irq_disable(); + set_current_state(TASK_INTERRUPTIBLE); + irq_enter(); + do_hardirq(desc); + irq_exit(); + local_irq_enable(); + cond_resched(); +#ifdef CONFIG_SMP + /* + * Did IRQ affinities change? 
+ */ + if (!cpus_equal(current->cpus_allowed, desc->affinity)) + set_cpus_allowed(current, desc->affinity); +#endif + schedule(); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + +static int ok_to_create_irq_threads; + +static int start_irq_thread(int irq, struct irq_desc *desc) +{ + if (desc->thread || !ok_to_create_irq_threads) + return 0; + + desc->thread = kthread_create(do_irqd, desc, "IRQ-%d", irq); + if (!desc->thread) { + printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq); + return -ENOMEM; + } + + /* + * An interrupt may have come in before the thread pointer was + * stored in desc->thread; make sure the thread gets woken up in + * such a case: + */ + smp_mb(); + wake_up_process(desc->thread); + + return 0; +} + +void __init init_hardirqs(void) +{ + int i; + ok_to_create_irq_threads = 1; + + for (i = 0; i < NR_IRQS; i++) { + irq_desc_t *desc = irq_desc + i; + + if (desc->action && !(desc->status & IRQ_NODELAY)) + start_irq_thread(i, desc); + } +} + +#else + +static int start_irq_thread(int irq, struct irq_desc *desc) +{ + return 0; +} + +#endif + +void __init early_init_hardirqs(void) +{ + int i; + + for (i = 0; i < NR_IRQS; i++) + init_waitqueue_head(&irq_desc[i].wait_for_handler); +} Index: linux-rt-rebase.q/kernel/irq/proc.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/proc.c +++ linux-rt-rebase.q/kernel/irq/proc.c @@ -7,6 +7,8 @@ */ #include +#include +#include #include #include @@ -75,44 +77,6 @@ static int irq_affinity_write_proc(struc #endif -#define MAX_NAMELEN 128 - -static int name_unique(unsigned int irq, struct irqaction *new_action) -{ - struct irq_desc *desc = irq_desc + irq; - struct irqaction *action; - unsigned long flags; - int ret = 1; - - spin_lock_irqsave(&desc->lock, flags); - for (action = desc->action ; action; action = action->next) { - if ((action != new_action) && action->name && - !strcmp(new_action->name, action->name)) { - ret = 0; - break; - } - } - spin_unlock_irqrestore(&desc->lock, flags); - return ret; -} - -void register_handler_proc(unsigned int irq, struct irqaction *action) -{ - char name [MAX_NAMELEN]; - - if (!irq_desc[irq].dir || action->dir || !action->name || - !name_unique(irq, action)) - return; - - memset(name, 0, MAX_NAMELEN); - snprintf(name, MAX_NAMELEN, "%s", action->name); - - /* create /proc/irq/1234/handler/ */ - action->dir = proc_mkdir(name, irq_desc[irq].dir); -} - -#undef MAX_NAMELEN - #define MAX_NAMELEN 10 void register_irq_proc(unsigned int irq) @@ -150,10 +114,96 @@ void register_irq_proc(unsigned int irq) void unregister_handler_proc(unsigned int irq, struct irqaction *action) { + if (action->threaded) + remove_proc_entry(action->threaded->name, action->dir); if (action->dir) remove_proc_entry(action->dir->name, irq_desc[irq].dir); } +#ifndef CONFIG_PREEMPT_RT + +static int threaded_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return sprintf(page, "%c\n", + ((struct irqaction *)data)->flags & IRQF_NODELAY ? 
'0' : '1'); +} + +static int threaded_write_proc(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + int c; + struct irqaction *action = data; + irq_desc_t *desc = irq_desc + action->irq; + + if (get_user(c, buffer)) + return -EFAULT; + if (c != '0' && c != '1') + return -EINVAL; + + spin_lock_irq(&desc->lock); + + if (c == '0') + action->flags |= IRQF_NODELAY; + if (c == '1') + action->flags &= ~IRQF_NODELAY; + recalculate_desc_flags(desc); + + spin_unlock_irq(&desc->lock); + + return 1; +} + +#endif + +#define MAX_NAMELEN 128 + +static int name_unique(unsigned int irq, struct irqaction *new_action) +{ + struct irq_desc *desc = irq_desc + irq; + struct irqaction *action; + + for (action = desc->action ; action; action = action->next) + if ((action != new_action) && action->name && + !strcmp(new_action->name, action->name)) + return 0; + return 1; +} + +void register_handler_proc(unsigned int irq, struct irqaction *action) +{ + char name [MAX_NAMELEN]; + + if (!irq_desc[irq].dir || action->dir || !action->name || + !name_unique(irq, action)) + return; + + memset(name, 0, MAX_NAMELEN); + snprintf(name, MAX_NAMELEN, "%s", action->name); + + /* create /proc/irq/1234/handler/ */ + action->dir = proc_mkdir(name, irq_desc[irq].dir); + + if (!action->dir) + return; +#ifndef CONFIG_PREEMPT_RT + { + struct proc_dir_entry *entry; + /* create /proc/irq/1234/handler/threaded */ + entry = create_proc_entry("threaded", 0600, action->dir); + if (!entry) + return; + entry->nlink = 1; + entry->data = (void *)action; + entry->read_proc = threaded_read_proc; + entry->write_proc = threaded_write_proc; + action->threaded = entry; + } +#endif +} + +#undef MAX_NAMELEN + void init_irq_proc(void) { int i; @@ -163,6 +213,9 @@ void init_irq_proc(void) if (!root_irq_dir) return; + /* create /proc/irq/prof_cpu_mask */ + create_prof_cpu_mask(root_irq_dir); + /* * Create entries for all existing IRQs. */ Index: linux-rt-rebase.q/kernel/irq/spurious.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/spurious.c +++ linux-rt-rebase.q/kernel/irq/spurious.c @@ -10,6 +10,10 @@ #include #include #include +#ifdef CONFIG_X86_IO_APIC +# include +# include +#endif static int irqfixup __read_mostly; @@ -203,6 +207,12 @@ void note_interrupt(unsigned int irq, st * The interrupt is stuck */ __report_bad_irq(irq, desc, action_ret); +#ifdef CONFIG_X86_IO_APIC + if (!sis_apic_bug) { + sis_apic_bug = 1; + printk(KERN_ERR "turning off IO-APIC fast mode.\n"); + } +#else /* * Now kill the IRQ */ @@ -210,6 +220,7 @@ void note_interrupt(unsigned int irq, st desc->status |= IRQ_DISABLED; desc->depth = 1; desc->chip->disable(irq); +#endif } desc->irqs_unhandled = 0; } Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -3265,7 +3265,7 @@ void account_system_time(struct task_str /* Add system time to cpustat. 
*/ tmp = cputime_to_cputime64(cputime); - if (hardirq_count() - hardirq_offset) + if (hardirq_count() - hardirq_offset || (p->flags & PF_HARDIRQ)) cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count() || (p->flags & PF_SOFTIRQ)) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); @@ -4599,6 +4599,27 @@ int __sched cond_resched_softirq_context } EXPORT_SYMBOL(cond_resched_softirq_context); +/* + * Preempt a hardirq context if necessary (possible with hardirq threading): + */ +int cond_resched_hardirq_context(void) +{ + WARN_ON_ONCE(!in_irq()); + WARN_ON_ONCE(!irqs_disabled()); + + if (hardirq_need_resched()) { + irq_exit(); + local_irq_enable(); + __cond_resched(); + local_irq_disable(); + __irq_enter(); + + return 1; + } + return 0; +} +EXPORT_SYMBOL(cond_resched_hardirq_context); + /** * yield - yield the current processor to other threads. * patches/rt-mutex-trivial-tcp-preempt-fix.patch0000664000077200007720000000126510655544573021043 0ustar mingomingo--- net/ipv4/tcp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/net/ipv4/tcp.c =================================================================== --- linux-rt-rebase.q.orig/net/ipv4/tcp.c +++ linux-rt-rebase.q/net/ipv4/tcp.c @@ -1154,11 +1154,11 @@ int tcp_recvmsg(struct kiocb *iocb, stru (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) { - preempt_enable_no_resched(); + preempt_enable(); tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); } else { - preempt_enable_no_resched(); + preempt_enable(); } } #endif patches/loopback-revert.patch0000664000077200007720000000215110655544572015655 0ustar mingomingo revert this commit: commit 58f539740b1ccfc5ef4e509ec2efe82621b546e3 Author: Eric Dumazet Date: Fri Oct 20 00:32:41 2006 -0700 [NET]: Can use __get_cpu_var() instead of per_cpu() in loopback driver. As BHs are off in loopback_xmit(), preemption cannot occurs, so we can use __get_cpu_var() instead of per_cpu() (and avoid a preempt_enable()/preempt_disable() pair) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/loopback.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux/drivers/net/loopback.c =================================================================== --- linux.orig/drivers/net/loopback.c +++ linux/drivers/net/loopback.c @@ -154,10 +154,10 @@ static int loopback_xmit(struct sk_buff #endif dev->last_rx = jiffies; - /* it's OK to use __get_cpu_var() because BHs are off */ - lb_stats = &__get_cpu_var(pcpu_lstats); + lb_stats = &per_cpu(pcpu_lstats, get_cpu()); lb_stats->bytes += skb->len; lb_stats->packets++; + put_cpu(); netif_rx(skb); patches/preempt-irqs-ppc-celleb-beatic-eoi.patch0000664000077200007720000000676710655544573021233 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Tue May 15 17:44:07 2007 Date: Tue, 15 May 2007 17:44:07 +0900 From: Tsutomu OWA To: linuxppc-dev@ozlabs.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [RFC] [patch 1/2] powerpc 2.6.21-rt1: fix kernel hang and/or panic > It occurs on 2.6.21 + patch-2.6.21-rt1 + series of patches that I posted > yesterday. When doing 'hdparm -t /dev/hda' several times, it silently hangs. I think it freezes since It does not response to ping as well. On the other hand, PREEMPT_NONE kernel works just fine. 
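As a rough illustration of the difference described below, the two completion orderings can be written out as a tiny stand-alone C sketch; the chip_* helpers here are only placeholders for the irq_chip mask/unmask/eoi callbacks, not the real kernel entry points:

#include <stdio.h>

static void chip_mask(void)   { printf("mask()\n"); }
static void chip_unmask(void) { printf("unmask()\n"); }
static void chip_eoi(void)    { printf("eoi()\n"); }

static void flow_preempt_none(void)	/* non-threaded handling */
{
	chip_mask();
	chip_unmask();
	chip_eoi();
}

static void flow_preempt_rt(void)	/* threaded (irqd) handling */
{
	chip_mask();
	chip_eoi();
	chip_unmask();
}

int main(void)
{
	printf("PREEMPT_NONE:\n");
	flow_preempt_none();
	printf("PREEMPT_RT:\n");
	flow_preempt_rt();
	return 0;
}

The handler invocation itself is left out of the sketch; only the relative order of unmask() and eoi() matters here, and that is the order the hypervisor is sensitive to.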
After looking into the rt interrupt handling code, I noticed that code path differs between PREEMPT_NONE and PREEMPT_RT; NONE: mask() -> unmask() -> eoi() RT: mask() -> eoi() -> unmask() The hypervisor underlying the linux on Celleb wants to be called in this "mask() -> unmask() -> eoi()" order. This patch mimics the behavior of PREEPT_NONE even if PREEMPT_RT is specified. Or, would it be better to create/add a new (threaded) irq handler? Any comments? Thanks in advance Signed-off-by: Tsutomu OWA -- owa --- arch/powerpc/platforms/celleb/interrupt.c | 39 +++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 6 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/platforms/celleb/interrupt.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/celleb/interrupt.c +++ linux-rt-rebase.q/arch/powerpc/platforms/celleb/interrupt.c @@ -29,6 +29,10 @@ #include "interrupt.h" #include "beat_wrapper.h" +#ifdef CONFIG_PREEMPT_HARDIRQS +extern int hardirq_preemption; +#endif /* CONFIG_PREEMPT_HARDIRQS */ + #define MAX_IRQS NR_IRQS static DEFINE_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; @@ -71,12 +75,35 @@ static void beatic_mask_irq(unsigned int spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } +static void __beatic_eoi_irq(unsigned int irq_plug) +{ + s64 err; + + if ((err = beat_downcount_of_interrupt(irq_plug)) != 0) { + if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ + panic("Failed to downcount IRQ! Error = %16lx", err); + + printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug); + } +} + static void beatic_unmask_irq(unsigned int irq_plug) { unsigned long flags; +#ifdef CONFIG_PREEMPT_HARDIRQS + if (hardirq_preemption) + __beatic_eoi_irq(irq_plug); +#endif /* CONFIG_PREEMPT_HARDIRQS */ + spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); + +#ifdef CONFIG_PREEMPT_HARDIRQS + if (hardirq_preemption) + beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); +#endif /* CONFIG_PREEMPT_HARDIRQS */ + beatic_update_irq_mask(irq_plug); spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } @@ -93,15 +120,15 @@ static void beatic_ack_irq(unsigned int static void beatic_end_irq(unsigned int irq_plug) { - s64 err; unsigned long flags; - if ((err = beat_downcount_of_interrupt(irq_plug)) != 0) { - if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ - panic("Failed to downcount IRQ! Error = %16lx", err); +#ifdef CONFIG_PREEMPT_HARDIRQS + if (hardirq_preemption) + return; +#endif /* CONFIG_PREEMPT_HARDIRQS */ + + __beatic_eoi_irq(irq_plug); - printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug); - } spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); beatic_update_irq_mask(irq_plug); patches/realtime-lsm.patch0000664000077200007720000001400610655544576015157 0ustar mingomingo--- security/Kconfig | 9 +++ security/Makefile | 1 security/realcap.c | 144 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 154 insertions(+) Index: linux-rt-rebase.q/security/Kconfig =================================================================== --- linux-rt-rebase.q.orig/security/Kconfig +++ linux-rt-rebase.q/security/Kconfig @@ -80,6 +80,15 @@ config SECURITY_CAPABILITIES This enables the "default" Linux capabilities functionality. If you are unsure how to answer this question, answer Y. 
+config REALTIME_CAPABILITIES + tristate "Real-Time LSM (Obsolete)" + depends on SECURITY && EXPERIMENTAL + help + This is an obsolete LSM - use newer PAM and rt-limites + to manage your real-time apps. + + If you are unsure how to answer this question, answer N. + config SECURITY_ROOTPLUG tristate "Root Plug Support" depends on USB && SECURITY Index: linux-rt-rebase.q/security/Makefile =================================================================== --- linux-rt-rebase.q.orig/security/Makefile +++ linux-rt-rebase.q/security/Makefile @@ -15,4 +15,5 @@ obj-$(CONFIG_SECURITY) += security.o d # Must precede capability.o in order to stack properly. obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o obj-$(CONFIG_SECURITY_CAPABILITIES) += commoncap.o capability.o +obj-$(CONFIG_REALTIME_CAPABILITIES) += commoncap.o realcap.o obj-$(CONFIG_SECURITY_ROOTPLUG) += commoncap.o root_plug.o Index: linux-rt-rebase.q/security/realcap.c =================================================================== --- /dev/null +++ linux-rt-rebase.q/security/realcap.c @@ -0,0 +1,144 @@ +/* + * Realtime Capabilities Linux Security Module + * + * Copyright (C) 2003 Torben Hohn + * Copyright (C) 2003, 2004 Jack O'Quin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include +#include + +#define RT_LSM "Realtime LSM " /* syslog module name prefix */ +#define RT_ERR "Realtime: " /* syslog error message prefix */ + +/* module parameters + * + * These values could change at any time due to some process writing + * a new value in /sys/module/realtime/parameters. This is OK, + * because each is referenced only once in each function call. + * Nothing depends on parameters having the same value every time. + */ + +/* if TRUE, any process is realtime */ +static int rt_any; +module_param_named(any, rt_any, int, 0644); +MODULE_PARM_DESC(any, " grant realtime privileges to any process."); + +/* realtime group id, or NO_GROUP */ +static int rt_gid = -1; +module_param_named(gid, rt_gid, int, 0644); +MODULE_PARM_DESC(gid, " the group ID with access to realtime privileges."); + +/* enable mlock() privileges */ +static int rt_mlock = 1; +module_param_named(mlock, rt_mlock, int, 0644); +MODULE_PARM_DESC(mlock, " enable memory locking privileges."); + +/* helper function for testing group membership */ +static inline int gid_ok(int gid) +{ + if (gid == -1) + return 0; + + if (gid == current->gid) + return 1; + + return in_egroup_p(gid); +} + +static void realtime_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) +{ + cap_bprm_apply_creds(bprm, unsafe); + + /* If a non-zero `any' parameter was specified, we grant + * realtime privileges to every process. If the `gid' + * parameter was specified and it matches the group id of the + * executable, of the current process or any supplementary + * groups, we grant realtime capabilites. 
+ */ + + if (rt_any || gid_ok(rt_gid)) { + cap_raise(current->cap_effective, CAP_SYS_NICE); + if (rt_mlock) { + cap_raise(current->cap_effective, CAP_IPC_LOCK); + cap_raise(current->cap_effective, CAP_SYS_RESOURCE); + } + } +} + +static struct security_operations capability_ops = { + .ptrace = cap_ptrace, + .capget = cap_capget, + .capset_check = cap_capset_check, + .capset_set = cap_capset_set, + .capable = cap_capable, + .netlink_send = cap_netlink_send, + .netlink_recv = cap_netlink_recv, + .bprm_apply_creds = realtime_bprm_apply_creds, + .bprm_set_security = cap_bprm_set_security, + .bprm_secureexec = cap_bprm_secureexec, + .task_post_setuid = cap_task_post_setuid, + .task_reparent_to_init = cap_task_reparent_to_init, + .syslog = cap_syslog, + .vm_enough_memory = cap_vm_enough_memory, +}; + +#define MY_NAME __stringify(KBUILD_MODNAME) + +static int secondary; /* flag to keep track of how we were registered */ + +static int __init realtime_init(void) +{ + /* register ourselves with the security framework */ + if (register_security(&capability_ops)) { + + /* try registering with primary module */ + if (mod_reg_security(MY_NAME, &capability_ops)) { + printk(KERN_INFO RT_ERR "Failure registering " + "capabilities with primary security module.\n"); + printk(KERN_INFO RT_ERR "Is kernel configured " + "with CONFIG_SECURITY_CAPABILITIES=m?\n"); + return -EINVAL; + } + secondary = 1; + } + + if (rt_any) + printk(KERN_INFO RT_LSM + "initialized (all groups, mlock=%d)\n", rt_mlock); + else if (rt_gid == -1) + printk(KERN_INFO RT_LSM + "initialized (no groups, mlock=%d)\n", rt_mlock); + else + printk(KERN_INFO RT_LSM + "initialized (group %d, mlock=%d)\n", rt_gid, rt_mlock); + + return 0; +} + +static void __exit realtime_exit(void) +{ + /* remove ourselves from the security framework */ + if (secondary) { + if (mod_unreg_security(MY_NAME, &capability_ops)) + printk(KERN_INFO RT_ERR "Failure unregistering " + "capabilities with primary module.\n"); + + } else if (unregister_security(&capability_ops)) { + printk(KERN_INFO RT_ERR + "Failure unregistering capabilities with the kernel\n"); + } + printk(KERN_INFO "Realtime Capability LSM exiting\n"); +} + +late_initcall(realtime_init); +module_exit(realtime_exit); + +MODULE_DESCRIPTION("Realtime Capabilities Security Module"); +MODULE_LICENSE("GPL"); patches/latency-tracing-arm.patch0000664000077200007720000002726510655544572016434 0ustar mingomingo arch/arm/boot/compressed/head.S | 13 ++++ arch/arm/kernel/entry-common.S | 109 ++++++++++++++++++++++++++++++++++++++++ arch/arm/kernel/fiq.c | 4 - arch/arm/kernel/irq.c | 4 + arch/arm/kernel/traps.c | 1 arch/arm/mm/copypage-v4mc.c | 4 - arch/arm/mm/copypage-xscale.c | 4 - arch/arm/mm/fault.c | 14 ++--- include/asm-arm/pgalloc.h | 4 - include/asm-arm/timex.h | 10 +++ include/asm-arm/unistd.h | 4 + 11 files changed, 154 insertions(+), 17 deletions(-) Index: linux/arch/arm/boot/compressed/head.S =================================================================== --- linux.orig/arch/arm/boot/compressed/head.S +++ linux/arch/arm/boot/compressed/head.S @@ -928,6 +928,19 @@ memdump: mov r12, r0 #endif .ltorg +#ifdef CONFIG_MCOUNT +/* CONFIG_MCOUNT causes boot header to be built with -pg requiring this + * trampoline + */ + .text + .align 0 + .type mcount %function + .global mcount +mcount: + mov pc, lr @ just return +#endif + + reloc_end: .align Index: linux/arch/arm/kernel/entry-common.S =================================================================== --- linux.orig/arch/arm/kernel/entry-common.S +++ 
linux/arch/arm/kernel/entry-common.S @@ -3,6 +3,8 @@ * * Copyright (C) 2000 Russell King * + * FUNCTION_TRACE/mcount support (C) 2005 Timesys john.cooper@timesys.com + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -395,5 +397,112 @@ ENTRY(sys_oabi_call_table) #undef ABI #undef OBSOLETE +#ifdef CONFIG_FRAME_POINTER + +#ifdef CONFIG_MCOUNT +/* + * At the point where we are in mcount() we maintain the + * frame of the prologue code and keep the call to mcount() + * out of the stack frame list: + + saved pc <---\ caller of instrumented routine + saved lr | + ip/prev_sp | + fp -----^ | + : | + | + -> saved pc | instrumented routine + | saved lr | + | ip/prev_sp | + | fp ---------/ + | : + | + | mcount + | saved pc + | saved lr + | ip/prev sp + -- fp + r3 + r2 + r1 + sp-> r0 + : + */ + + .text + .align 0 + .type mcount %function + .global mcount + +/* gcc -pg generated FUNCTION_PROLOGUE references mcount() + * and has already created the stack frame invocation for + * the routine we have been called to instrument. We create + * a complete frame nevertheless, as we want to use the same + * call to mcount() from c code. + */ +mcount: + + ldr ip, =mcount_enabled @ leave early, if disabled + ldr ip, [ip] + cmp ip, #0 + moveq pc,lr + + mov ip, sp + stmdb sp!, {r0 - r3, fp, ip, lr, pc} @ create stack frame + + ldr r1, [fp, #-4] @ get lr (the return address + @ of the caller of the + @ instrumented function) + mov r0, lr @ get lr - (the return address + @ of the instrumented function) + + sub fp, ip, #4 @ point fp at this frame + + bl __trace +1: + ldmdb fp, {r0 - r3, fp, sp, pc} @ pop entry frame and return + +#endif + +/* ARM replacement for unsupported gcc __builtin_return_address(n) + * where 0 < n. n == 0 is supported here as well. + * + * Walk up the stack frame until the desired frame is found or a NULL + * fp is encountered, return NULL in the latter case. + * + * Note: it is possible under code optimization for the stack invocation + * of an ancestor function (level N) to be removed before calling a + * descendant function (level N+1). No easy means is available to deduce + * this scenario with the result being [for example] caller_addr(0) when + * called from level N+1 returning level N-1 rather than the expected + * level N. This optimization issue appears isolated to the case of + * a call to a level N+1 routine made at the tail end of a level N + * routine -- the level N frame is deleted and a simple branch is made + * to the level N+1 routine. + */ + + .text + .align 0 + .type arm_return_addr %function + .global arm_return_addr + +arm_return_addr: + mov ip, r0 + mov r0, fp +3: + cmp r0, #0 + beq 1f @ frame list hit end, bail + cmp ip, #0 + beq 2f @ reached desired frame + ldr r0, [r0, #-12] @ else continue, get next fp + sub ip, ip, #1 + b 3b +2: + ldr r0, [r0, #-4] @ get target return address +1: + mov pc, lr + +#endif + #endif Index: linux/arch/arm/kernel/fiq.c =================================================================== --- linux.orig/arch/arm/kernel/fiq.c +++ linux/arch/arm/kernel/fiq.c @@ -89,7 +89,7 @@ void set_fiq_handler(void *start, unsign * disable irqs for the duration. Note - these functions are almost * entirely coded in assembly. 
*/ -void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs) +void notrace __attribute__((naked)) set_fiq_regs(struct pt_regs *regs) { register unsigned long tmp; asm volatile ( @@ -107,7 +107,7 @@ void __attribute__((naked)) set_fiq_regs : "r" (®s->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); } -void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs) +void notrace __attribute__((naked)) get_fiq_regs(struct pt_regs *regs) { register unsigned long tmp; asm volatile ( Index: linux/arch/arm/kernel/irq.c =================================================================== --- linux.orig/arch/arm/kernel/irq.c +++ linux/arch/arm/kernel/irq.c @@ -108,11 +108,13 @@ static struct irq_desc bad_irq_desc = { * come via this function. Instead, they should provide their * own 'handler' */ -asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs) +asmlinkage void __exception notrace asm_do_IRQ(unsigned int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); struct irq_desc *desc = irq_desc + irq; + trace_special(instruction_pointer(regs), irq, 0); + /* * Some hardware gives randomly wrong interrupts. Rather * than crashing, do something sensible. Index: linux/arch/arm/kernel/traps.c =================================================================== --- linux.orig/arch/arm/kernel/traps.c +++ linux/arch/arm/kernel/traps.c @@ -354,6 +354,7 @@ asmlinkage void do_unexp_fiq (struct pt_ { printk("Hmm. Unexpected FIQ received, but trying to continue\n"); printk("You may have a hardware problem...\n"); + print_traces(current); } /* Index: linux/arch/arm/mm/copypage-v4mc.c =================================================================== --- linux.orig/arch/arm/mm/copypage-v4mc.c +++ linux/arch/arm/mm/copypage-v4mc.c @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock); * instruction. If your processor does not supply this, you have to write your * own copy_user_page that does the right thing. */ -static void __attribute__((naked)) +static void notrace __attribute__((naked)) mc_copy_user_page(void *from, void *to) { asm volatile( @@ -88,7 +88,7 @@ void v4_mc_copy_user_page(void *kto, con /* * ARMv4 optimised clear_user_page */ -void __attribute__((naked)) +void notrace __attribute__((naked)) v4_mc_clear_user_page(void *kaddr, unsigned long vaddr) { asm volatile( Index: linux/arch/arm/mm/copypage-xscale.c =================================================================== --- linux.orig/arch/arm/mm/copypage-xscale.c +++ linux/arch/arm/mm/copypage-xscale.c @@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock); * Dcache aliasing issue. The writes will be forwarded to the write buffer, * and merged as appropriate. 
*/ -static void __attribute__((naked)) +static void notrace __attribute__((naked)) mc_copy_user_page(void *from, void *to) { /* @@ -110,7 +110,7 @@ void xscale_mc_copy_user_page(void *kto, /* * XScale optimised clear_user_page */ -void __attribute__((naked)) +void notrace __attribute__((naked)) xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) { asm volatile( Index: linux/arch/arm/mm/fault.c =================================================================== --- linux.orig/arch/arm/mm/fault.c +++ linux/arch/arm/mm/fault.c @@ -215,7 +215,7 @@ out: return fault; } -static int +static notrace int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { struct task_struct *tsk; @@ -311,7 +311,7 @@ no_context: * interrupt or a critical region, and should only copy the information * from the master page table, nothing more. */ -static int +static notrace int do_translation_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -354,7 +354,7 @@ bad_area: * Some section permission faults need to be handled gracefully. * They can happen due to a __{get,put}_user during an oops. */ -static int +static notrace int do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { do_bad_area(addr, fsr, regs); @@ -364,7 +364,7 @@ do_sect_fault(unsigned long addr, unsign /* * This abort handler always returns "fault". */ -static int +static notrace int do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { return 1; @@ -419,7 +419,7 @@ static struct fsr_info { { do_bad, SIGBUS, 0, "unknown 31" } }; -void __init +void __init notrace hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), int sig, const char *name) { @@ -433,7 +433,7 @@ hook_fault_code(int nr, int (*fn)(unsign /* * Dispatch a data abort to the relevant handler. */ -asmlinkage void __exception +asmlinkage void __exception notrace do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6); @@ -452,7 +452,7 @@ do_DataAbort(unsigned long addr, unsigne arm_notify_die("", regs, &info, fsr, 0); } -asmlinkage void __exception +asmlinkage void __exception notrace do_PrefetchAbort(unsigned long addr, struct pt_regs *regs) { do_translation_fault(addr, 0, regs); Index: linux/include/asm-arm/pgalloc.h =================================================================== --- linux.orig/include/asm-arm/pgalloc.h +++ linux/include/asm-arm/pgalloc.h @@ -109,7 +109,7 @@ static inline void __pmd_populate(pmd_t * * Ensure that we always set both PMD entries. 
*/ -static inline void +static inline void notrace pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) { unsigned long pte_ptr = (unsigned long)ptep; @@ -122,7 +122,7 @@ pmd_populate_kernel(struct mm_struct *mm __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE); } -static inline void +static inline void notrace pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) { __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); Index: linux/include/asm-arm/timex.h =================================================================== --- linux.orig/include/asm-arm/timex.h +++ linux/include/asm-arm/timex.h @@ -16,9 +16,17 @@ typedef unsigned long cycles_t; +#ifndef mach_read_cycles + #define mach_read_cycles() (0) +#ifdef CONFIG_LATENCY_TIMING + #define mach_cycles_to_usecs(d) (d) + #define mach_usecs_to_cycles(d) (d) +#endif +#endif + static inline cycles_t get_cycles (void) { - return 0; + return mach_read_cycles(); } #endif Index: linux/include/asm-arm/unistd.h =================================================================== --- linux.orig/include/asm-arm/unistd.h +++ linux/include/asm-arm/unistd.h @@ -379,6 +379,10 @@ #define __NR_timerfd (__NR_SYSCALL_BASE+350) #define __NR_eventfd (__NR_SYSCALL_BASE+351) +#ifndef __ASSEMBLY__ +#define NR_syscalls (__NR_set_mempolicy + 1 - __NR_SYSCALL_BASE) +#endif + /* * The following SWIs are ARM private. */ patches/preempt-realtime-powerpc-celleb-raw-spinlocks.patch0000664000077200007720000000321210655544574023526 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Mon May 14 15:28:23 2007 Date: Mon, 14 May 2007 15:28:23 +0900 From: Tsutomu OWA To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [patch 2/4] powerpc 2.6.21-rt1: convert spinlocks to raw ones for Celleb. Convert more spinlocks to raw ones for Celleb. Signed-off-by: Tsutomu OWA -- owa --- arch/powerpc/platforms/celleb/htab.c | 2 +- arch/powerpc/platforms/celleb/interrupt.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/platforms/celleb/htab.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/celleb/htab.c +++ linux-rt-rebase.q/arch/powerpc/platforms/celleb/htab.c @@ -40,7 +40,7 @@ #define DBG_LOW(fmt...) do { } while(0) #endif -static DEFINE_SPINLOCK(beat_htab_lock); +static DEFINE_RAW_SPINLOCK(beat_htab_lock); static inline unsigned int beat_read_mask(unsigned hpte_group) { Index: linux-rt-rebase.q/arch/powerpc/platforms/celleb/interrupt.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/celleb/interrupt.c +++ linux-rt-rebase.q/arch/powerpc/platforms/celleb/interrupt.c @@ -34,7 +34,7 @@ extern int hardirq_preemption; #endif /* CONFIG_PREEMPT_HARDIRQS */ #define MAX_IRQS NR_IRQS -static DEFINE_SPINLOCK(beatic_irq_mask_lock); +static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; patches/floppy-resume-fix.patch0000664000077200007720000000414210655544571016152 0ustar mingomingoSubject: [patch] floppy: suspend/resume fix From: Ingo Molnar introduce a floppy platform-driver and suspend/resume ops to stop/start the floppy driver. Bug reported by Mikael Pettersson. 
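Illustration only, not part of the patch itself: a condensed sketch of the suspend/resume hooks the description above refers to, pulled together from the hunks that follow so the shape is visible in one place. The grab/release helpers already exist in drivers/block/floppy.c; the change only wires them into a platform driver that is registered from floppy_init().

#include <linux/platform_device.h>

/* quiesce the controller before the system sleeps */
static int floppy_suspend(struct platform_device *dev, pm_message_t state)
{
	floppy_release_irq_and_dma();
	return 0;
}

/* re-acquire IRQ and DMA on wakeup */
static int floppy_resume(struct platform_device *dev)
{
	floppy_grab_irq_and_dma();
	return 0;
}

static struct platform_driver floppy_driver = {
	.suspend = floppy_suspend,
	.resume  = floppy_resume,
	.driver  = { .name = "floppy" },
};
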
Signed-off-by: Ingo Molnar --- drivers/block/floppy.c | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) Index: linux/drivers/block/floppy.c =================================================================== --- linux.orig/drivers/block/floppy.c +++ linux/drivers/block/floppy.c @@ -4157,6 +4157,28 @@ static void floppy_device_release(struct complete(&device_release); } +static int floppy_suspend(struct platform_device *dev, pm_message_t state) +{ + floppy_release_irq_and_dma(); + + return 0; +} + +static int floppy_resume(struct platform_device *dev) +{ + floppy_grab_irq_and_dma(); + + return 0; +} + +static struct platform_driver floppy_driver = { + .suspend = floppy_suspend, + .resume = floppy_resume, + .driver = { + .name = "floppy", + }, +}; + static struct platform_device floppy_device[N_DRIVE]; static struct kobject *floppy_find(dev_t dev, int *part, void *data) @@ -4205,10 +4227,14 @@ static int __init floppy_init(void) if (err) goto out_put_disk; + err = platform_driver_register(&floppy_driver); + if (err) + goto out_unreg_blkdev; + floppy_queue = blk_init_queue(do_fd_request, &floppy_lock); if (!floppy_queue) { err = -ENOMEM; - goto out_unreg_blkdev; + goto out_unreg_driver; } blk_queue_max_sectors(floppy_queue, 64); @@ -4357,6 +4383,8 @@ out_flush_work: out_unreg_region: blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); blk_cleanup_queue(floppy_queue); +out_unreg_driver: + platform_driver_unregister(&floppy_driver); out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); out_put_disk: @@ -4548,6 +4576,7 @@ void cleanup_module(void) init_completion(&device_release); blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); unregister_blkdev(FLOPPY_MAJOR, "fd"); + platform_driver_unregister(&floppy_driver); for (drive = 0; drive < N_DRIVE; drive++) { del_timer_sync(&motor_off_timer[drive]); patches/preempt-realtime-ppc-need-resched-delayed.patch0000664000077200007720000000216510655544574022552 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Mon May 14 15:29:17 2007 Date: Mon, 14 May 2007 15:29:17 +0900 From: Tsutomu OWA To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [patch 3/4] powerpc 2.6.21-rt1: add a need_resched_delayed() check Add a need_resched_delayed() check. This was pointed by Sergei Shtylyov; http://ozlabs.org/pipermail/linuxppc-dev/2007-March/033148.html Signed-off-by: Tsutomu Owa -- owa --- arch/powerpc/kernel/idle.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/powerpc/kernel/idle.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/idle.c +++ linux-rt-rebase.q/arch/powerpc/kernel/idle.c @@ -75,7 +75,9 @@ void cpu_idle(void) local_irq_disable(); /* check again after disabling irqs */ - if (!need_resched() && !cpu_should_die()) + if (!need_resched() && + !need_resched_delayed() && + !cpu_should_die()) ppc_md.power_save(); local_irq_enable(); patches/lock_page_ref.patch0000664000077200007720000003237210655544576015352 0ustar mingomingoSubject: mm: lock_page_ref Change the PG_nonewref operations into locking primitives and place them so that they provide page level serialization with regard to the page_tree operations. (basically replace the tree_lock with a per page lock). The normal page lock has sufficiently different (and overlapping) scope and protection rules that this second lock is needed. 
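Illustration only, not part of the patch itself: a condensed view of the per-page lock and the caller pattern described above, taken from the pagemap.h and filemap.c hunks that follow. The lock is a bit spinlock on the PG_nonewrefs page flag, taken around the mapping's tree_lock so page_tree updates are serialized per page. example_remove() is a hypothetical caller shown only to make the lock ordering visible.

#include <linux/bit_spinlock.h>

static inline void lock_page_ref(struct page *page)
{
	bit_spin_lock(PG_nonewrefs, &page->flags);
	smp_wmb();	/* matches the definition in the pagemap.h hunk below */
}

static inline void unlock_page_ref(struct page *page)
{
	bit_spin_unlock(PG_nonewrefs, &page->flags);
}

/* hypothetical caller illustrating the ordering after the conversion */
static void example_remove(struct address_space *mapping, struct page *page)
{
	lock_page_ref_irq(page);	/* was: spin_lock_irq(&mapping->tree_lock) */
	spin_lock(&mapping->tree_lock);
	__remove_from_page_cache(page);
	spin_unlock(&mapping->tree_lock);
	unlock_page_ref_irq(page);
}
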
Signed-off-by: Peter Zijlstra --- fs/buffer.c | 6 ++++-- include/linux/page-flags.h | 21 --------------------- include/linux/pagemap.h | 45 +++++++++++++++++++++++++++++++++++++++++++-- mm/filemap.c | 14 ++++++++------ mm/migrate.c | 25 +++++++++++++------------ mm/page-writeback.c | 18 ++++++++++++------ mm/swap_state.c | 14 ++++++++------ mm/swapfile.c | 6 ++++-- mm/truncate.c | 9 ++++++--- mm/vmscan.c | 14 +++++++------- 10 files changed, 105 insertions(+), 67 deletions(-) Index: linux-rt-rebase.q/fs/buffer.c =================================================================== --- linux-rt-rebase.q.orig/fs/buffer.c +++ linux-rt-rebase.q/fs/buffer.c @@ -684,7 +684,8 @@ static int __set_page_dirty(struct page if (TestSetPageDirty(page)) return 0; - spin_lock_irq(&mapping->tree_lock); + lock_page_ref_irq(page); + spin_lock(&mapping->tree_lock); if (page->mapping) { /* Race with truncate? */ WARN_ON_ONCE(warn && !PageUptodate(page)); @@ -695,7 +696,8 @@ static int __set_page_dirty(struct page radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } - spin_unlock_irq(&mapping->tree_lock); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irq(page); __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); return 1; Index: linux-rt-rebase.q/include/linux/page-flags.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/page-flags.h +++ linux-rt-rebase.q/include/linux/page-flags.h @@ -279,25 +279,4 @@ static inline void set_page_writeback(st test_set_page_writeback(page); } -static inline void set_page_nonewrefs(struct page *page) -{ - preempt_disable(); - SetPageNoNewRefs(page); - smp_wmb(); -} - -static inline void __clear_page_nonewrefs(struct page *page) -{ - smp_wmb(); - __ClearPageNoNewRefs(page); - preempt_enable(); -} - -static inline void clear_page_nonewrefs(struct page *page) -{ - smp_wmb(); - ClearPageNoNewRefs(page); - preempt_enable(); -} - #endif /* PAGE_FLAGS_H */ Index: linux-rt-rebase.q/include/linux/pagemap.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/pagemap.h +++ linux-rt-rebase.q/include/linux/pagemap.h @@ -14,6 +14,7 @@ #include #include #include /* for in_interrupt() */ +#include /* * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page @@ -64,6 +65,47 @@ static inline void mapping_set_gfp_mask( #define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, int cold); +static inline void lock_page_ref(struct page *page) +{ + bit_spin_lock(PG_nonewrefs, &page->flags); + smp_wmb(); +} + +static inline void unlock_page_ref(struct page *page) +{ + bit_spin_unlock(PG_nonewrefs, &page->flags); +} + +static inline void wait_on_page_ref(struct page *page) +{ + while (unlikely(test_bit(PG_nonewrefs, &page->flags))) + cpu_relax(); +} + +#define lock_page_ref_irq(page) \ + do { \ + local_irq_disable(); \ + lock_page_ref(page); \ + } while (0) + +#define unlock_page_ref_irq(page) \ + do { \ + unlock_page_ref(page); \ + local_irq_enable(); \ + } while (0) + +#define lock_page_ref_irqsave(page, flags) \ + do { \ + local_irq_save(flags); \ + lock_page_ref(page); \ + } while (0) + +#define unlock_page_ref_irqrestore(page, flags) \ + do { \ + unlock_page_ref(page); \ + local_irq_restore(flags); \ + } while (0) + /* * speculatively take a reference to a page. 
* If the page is free (_count == 0), then _count is untouched, and 0 @@ -139,8 +181,7 @@ static inline int page_cache_get_specula * page refcount has been raised. See below comment. */ - while (unlikely(PageNoNewRefs(page))) - cpu_relax(); + wait_on_page_ref(page); /* * smp_rmb is to ensure the load of page->flags (for PageNoNewRefs()) Index: linux-rt-rebase.q/mm/filemap.c =================================================================== --- linux-rt-rebase.q.orig/mm/filemap.c +++ linux-rt-rebase.q/mm/filemap.c @@ -129,9 +129,11 @@ void remove_from_page_cache(struct page BUG_ON(!PageLocked(page)); - spin_lock_irq(&mapping->tree_lock); + lock_page_ref_irq(page); + spin_lock(&mapping->tree_lock); __remove_from_page_cache(page); - spin_unlock_irq(&mapping->tree_lock); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irq(page); } static int sync_page(void *word) @@ -441,8 +443,8 @@ int add_to_page_cache(struct page *page, int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (error == 0) { - set_page_nonewrefs(page); - spin_lock_irq(&mapping->tree_lock); + lock_page_ref_irq(page); + spin_lock(&mapping->tree_lock); error = radix_tree_insert(&mapping->page_tree, offset, page); if (!error) { page_cache_get(page); @@ -452,8 +454,8 @@ int add_to_page_cache(struct page *page, mapping_nrpages_inc(mapping); __inc_zone_page_state(page, NR_FILE_PAGES); } - spin_unlock_irq(&mapping->tree_lock); - clear_page_nonewrefs(page); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irq(page); radix_tree_preload_end(); } return error; Index: linux-rt-rebase.q/mm/migrate.c =================================================================== --- linux-rt-rebase.q.orig/mm/migrate.c +++ linux-rt-rebase.q/mm/migrate.c @@ -302,16 +302,16 @@ static int migrate_page_move_mapping(str return 0; } - set_page_nonewrefs(page); - spin_lock_irq(&mapping->tree_lock); + lock_page_ref_irq(page); + spin_lock(&mapping->tree_lock); pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page)); if (page_count(page) != 2 + !!PagePrivate(page) || (struct page *)radix_tree_deref_slot(pslot) != page) { - spin_unlock_irq(&mapping->tree_lock); - clear_page_nonewrefs(page); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irq(page); return -EAGAIN; } @@ -328,14 +328,7 @@ static int migrate_page_move_mapping(str radix_tree_replace_slot(pslot, newpage); page->mapping = NULL; - spin_unlock_irq(&mapping->tree_lock); - clear_page_nonewrefs(page); - - /* - * Drop cache reference from old page. - * We know this isn't the last reference. - */ - __put_page(page); + spin_unlock(&mapping->tree_lock); /* * If moved to a different zone then also account @@ -350,6 +343,14 @@ static int migrate_page_move_mapping(str __dec_zone_page_state(page, NR_FILE_PAGES); __inc_zone_page_state(newpage, NR_FILE_PAGES); + unlock_page_ref_irq(page); + + /* + * Drop cache reference from old page. + * We know this isn't the last reference. + */ + __put_page(page); + return 0; } Index: linux-rt-rebase.q/mm/page-writeback.c =================================================================== --- linux-rt-rebase.q.orig/mm/page-writeback.c +++ linux-rt-rebase.q/mm/page-writeback.c @@ -820,7 +820,8 @@ int __set_page_dirty_nobuffers(struct pa if (!mapping) return 1; - spin_lock_irq(&mapping->tree_lock); + lock_page_ref_irq(page); + spin_lock(&mapping->tree_lock); mapping2 = page_mapping(page); if (mapping2) { /* Race with truncate? 
*/ BUG_ON(mapping2 != mapping); @@ -832,7 +833,8 @@ int __set_page_dirty_nobuffers(struct pa radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } - spin_unlock_irq(&mapping->tree_lock); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irq(page); if (mapping->host) { /* !PageAnon && !swapper_space */ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); @@ -977,13 +979,15 @@ int test_clear_page_writeback(struct pag if (mapping) { unsigned long flags; - spin_lock_irqsave(&mapping->tree_lock, flags); + lock_page_ref_irqsave(page, flags); + spin_lock(&mapping->tree_lock); ret = TestClearPageWriteback(page); if (ret) radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_WRITEBACK); - spin_unlock_irqrestore(&mapping->tree_lock, flags); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irqrestore(page, flags); } else { ret = TestClearPageWriteback(page); } @@ -1000,7 +1004,8 @@ int test_set_page_writeback(struct page if (mapping) { unsigned long flags; - spin_lock_irqsave(&mapping->tree_lock, flags); + lock_page_ref_irqsave(page, flags); + spin_lock(&mapping->tree_lock); ret = TestSetPageWriteback(page); if (!ret) radix_tree_tag_set(&mapping->page_tree, @@ -1010,7 +1015,8 @@ int test_set_page_writeback(struct page radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); - spin_unlock_irqrestore(&mapping->tree_lock, flags); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irqrestore(page, flags); } else { ret = TestSetPageWriteback(page); } Index: linux-rt-rebase.q/mm/swap_state.c =================================================================== --- linux-rt-rebase.q.orig/mm/swap_state.c +++ linux-rt-rebase.q/mm/swap_state.c @@ -79,8 +79,8 @@ static int __add_to_swap_cache(struct pa BUG_ON(PagePrivate(page)); error = radix_tree_preload(gfp_mask); if (!error) { - set_page_nonewrefs(page); - spin_lock_irq(&swapper_space.tree_lock); + lock_page_ref_irq(page); + spin_lock(&swapper_space.tree_lock); error = radix_tree_insert(&swapper_space.page_tree, entry.val, page); if (!error) { @@ -90,8 +90,8 @@ static int __add_to_swap_cache(struct pa mapping_nrpages_inc(&swapper_space); __inc_zone_page_state(page, NR_FILE_PAGES); } - spin_unlock_irq(&swapper_space.tree_lock); - clear_page_nonewrefs(page); + spin_unlock(&swapper_space.tree_lock); + unlock_page_ref_irq(page); radix_tree_preload_end(); } return error; @@ -202,9 +202,11 @@ void delete_from_swap_cache(struct page entry.val = page_private(page); - spin_lock_irq(&swapper_space.tree_lock); + lock_page_ref_irq(page); + spin_lock(&swapper_space.tree_lock); __delete_from_swap_cache(page); - spin_unlock_irq(&swapper_space.tree_lock); + spin_unlock(&swapper_space.tree_lock); + unlock_page_ref_irq(page); swap_free(entry); page_cache_release(page); Index: linux-rt-rebase.q/mm/swapfile.c =================================================================== --- linux-rt-rebase.q.orig/mm/swapfile.c +++ linux-rt-rebase.q/mm/swapfile.c @@ -367,13 +367,15 @@ int remove_exclusive_swap_page(struct pa retval = 0; if (p->swap_map[swp_offset(entry)] == 1) { /* Recheck the page count with the swapcache lock held.. 
*/ - spin_lock_irq(&swapper_space.tree_lock); + lock_page_ref_irq(page); + spin_lock(&swapper_space.tree_lock); if ((page_count(page) == 2) && !PageWriteback(page)) { __delete_from_swap_cache(page); SetPageDirty(page); retval = 1; } - spin_unlock_irq(&swapper_space.tree_lock); + spin_unlock(&swapper_space.tree_lock); + unlock_page_ref_irq(page); } spin_unlock(&swap_lock); Index: linux-rt-rebase.q/mm/truncate.c =================================================================== --- linux-rt-rebase.q.orig/mm/truncate.c +++ linux-rt-rebase.q/mm/truncate.c @@ -347,18 +347,21 @@ invalidate_complete_page2(struct address if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL)) return 0; - spin_lock_irq(&mapping->tree_lock); + lock_page_ref_irq(page); + spin_lock(&mapping->tree_lock); if (PageDirty(page)) goto failed; BUG_ON(PagePrivate(page)); __remove_from_page_cache(page); - spin_unlock_irq(&mapping->tree_lock); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irq(page); ClearPageUptodate(page); page_cache_release(page); /* pagecache ref */ return 1; failed: - spin_unlock_irq(&mapping->tree_lock); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irq(page); return 0; } Index: linux-rt-rebase.q/mm/vmscan.c =================================================================== --- linux-rt-rebase.q.orig/mm/vmscan.c +++ linux-rt-rebase.q/mm/vmscan.c @@ -369,8 +369,8 @@ int remove_mapping(struct address_space BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); - set_page_nonewrefs(page); - spin_lock_irq(&mapping->tree_lock); + lock_page_ref_irq(page); + spin_lock(&mapping->tree_lock); /* * The non racy check for a busy page. * @@ -405,22 +405,22 @@ int remove_mapping(struct address_space if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; __delete_from_swap_cache(page); - spin_unlock_irq(&mapping->tree_lock); + spin_unlock(&mapping->tree_lock); swap_free(swap); goto free_it; } __remove_from_page_cache(page); - spin_unlock_irq(&mapping->tree_lock); + spin_unlock(&mapping->tree_lock); free_it: - __clear_page_nonewrefs(page); + unlock_page_ref_irq(page); __put_page(page); /* The pagecache ref */ return 1; cannot_free: - spin_unlock_irq(&mapping->tree_lock); - clear_page_nonewrefs(page); + spin_unlock(&mapping->tree_lock); + unlock_page_ref_irq(page); return 0; } patches/nf_conntrack-fix-smp-processor-id.patch0000664000077200007720000000130610655544576021220 0ustar mingomingo--- include/net/netfilter/nf_conntrack.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/include/net/netfilter/nf_conntrack.h =================================================================== --- linux-rt-rebase.q.orig/include/net/netfilter/nf_conntrack.h +++ linux-rt-rebase.q/include/net/netfilter/nf_conntrack.h @@ -264,7 +264,7 @@ DECLARE_PER_CPU(struct ip_conntrack_stat #define NF_CT_STAT_INC_ATOMIC(count) \ do { \ local_bh_disable(); \ - __get_cpu_var(nf_conntrack_stat).count++; \ + __raw_get_cpu_var(nf_conntrack_stat).count++; \ local_bh_enable(); \ } while (0) #define NF_CT_STAT_INC(count) (__raw_get_cpu_var(nf_conntrack_stat).count++) patches/s_files-schedule_on_each_cpu_wq.patch0000664000077200007720000000615210655544576021037 0ustar mingomingo--- include/linux/workqueue.h | 1 kernel/workqueue.c | 66 ++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 56 insertions(+), 11 deletions(-) Index: linux-rt-rebase.q/include/linux/workqueue.h =================================================================== --- 
linux-rt-rebase.q.orig/include/linux/workqueue.h +++ linux-rt-rebase.q/include/linux/workqueue.h @@ -144,6 +144,7 @@ extern int FASTCALL(schedule_delayed_wor unsigned long delay)); extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); +extern int schedule_on_each_cpu_wq(struct workqueue_struct *wq, work_func_t func); extern int schedule_on_each_cpu(work_func_t func); extern int current_is_keventd(void); extern int keventd_up(void); Index: linux-rt-rebase.q/kernel/workqueue.c =================================================================== --- linux-rt-rebase.q.orig/kernel/workqueue.c +++ linux-rt-rebase.q/kernel/workqueue.c @@ -240,6 +240,20 @@ int queue_delayed_work_on(int cpu, struc } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +static void leak_check(void *func) +{ + if (!in_atomic() && lockdep_depth(current) <= 0) + return; + printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " + "%s/0x%08x/%d\n", + current->comm, preempt_count(), + current->pid); + printk(KERN_ERR " last function: "); + print_symbol("%s\n", (unsigned long)func); + debug_show_held_locks(current); + dump_stack(); +} + static void run_workqueue(struct cpu_workqueue_struct *cwq) { spin_lock_irq(&cwq->lock); @@ -261,18 +275,10 @@ static void run_workqueue(struct cpu_wor BUG_ON(get_wq_data(work) != cwq); work_clear_pending(work); - f(work); - if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { - printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " - "%s/0x%08x/%d\n", - current->comm, preempt_count(), - current->pid); - printk(KERN_ERR " last function: "); - print_symbol("%s\n", (unsigned long)f); - debug_show_held_locks(current); - dump_stack(); - } + leak_check(NULL); + f(work); + leak_check(f); spin_lock_irq(&cwq->lock); cwq->current_work = NULL; @@ -599,6 +605,44 @@ int schedule_on_each_cpu(work_func_t fun return 0; } +/** + * schedule_on_each_cpu_wq - call a function on each online CPU on a per-CPU wq + * @func: the function to call + * + * Returns zero on success. + * Returns -ve errno on failure. + * + * Appears to be racy against CPU hotplug. + * + * schedule_on_each_cpu() is very slow. 
+ */ +int schedule_on_each_cpu_wq(struct workqueue_struct *wq, work_func_t func) +{ + int cpu; + struct work_struct *works; + + if (is_single_threaded(wq)) { + WARN_ON(1); + return -EINVAL; + } + works = alloc_percpu(struct work_struct); + if (!works) + return -ENOMEM; + + for_each_online_cpu(cpu) { + struct work_struct *work = per_cpu_ptr(works, cpu); + + INIT_WORK(work, func); + set_bit(WORK_STRUCT_PENDING, work_data_bits(work)); + __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); + } + flush_workqueue(wq); + free_percpu(works); + + return 0; +} + + void flush_scheduled_work(void) { flush_workqueue(keventd_wq); patches/preempt-realtime-arm-footbridge.patch0000664000077200007720000000225010655544574020733 0ustar mingomingo--- arch/arm/mach-footbridge/netwinder-hw.c | 2 +- arch/arm/mach-footbridge/netwinder-leds.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/arch/arm/mach-footbridge/netwinder-hw.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mach-footbridge/netwinder-hw.c +++ linux-rt-rebase.q/arch/arm/mach-footbridge/netwinder-hw.c @@ -67,7 +67,7 @@ static inline void wb977_ww(int reg, int /* * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE */ -DEFINE_SPINLOCK(gpio_lock); +DEFINE_RAW_SPINLOCK(gpio_lock); static unsigned int current_gpio_op; static unsigned int current_gpio_io; Index: linux-rt-rebase.q/arch/arm/mach-footbridge/netwinder-leds.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mach-footbridge/netwinder-leds.c +++ linux-rt-rebase.q/arch/arm/mach-footbridge/netwinder-leds.c @@ -32,7 +32,7 @@ static char led_state; static char hw_led_state; static DEFINE_SPINLOCK(leds_lock); -extern spinlock_t gpio_lock; +extern raw_spinlock_t gpio_lock; static void netwinder_leds_event(led_event_t evt) { patches/arm-trace-preempt-idle.patch0000664000077200007720000000525210655544574017025 0ustar mingomingoFrom linux-rt-users-owner@vger.kernel.org Fri Jul 13 20:13:14 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from vger.kernel.org (vger.kernel.org [209.132.176.167]) by mail.tglx.de (Postfix) with ESMTP id 5902865C3EB; Fri, 13 Jul 2007 20:13:14 +0200 (CEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S933095AbXGMSNN (ORCPT + 1 other); Fri, 13 Jul 2007 14:13:13 -0400 Received: (majordomo@vger.kernel.org) by vger.kernel.org id S933031AbXGMSNM (ORCPT ); Fri, 13 Jul 2007 14:13:12 -0400 Received: from deeprooted.net ([216.254.16.51]:38941 "EHLO paris.hilman.org" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1760089AbXGMSNH (ORCPT ); Fri, 13 Jul 2007 14:13:07 -0400 Received: by paris.hilman.org (Postfix, from userid 1000) id E61B1D2857A; Fri, 13 Jul 2007 10:52:28 -0700 (PDT) Message-Id: <20070713175228.623525155@mvista.com> References: <20070713175214.336577416@mvista.com> User-Agent: quilt/0.45-1 Date: Fri, 13 Jul 2007 10:52:18 -0700 From: Kevin Hilman To: tglx@linutronix.de, mingo@elte.hu Cc: linux-rt-users@vger.kernel.org, linux-kernel@vger.kernel.org Subject: [PATCH -rt 4/6] Add trace_preempt_*_idle() support for ARM. 
Content-Disposition: inline; filename=arm-trace-preempt-idle.patch Sender: linux-rt-users-owner@vger.kernel.org Precedence: bulk X-Mailing-List: linux-rt-users@vger.kernel.org X-Filter-To: .Kernel.rt-users X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Mime-Version: 1.0 Add trace functions to ARM idle loop and also move the tick_nohz_restart_sched_tick() after the local_irq_disable() as is done on x86. Signed-off-by: Kevin Hilman --- arch/arm/kernel/process.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/arm/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/process.c +++ linux-rt-rebase.q/arch/arm/kernel/process.c @@ -169,11 +169,13 @@ void cpu_idle(void) while (!need_resched() && !need_resched_delayed()) idle(); leds_event(led_idle_end); - tick_nohz_restart_sched_tick(); local_irq_disable(); + trace_preempt_exit_idle(); + tick_nohz_restart_sched_tick(); __preempt_enable_no_resched(); __schedule(); preempt_disable(); + trace_preempt_enter_idle(); local_irq_enable(); } } patches/x86-64-smpboot-whitespace.patch0000664000077200007720000000102410655544571017242 0ustar mingomingo--- arch/x86_64/kernel/smpboot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux/arch/x86_64/kernel/smpboot.c =================================================================== --- linux.orig/arch/x86_64/kernel/smpboot.c +++ linux/arch/x86_64/kernel/smpboot.c @@ -331,8 +331,8 @@ void __cpuinit start_secondary(void) barrier(); /* - * Check TSC sync first: - */ + * Check TSC sync first: + */ check_tsc_sync_target(); Dprintk("cpu %d: setting up apic clock\n", smp_processor_id()); patches/lockstat_bounce_rt.patch0000664000077200007720000000156010655544576016451 0ustar mingomingo--- include/linux/lockdep.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) Index: linux-rt-rebase.q/include/linux/lockdep.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/lockdep.h +++ linux-rt-rebase.q/include/linux/lockdep.h @@ -349,8 +349,8 @@ do { \ if (!f_try(&(_lock)->lock)) { \ lock_contended(&(_lock)->dep_map, _RET_IP_); \ f_lock(&(_lock)->lock); \ - lock_acquired(&(_lock)->dep_map); \ } \ + lock_acquired(&(_lock)->dep_map); \ } while (0) @@ -360,9 +360,9 @@ do { \ if (!f_try(&(_lock)->lock)) { \ lock_contended(&(_lock)->dep_map, _RET_IP_); \ ret = f_lock(&(_lock)->lock); \ - if (!ret) \ - lock_acquired(&(_lock)->dep_map); \ } \ + if (!ret) \ + lock_acquired(&(_lock)->dep_map); \ ret; \ }) patches/rcu-warn-underflow.patch0000664000077200007720000000141110655544573016316 0ustar mingomingo--- kernel/rcupreempt.c | 5 +++++ 1 file changed, 5 insertions(+) Index: linux-rt-rebase.q/kernel/rcupreempt.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rcupreempt.c +++ linux-rt-rebase.q/kernel/rcupreempt.c @@ -129,6 +129,9 @@ void __rcu_read_lock(void) atomic_inc(current->rcu_flipctr2); smp_mb__after_atomic_inc(); /* might optimize out... 
*/ } + } else { + /* nesting limit: */ + WARN_ON_ONCE(current->rcu_read_lock_nesting > 30); } local_irq_restore(oldirq); } @@ -154,6 +157,8 @@ void __rcu_read_unlock(void) atomic_dec(current->rcu_flipctr2); current->rcu_flipctr2 = NULL; } + } else { + WARN_ON_ONCE(current->rcu_read_lock_nesting < 0); } local_irq_restore(oldirq); patches/softlockup-fix.patch0000664000077200007720000000300710655544576015540 0ustar mingomingoSubject: fix the softlockup watchdog to actually do something From: Ingo Molnar this Xen related commit: commit 966812dc98e6a7fcdf759cbfa0efab77500a8868 Author: Jeremy Fitzhardinge Date: Tue May 8 00:28:02 2007 -0700 Ignore stolen time in the softlockup watchdog broke the softlockup watchdog to never report any lockups. (!) print_timestamp defaults to 0, this makes the following condition always true: if (print_timestamp < (touch_timestamp + 1) || and we'll in essence never report soft lockups. apparently the functionality of the soft lockup watchdog was never actually tested with that patch applied ... [this is -stable material too.] Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) Index: linux-rt-rebase.q/kernel/softlockup.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softlockup.c +++ linux-rt-rebase.q/kernel/softlockup.c @@ -80,10 +80,11 @@ void softlockup_tick(void) print_timestamp = per_cpu(print_timestamp, this_cpu); /* report at most once a second */ - if (print_timestamp < (touch_timestamp + 1) || - did_panic || - !per_cpu(watchdog_task, this_cpu)) + if ((print_timestamp >= touch_timestamp && + print_timestamp < (touch_timestamp + 1)) || + did_panic || !per_cpu(watchdog_task, this_cpu)) { return; + } /* do not print during early bootup: */ if (unlikely(system_state != SYSTEM_RUNNING)) { patches/arm-leds-timer.patch0000664000077200007720000000125210655544571015400 0ustar mingomingoThe clockevent layer now handles everything done by the ARM timer_tick() call, except the LED stuff. Here we add an arch_tick_leds() to handle LED toggling which is called by do_timer(). --- arch/arm/kernel/time.c | 7 +++++++ 1 file changed, 7 insertions(+) Index: linux/arch/arm/kernel/time.c =================================================================== --- linux.orig/arch/arm/kernel/time.c +++ linux/arch/arm/kernel/time.c @@ -236,6 +236,13 @@ static inline void do_leds(void) #define do_leds() #endif +void arch_tick_leds(void) +{ +#ifdef CONFIG_LEDS_TIMER + do_leds(); +#endif +} + #ifndef CONFIG_GENERIC_TIME void do_gettimeofday(struct timeval *tv) { patches/ppc-remove-last-cpukhz.patch0000664000077200007720000000175210655544571017103 0ustar mingomingoFrom sshtylyov@ru.mvista.com Thu May 24 06:02:00 2007 From: Sergei Shtylyov Subject: [PATCH 2.6.21-rt7] PowerPC: kill cpu_khz reference Date: Thu, 24 May 2007 06:02:00 +1000 X-Patchwork-ID: 11304 Remove forgotten reference to 'cpu_khz' which have been removed for PowerPC in 2.6.21-rt7... Signed-off-by: Sergei Shtylyov --- The irony here is that it was me who sent a patch to add that line. 
:-) --- --- arch/powerpc/kernel/time.c | 1 - 1 file changed, 1 deletion(-) Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -922,7 +922,6 @@ void __init time_init(void) tb_ticks_per_jiffy = ppc_tb_freq / HZ; tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; - cpu_khz = ppc_tb_freq / 1000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); calc_cputime_factors(); patches/rt-mutex-ppc.patch0000664000077200007720000006566310655544573015145 0ustar mingomingo--- arch/powerpc/Kconfig | 19 +++++++---- arch/powerpc/kernel/Makefile | 3 + arch/powerpc/kernel/ppc_ksyms.c | 1 arch/powerpc/kernel/semaphore.c | 20 +++++++----- arch/powerpc/lib/locks.c | 4 +- arch/ppc/Kconfig | 19 +++++++---- arch/ppc/kernel/entry.S | 4 +- arch/ppc/kernel/semaphore.c | 13 +++++-- arch/ppc/lib/locks.c | 38 +++++++++++------------ arch/ppc/syslib/ocp.c | 2 - drivers/macintosh/adb.c | 10 +++--- include/asm-powerpc/rwsem.h | 42 ++++++++++++++----------- include/asm-powerpc/semaphore.h | 57 ++++++++++++++++++++++------------- include/asm-powerpc/spinlock.h | 38 +++++++++++------------ include/asm-powerpc/spinlock_types.h | 4 +- include/asm-ppc/ocp.h | 2 - 16 files changed, 159 insertions(+), 117 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/Kconfig +++ linux-rt-rebase.q/arch/powerpc/Kconfig @@ -33,13 +33,6 @@ config IRQ_PER_CPU bool default y -config RWSEM_GENERIC_SPINLOCK - bool - -config RWSEM_XCHGADD_ALGORITHM - bool - default y - config ARCH_HAS_ILOG2_U32 bool default y @@ -173,6 +166,18 @@ config GENERIC_CLOCKEVENTS source kernel/time/Kconfig source kernel/Kconfig.preempt + +config RWSEM_GENERIC_SPINLOCK + bool + default y + +config ASM_SEMAPHORES + bool + default y + +config RWSEM_XCHGADD_ALGORITHM + bool + source "fs/Kconfig.binfmt" # We optimistically allocate largepages from the VM, so make the limit Index: linux-rt-rebase.q/arch/powerpc/kernel/Makefile =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/Makefile +++ linux-rt-rebase.q/arch/powerpc/kernel/Makefile @@ -10,11 +10,12 @@ CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o += -fPIC endif -obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ +obj-y := cputable.o ptrace.o syscalls.o \ irq.o align.o signal_32.o pmc.o vdso.o \ init_task.o process.o systbl.o idle.o \ signal.o obj-y += vdso32/ +obj-$(CONFIG_ASM_SEMAPHORES) += semaphore.o obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ signal_64.o ptrace32.o \ paca.o cpu_setup_ppc970.o \ Index: linux-rt-rebase.q/arch/powerpc/kernel/ppc_ksyms.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/ppc_ksyms.c +++ linux-rt-rebase.q/arch/powerpc/kernel/ppc_ksyms.c @@ -16,7 +16,6 @@ #include #include -#include #include #include #include Index: linux-rt-rebase.q/arch/powerpc/kernel/semaphore.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/semaphore.c +++ linux-rt-rebase.q/arch/powerpc/kernel/semaphore.c @@ -31,7 +31,7 @@ * sem->count = tmp; * return old_count; */ -static inline int __sem_update_count(struct semaphore *sem, int incr) +static inline int __sem_update_count(struct compat_semaphore *sem, int incr) { int old_count, tmp; @@ 
-50,7 +50,7 @@ static inline int __sem_update_count(str return old_count; } -void __up(struct semaphore *sem) +void __compat_up(struct compat_semaphore *sem) { /* * Note that we incremented count in up() before we came here, @@ -63,7 +63,7 @@ void __up(struct semaphore *sem) __sem_update_count(sem, 1); wake_up(&sem->wait); } -EXPORT_SYMBOL(__up); +EXPORT_SYMBOL(__compat_up); /* * Note that when we come in to __down or __down_interruptible, @@ -73,7 +73,7 @@ EXPORT_SYMBOL(__up); * Thus it is only when we decrement count from some value > 0 * that we have actually got the semaphore. */ -void __sched __down(struct semaphore *sem) +void __sched __compat_down(struct compat_semaphore *sem) { struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); @@ -101,9 +101,9 @@ void __sched __down(struct semaphore *se */ wake_up(&sem->wait); } -EXPORT_SYMBOL(__down); +EXPORT_SYMBOL(__compat_down); -int __sched __down_interruptible(struct semaphore * sem) +int __sched __compat_down_interruptible(struct compat_semaphore *sem) { int retval = 0; struct task_struct *tsk = current; @@ -132,4 +132,10 @@ int __sched __down_interruptible(struct wake_up(&sem->wait); return retval; } -EXPORT_SYMBOL(__down_interruptible); +EXPORT_SYMBOL(__compat_down_interruptible); + +int compat_sem_is_locked(struct compat_semaphore *sem) +{ + return (int) atomic_read(&sem->count) < 0; +} +EXPORT_SYMBOL(compat_sem_is_locked); Index: linux-rt-rebase.q/arch/powerpc/lib/locks.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/lib/locks.c +++ linux-rt-rebase.q/arch/powerpc/lib/locks.c @@ -25,7 +25,7 @@ #include #include -void __spin_yield(raw_spinlock_t *lock) +void __spin_yield(__raw_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; @@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw) } #endif -void __raw_spin_unlock_wait(raw_spinlock_t *lock) +void __raw_spin_unlock_wait(__raw_spinlock_t *lock) { while (lock->slock) { HMT_low(); Index: linux-rt-rebase.q/arch/ppc/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/Kconfig +++ linux-rt-rebase.q/arch/ppc/Kconfig @@ -12,13 +12,6 @@ config GENERIC_HARDIRQS bool default y -config RWSEM_GENERIC_SPINLOCK - bool - -config RWSEM_XCHGADD_ALGORITHM - bool - default y - config ARCH_HAS_ILOG2_U32 bool default y @@ -988,6 +981,18 @@ config ARCH_POPULATES_NODE_MAP source kernel/Kconfig.hz source kernel/Kconfig.preempt + +config RWSEM_GENERIC_SPINLOCK + bool + default y + +config ASM_SEMAPHORES + bool + default y + +config RWSEM_XCHGADD_ALGORITHM + bool + source "mm/Kconfig" source "fs/Kconfig.binfmt" Index: linux-rt-rebase.q/arch/ppc/kernel/entry.S =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/kernel/entry.S +++ linux-rt-rebase.q/arch/ppc/kernel/entry.S @@ -863,7 +863,7 @@ global_dbcr0: #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ do_work: /* r10 contains MSR_KERNEL here */ - andi. r0,r9,_TIF_NEED_RESCHED + andi. r0,r9,(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) beq do_user_signal do_resched: /* r10 contains MSR_KERNEL here */ @@ -877,7 +877,7 @@ recheck: MTMSRD(r10) /* disable interrupts */ rlwinm r9,r1,0,0,18 lwz r9,TI_FLAGS(r9) - andi. r0,r9,_TIF_NEED_RESCHED + andi. r0,r9,(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) bne- do_resched andi. 
r0,r9,_TIF_SIGPENDING beq restore_user Index: linux-rt-rebase.q/arch/ppc/kernel/semaphore.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/kernel/semaphore.c +++ linux-rt-rebase.q/arch/ppc/kernel/semaphore.c @@ -29,7 +29,7 @@ * sem->count = tmp; * return old_count; */ -static inline int __sem_update_count(struct semaphore *sem, int incr) +static inline int __sem_update_count(struct compat_semaphore *sem, int incr) { int old_count, tmp; @@ -48,7 +48,7 @@ static inline int __sem_update_count(str return old_count; } -void __up(struct semaphore *sem) +void __compat_up(struct compat_semaphore *sem) { /* * Note that we incremented count in up() before we came here, @@ -70,7 +70,7 @@ void __up(struct semaphore *sem) * Thus it is only when we decrement count from some value > 0 * that we have actually got the semaphore. */ -void __sched __down(struct semaphore *sem) +void __sched __compat_down(struct compat_semaphore *sem) { struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); @@ -100,7 +100,7 @@ void __sched __down(struct semaphore *se wake_up(&sem->wait); } -int __sched __down_interruptible(struct semaphore * sem) +int __sched __compat_down_interruptible(struct compat_semaphore * sem) { int retval = 0; struct task_struct *tsk = current; @@ -129,3 +129,8 @@ int __sched __down_interruptible(struct wake_up(&sem->wait); return retval; } + +int compat_sem_is_locked(struct compat_semaphore *sem) +{ + return (int) atomic_read(&sem->count) < 0; +} Index: linux-rt-rebase.q/arch/ppc/lib/locks.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/lib/locks.c +++ linux-rt-rebase.q/arch/ppc/lib/locks.c @@ -42,7 +42,7 @@ static inline unsigned long __spin_trylo return ret; } -void _raw_spin_lock(spinlock_t *lock) +void __raw_spin_lock(raw_spinlock_t *lock) { int cpu = smp_processor_id(); unsigned int stuck = INIT_STUCK; @@ -62,9 +62,9 @@ void _raw_spin_lock(spinlock_t *lock) lock->owner_pc = (unsigned long)__builtin_return_address(0); lock->owner_cpu = cpu; } -EXPORT_SYMBOL(_raw_spin_lock); +EXPORT_SYMBOL(__raw_spin_lock); -int _raw_spin_trylock(spinlock_t *lock) +int __raw_spin_trylock(raw_spinlock_t *lock) { if (__spin_trylock(&lock->lock)) return 0; @@ -72,9 +72,9 @@ int _raw_spin_trylock(spinlock_t *lock) lock->owner_pc = (unsigned long)__builtin_return_address(0); return 1; } -EXPORT_SYMBOL(_raw_spin_trylock); +EXPORT_SYMBOL(__raw_spin_trylock); -void _raw_spin_unlock(spinlock_t *lp) +void __raw_spin_unlock(raw_spinlock_t *lp) { if ( !lp->lock ) printk("_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n", @@ -88,13 +88,13 @@ void _raw_spin_unlock(spinlock_t *lp) wmb(); lp->lock = 0; } -EXPORT_SYMBOL(_raw_spin_unlock); +EXPORT_SYMBOL(__raw_spin_unlock); /* * For rwlocks, zero is unlocked, -1 is write-locked, * positive is read-locked. 
*/ -static __inline__ int __read_trylock(rwlock_t *rw) +static __inline__ int __read_trylock(raw_rwlock_t *rw) { signed int tmp; @@ -114,13 +114,13 @@ static __inline__ int __read_trylock(rwl return tmp; } -int _raw_read_trylock(rwlock_t *rw) +int __raw_read_trylock(raw_rwlock_t *rw) { return __read_trylock(rw) > 0; } -EXPORT_SYMBOL(_raw_read_trylock); +EXPORT_SYMBOL(__raw_read_trylock); -void _raw_read_lock(rwlock_t *rw) +void __raw_read_lock(rwlock_t *rw) { unsigned int stuck; @@ -135,9 +135,9 @@ void _raw_read_lock(rwlock_t *rw) } } } -EXPORT_SYMBOL(_raw_read_lock); +EXPORT_SYMBOL(__raw_read_lock); -void _raw_read_unlock(rwlock_t *rw) +void __raw_read_unlock(raw_rwlock_t *rw) { if ( rw->lock == 0 ) printk("_read_unlock(): %s/%d (nip %08lX) lock %d\n", @@ -146,9 +146,9 @@ void _raw_read_unlock(rwlock_t *rw) wmb(); atomic_dec((atomic_t *) &(rw)->lock); } -EXPORT_SYMBOL(_raw_read_unlock); +EXPORT_SYMBOL(__raw_read_unlock); -void _raw_write_lock(rwlock_t *rw) +void __raw_write_lock(raw_rwlock_t *rw) { unsigned int stuck; @@ -164,18 +164,18 @@ void _raw_write_lock(rwlock_t *rw) } wmb(); } -EXPORT_SYMBOL(_raw_write_lock); +EXPORT_SYMBOL(__raw_write_lock); -int _raw_write_trylock(rwlock_t *rw) +int __raw_write_trylock(raw_rwlock_t *rw) { if (cmpxchg(&rw->lock, 0, -1) != 0) return 0; wmb(); return 1; } -EXPORT_SYMBOL(_raw_write_trylock); +EXPORT_SYMBOL(__raw_write_trylock); -void _raw_write_unlock(rwlock_t *rw) +void __raw_write_unlock(raw_rwlock_t *rw) { if (rw->lock >= 0) printk("_write_lock(): %s/%d (nip %08lX) lock %d\n", @@ -184,6 +184,6 @@ void _raw_write_unlock(rwlock_t *rw) wmb(); rw->lock = 0; } -EXPORT_SYMBOL(_raw_write_unlock); +EXPORT_SYMBOL(__raw_write_unlock); #endif Index: linux-rt-rebase.q/arch/ppc/syslib/ocp.c =================================================================== --- linux-rt-rebase.q.orig/arch/ppc/syslib/ocp.c +++ linux-rt-rebase.q/arch/ppc/syslib/ocp.c @@ -44,11 +44,11 @@ #include #include #include +#include #include #include #include -#include #include //#define DBG(x) printk x Index: linux-rt-rebase.q/drivers/macintosh/adb.c =================================================================== --- linux-rt-rebase.q.orig/drivers/macintosh/adb.c +++ linux-rt-rebase.q/drivers/macintosh/adb.c @@ -250,6 +250,8 @@ adb_probe_task(void *x) { strcpy(current->comm, "kadbprobe"); + down(&adb_probe_mutex); + printk(KERN_INFO "adb: starting probe task...\n"); do_adb_reset_bus(); printk(KERN_INFO "adb: finished probe task...\n"); @@ -276,7 +278,9 @@ adb_reset_bus(void) return 0; } - down(&adb_probe_mutex); + if (adb_got_sleep) + return 0; + schedule_work(&adb_reset_work); return 0; } @@ -339,9 +343,8 @@ adb_notify_sleep(struct pmu_sleep_notifi { switch (when) { case PBOOK_SLEEP_REQUEST: + /* Signal to discontiue probing */ adb_got_sleep = 1; - /* We need to get a lock on the probe thread */ - down(&adb_probe_mutex); /* Stop autopoll */ if (adb_controller->autopoll) adb_controller->autopoll(0); @@ -350,7 +353,6 @@ adb_notify_sleep(struct pmu_sleep_notifi break; case PBOOK_WAKE: adb_got_sleep = 0; - up(&adb_probe_mutex); adb_reset_bus(); break; } Index: linux-rt-rebase.q/include/asm-powerpc/rwsem.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/rwsem.h +++ linux-rt-rebase.q/include/asm-powerpc/rwsem.h @@ -1,6 +1,10 @@ #ifndef _ASM_POWERPC_RWSEM_H #define _ASM_POWERPC_RWSEM_H +#ifndef _LINUX_RWSEM_H +#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" +#endif + #ifdef __KERNEL__ /* @@ 
-17,7 +21,7 @@ /* * the semaphore definition */ -struct rw_semaphore { +struct compat_rw_semaphore { /* XXX this should be able to be an atomic_t -- paulus */ signed int count; #define RWSEM_UNLOCKED_VALUE 0x00000000 @@ -26,7 +30,7 @@ struct rw_semaphore { #define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; + raw_spinlock_t wait_lock; struct list_head wait_list; }; @@ -34,15 +38,15 @@ struct rw_semaphore { { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ LIST_HEAD_INIT((name).wait_list) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define COMPAT_DECLARE_RWSEM(name) \ + struct compat_rw_semaphore name = __RWSEM_INITIALIZER(name) -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_down_read_failed(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_down_write_failed(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_wake(struct compat_rw_semaphore *sem); +extern struct compat_rw_semaphore *rwsem_downgrade_wake(struct compat_rw_semaphore *sem); -static inline void init_rwsem(struct rw_semaphore *sem) +static inline void compat_init_rwsem(struct compat_rw_semaphore *sem) { sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); @@ -52,13 +56,13 @@ static inline void init_rwsem(struct rw_ /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct compat_rw_semaphore *sem) { if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0)) rwsem_down_read_failed(sem); } -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct compat_rw_semaphore *sem) { int tmp; @@ -74,7 +78,7 @@ static inline int __down_read_trylock(st /* * lock for writing */ -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write(struct compat_rw_semaphore *sem) { int tmp; @@ -84,7 +88,7 @@ static inline void __down_write(struct r rwsem_down_write_failed(sem); } -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct compat_rw_semaphore *sem) { int tmp; @@ -96,7 +100,7 @@ static inline int __down_write_trylock(s /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct compat_rw_semaphore *sem) { int tmp; @@ -108,7 +112,7 @@ static inline void __up_read(struct rw_s /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct compat_rw_semaphore *sem) { if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, (atomic_t *)(&sem->count)) < 0)) @@ -118,7 +122,7 @@ static inline void __up_write(struct rw_ /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct compat_rw_semaphore *sem) { atomic_add(delta, (atomic_t *)(&sem->count)); } @@ -126,7 +130,7 @@ static inline void rwsem_atomic_add(int /* * downgrade write lock to read lock */ -static inline void 
__downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct compat_rw_semaphore *sem) { int tmp; @@ -138,12 +142,12 @@ static inline void __downgrade_write(str /* * implement exchange and add functionality */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct compat_rw_semaphore *sem) { return atomic_add_return(delta, (atomic_t *)(&sem->count)); } -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline int compat_rwsem_is_locked(struct compat_rw_semaphore *sem) { return (sem->count != 0); } Index: linux-rt-rebase.q/include/asm-powerpc/semaphore.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/semaphore.h +++ linux-rt-rebase.q/include/asm-powerpc/semaphore.h @@ -10,54 +10,65 @@ #ifdef __KERNEL__ +/*#include */ #include #include #include #include -struct semaphore { +/* + * On !PREEMPT_RT all sempahores are compat + */ +#ifndef CONFIG_PREEMPT_RT +# define compat_semaphore semaphore +#endif + +struct compat_semaphore { /* * Note that any negative value of count is equivalent to 0, * but additionally indicates that some process(es) might be * sleeping on `wait'. */ atomic_t count; + int sleepers; wait_queue_head_t wait; }; -#define __SEMAPHORE_INITIALIZER(name, n) \ +#define __COMPAT_SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } -#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) +#define __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, count) \ + struct compat_semaphore name = __COMPAT_SEMAPHORE_INITIALIZER(name,count) -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) -#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) +#define COMPAT_DECLARE_MUTEX(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, 1) +#define COMPAT_DECLARE_MUTEX_LOCKED(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, 0) -static inline void sema_init (struct semaphore *sem, int val) +static inline void compat_sema_init (struct compat_semaphore *sem, int val) { atomic_set(&sem->count, val); init_waitqueue_head(&sem->wait); } -static inline void init_MUTEX (struct semaphore *sem) +static inline void compat_init_MUTEX (struct compat_semaphore *sem) { - sema_init(sem, 1); + compat_sema_init(sem, 1); } -static inline void init_MUTEX_LOCKED (struct semaphore *sem) +static inline void compat_init_MUTEX_LOCKED (struct compat_semaphore *sem) { - sema_init(sem, 0); + compat_sema_init(sem, 0); } -extern void __down(struct semaphore * sem); -extern int __down_interruptible(struct semaphore * sem); -extern void __up(struct semaphore * sem); +extern void __compat_down(struct compat_semaphore * sem); +extern int __compat_down_interruptible(struct compat_semaphore * sem); +extern void __compat_up(struct compat_semaphore * sem); + +extern int compat_sem_is_locked(struct compat_semaphore *sem); -static inline void down(struct semaphore * sem) +static inline void compat_down(struct compat_semaphore * sem) { might_sleep(); @@ -65,31 +76,35 @@ static inline void down(struct semaphore * Try to get the semaphore, take the slow path if we fail. 
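The whole counting protocol is this fast path plus its mirror image in compat_up(): a negative count means at least one task is sleeping (or about to sleep) on sem->wait. A self-contained model of it in C11 atomics, with the sleep and wakeup slow paths replaced by plain counters; model_down(), model_up(), sleeps and wakeups are names made up for the sketch:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;
static int sleeps, wakeups;	/* stand-ins for sleeping on / waking sem->wait */

static void model_down(void)
{
	if (atomic_fetch_sub(&count, 1) - 1 < 0)	/* like atomic_dec_return() */
		sleeps++;	/* kernel: __compat_down() blocks here */
}

static void model_up(void)
{
	if (atomic_fetch_add(&count, 1) + 1 <= 0)	/* like atomic_inc_return() */
		wakeups++;	/* kernel: __compat_up() wakes one waiter */
}

int main(void)
{
	atomic_store(&count, 1);	/* as initialized by COMPAT_DECLARE_MUTEX() */
	model_down();			/* 1 -> 0, fast path           */
	model_down();			/* 0 -> -1, would sleep        */
	model_up();			/* -1 -> 0, wakes the sleeper  */
	model_up();			/* 0 -> 1, fast path           */
	printf("sleeps=%d wakeups=%d count=%d\n",
	       sleeps, wakeups, atomic_load(&count));	/* 1 1 1 */
	return 0;
}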
*/ if (unlikely(atomic_dec_return(&sem->count) < 0)) - __down(sem); + __compat_down(sem); } -static inline int down_interruptible(struct semaphore * sem) +static inline int compat_down_interruptible(struct compat_semaphore * sem) { int ret = 0; might_sleep(); if (unlikely(atomic_dec_return(&sem->count) < 0)) - ret = __down_interruptible(sem); + ret = __compat_down_interruptible(sem); return ret; } -static inline int down_trylock(struct semaphore * sem) +static inline int compat_down_trylock(struct compat_semaphore * sem) { return atomic_dec_if_positive(&sem->count) < 0; } -static inline void up(struct semaphore * sem) +static inline void compat_up(struct compat_semaphore * sem) { if (unlikely(atomic_inc_return(&sem->count) <= 0)) - __up(sem); + __compat_up(sem); } +#define compat_sema_count(sem) atomic_read(&(sem)->count) + +#include + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_SEMAPHORE_H */ Index: linux-rt-rebase.q/include/asm-powerpc/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/spinlock.h +++ linux-rt-rebase.q/include/asm-powerpc/spinlock.h @@ -53,7 +53,7 @@ * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. */ -static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) +static __inline__ unsigned long ___raw_spin_trylock(__raw_spinlock_t *lock) { unsigned long tmp, token; @@ -72,10 +72,10 @@ static __inline__ unsigned long __spin_t return tmp; } -static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) +static int __inline__ __raw_spin_trylock(__raw_spinlock_t *lock) { CLEAR_IO_SYNC; - return __spin_trylock(lock) == 0; + return ___raw_spin_trylock(lock) == 0; } /* @@ -95,19 +95,19 @@ static int __inline__ __raw_spin_trylock #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) /* We only yield to the hypervisor if we are in shared processor mode */ #define SHARED_PROCESSOR (get_lppaca()->shared_proc) -extern void __spin_yield(raw_spinlock_t *lock); -extern void __rw_yield(raw_rwlock_t *lock); +extern void __spin_yield(__raw_spinlock_t *lock); +extern void __rw_yield(__raw_rwlock_t *lock); #else /* SPLPAR || ISERIES */ #define __spin_yield(x) barrier() #define __rw_yield(x) barrier() #define SHARED_PROCESSOR 0 #endif -static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) +static void __inline__ __raw_spin_lock(__raw_spinlock_t *lock) { CLEAR_IO_SYNC; while (1) { - if (likely(__spin_trylock(lock) == 0)) + if (likely(___raw_spin_trylock(lock) == 0)) break; do { HMT_low(); @@ -118,13 +118,13 @@ static void __inline__ __raw_spin_lock(r } } -static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +static void __inline__ __raw_spin_lock_flags(__raw_spinlock_t *lock, unsigned long flags) { unsigned long flags_dis; CLEAR_IO_SYNC; while (1) { - if (likely(__spin_trylock(lock) == 0)) + if (likely(___raw_spin_trylock(lock) == 0)) break; local_save_flags(flags_dis); local_irq_restore(flags); @@ -138,7 +138,7 @@ static void __inline__ __raw_spin_lock_f } } -static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) +static __inline__ void __raw_spin_unlock(__raw_spinlock_t *lock) { SYNC_IO; __asm__ __volatile__("# __raw_spin_unlock\n\t" @@ -147,7 +147,7 @@ static __inline__ void __raw_spin_unlock } #ifdef CONFIG_PPC64 -extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); +extern void __raw_spin_unlock_wait(__raw_spinlock_t *lock); #else #define __raw_spin_unlock_wait(lock) \ do { while 
(__raw_spin_is_locked(lock)) cpu_relax(); } while (0) @@ -179,7 +179,7 @@ extern void __raw_spin_unlock_wait(raw_s * This returns the old value in the lock + 1, * so we got a read lock if the return value is > 0. */ -static long __inline__ __read_trylock(raw_rwlock_t *rw) +static long __inline__ __read_trylock(__raw_rwlock_t *rw) { long tmp; @@ -203,7 +203,7 @@ static long __inline__ __read_trylock(ra * This returns the old value in the lock, * so we got the write lock if the return value is 0. */ -static __inline__ long __write_trylock(raw_rwlock_t *rw) +static __inline__ long __write_trylock(__raw_rwlock_t *rw) { long tmp, token; @@ -223,7 +223,7 @@ static __inline__ long __write_trylock(r return tmp; } -static void __inline__ __raw_read_lock(raw_rwlock_t *rw) +static void __inline__ __raw_read_lock(__raw_rwlock_t *rw) { while (1) { if (likely(__read_trylock(rw) > 0)) @@ -237,7 +237,7 @@ static void __inline__ __raw_read_lock(r } } -static void __inline__ __raw_write_lock(raw_rwlock_t *rw) +static void __inline__ __raw_write_lock(__raw_rwlock_t *rw) { while (1) { if (likely(__write_trylock(rw) == 0)) @@ -251,17 +251,17 @@ static void __inline__ __raw_write_lock( } } -static int __inline__ __raw_read_trylock(raw_rwlock_t *rw) +static int __inline__ __raw_read_trylock(__raw_rwlock_t *rw) { return __read_trylock(rw) > 0; } -static int __inline__ __raw_write_trylock(raw_rwlock_t *rw) +static int __inline__ __raw_write_trylock(__raw_rwlock_t *rw) { return __write_trylock(rw) == 0; } -static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) +static void __inline__ __raw_read_unlock(__raw_rwlock_t *rw) { long tmp; @@ -278,7 +278,7 @@ static void __inline__ __raw_read_unlock : "cr0", "memory"); } -static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) +static __inline__ void __raw_write_unlock(__raw_rwlock_t *rw) { __asm__ __volatile__("# write_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); Index: linux-rt-rebase.q/include/asm-powerpc/spinlock_types.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/spinlock_types.h +++ linux-rt-rebase.q/include/asm-powerpc/spinlock_types.h @@ -7,13 +7,13 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile signed int lock; -} raw_rwlock_t; +} __raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { 0 } Index: linux-rt-rebase.q/include/asm-ppc/ocp.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ppc/ocp.h +++ linux-rt-rebase.q/include/asm-ppc/ocp.h @@ -27,10 +27,10 @@ #include #include #include +#include #include #include -#include #include #ifdef CONFIG_PPC_OCP patches/preempt-irqs-x86-64.patch0000664000077200007720000000362510655544573016070 0ustar mingomingo--- arch/x86_64/kernel/i8259.c | 3 ++- arch/x86_64/kernel/io_apic.c | 3 ++- arch/x86_64/kernel/time.c | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) Index: linux-rt-rebase.q/arch/x86_64/kernel/i8259.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/i8259.c +++ linux-rt-rebase.q/arch/x86_64/kernel/i8259.c @@ -395,7 +395,8 @@ device_initcall(i8259A_init_sysfs); * IRQ2 is cascade interrupt to second interrupt controller */ -static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; +static struct irqaction irq2 = { no_action, IRQF_NODELAY, CPU_MASK_NONE, "cascade", NULL, NULL}; + 
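IRQF_NODELAY, added here and to the timer irqaction below, is the flag this series uses for handlers that must keep running in hard interrupt context instead of being pushed into the irq threads. A driver would request such an interrupt in the usual way; this is only a sketch, and MY_IRQ, my_isr and "my-dev" are made-up names:

#include <linux/interrupt.h>

/* Sketch only: not part of this patch; names are hypothetical. */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	/* latency-critical work that must not wait for an irq thread */
	return IRQ_HANDLED;
}

static int my_setup(void)
{
	return request_irq(MY_IRQ, my_isr,
			   IRQF_DISABLED | IRQF_NODELAY, "my-dev", NULL);
}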
DEFINE_PER_CPU(vector_irq_t, vector_irq) = { [0 ... IRQ0_VECTOR - 1] = -1, [IRQ0_VECTOR] = 0, Index: linux-rt-rebase.q/arch/x86_64/kernel/io_apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/io_apic.c +++ linux-rt-rebase.q/arch/x86_64/kernel/io_apic.c @@ -803,9 +803,10 @@ static void ioapic_register_intr(int irq if (trigger) set_irq_chip_and_handler_name(irq, &ioapic_chip, handle_fasteoi_irq, "fasteoi"); - else + else { set_irq_chip_and_handler_name(irq, &ioapic_chip, handle_edge_irq, "edge"); + } } static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, Index: linux-rt-rebase.q/arch/x86_64/kernel/time.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/time.c +++ linux-rt-rebase.q/arch/x86_64/kernel/time.c @@ -256,7 +256,8 @@ static unsigned int __init tsc_calibrate static struct irqaction irq0 = { .handler = timer_event_interrupt, - .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING, + .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | + IRQF_NODELAY, .mask = CPU_MASK_NONE, .name = "timer" }; patches/ppc-add-ppc32-mcount.patch0000664000077200007720000000434510655544572016325 0ustar mingomingo--- arch/powerpc/kernel/entry_32.S | 82 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) Index: linux/arch/powerpc/kernel/entry_32.S =================================================================== --- linux.orig/arch/powerpc/kernel/entry_32.S +++ linux/arch/powerpc/kernel/entry_32.S @@ -989,3 +989,85 @@ machine_check_in_rtas: /* XXX load up BATs and panic */ #endif /* CONFIG_PPC_RTAS */ + +#ifdef CONFIG_MCOUNT +/* + * mcount() is not the same as _mcount(). The callers of mcount() have a + * normal context. The callers of _mcount() do not have a stack frame and + * have not saved the "caller saves" registers. + */ +_GLOBAL(mcount) + stwu r1,-16(r1) + mflr r3 + lis r5,mcount_enabled@ha + lwz r5,mcount_enabled@l(r5) + stw r3,20(r1) + cmpwi r5,0 + beq 1f + /* r3 contains lr (eip), put parent lr (parent_eip) in r4 */ + lwz r4,16(r1) + lwz r4,4(r4) + bl __trace +1: + lwz r0,20(r1) + mtlr r0 + addi r1,r1,16 + blr + +/* + * The -pg flag, which is specified in the case of CONFIG_MCOUNT, causes the + * C compiler to add a call to _mcount() at the start of each function + * preamble, before the stack frame is created. An example of this preamble + * code is: + * + * mflr r0 + * lis r12,-16354 + * stw r0,4(r1) + * addi r0,r12,-19652 + * bl 0xc00034c8 <_mcount> + * mflr r0 + * stwu r1,-16(r1) + */ +_GLOBAL(_mcount) +#define M_STK_SIZE 48 + /* Would not expect to need to save cr, but glibc version of */ + /* _mcount() does, so cautiously saving it here too. 
*/ + stwu r1,-M_STK_SIZE(r1) + stw r3, 12(r1) + stw r4, 16(r1) + stw r5, 20(r1) + stw r6, 24(r1) + mflr r3 /* will use as first arg to __trace() */ + mfcr r4 + lis r5,mcount_enabled@ha + lwz r5,mcount_enabled@l(r5) + cmpwi r5,0 + stw r3, 44(r1) /* lr */ + stw r4, 8(r1) /* cr */ + stw r7, 28(r1) + stw r8, 32(r1) + stw r9, 36(r1) + stw r10,40(r1) + beq 1f + /* r3 contains lr (eip), put parent lr (parent_eip) in r4 */ + lwz r4,M_STK_SIZE+4(r1) + bl __trace +1: + lwz r8, 8(r1) /* cr */ + lwz r9, 44(r1) /* lr */ + lwz r3, 12(r1) + lwz r4, 16(r1) + lwz r5, 20(r1) + mtcrf 0xff,r8 + mtctr r9 + lwz r0, 52(r1) + lwz r6, 24(r1) + lwz r7, 28(r1) + lwz r8, 32(r1) + lwz r9, 36(r1) + lwz r10,40(r1) + addi r1,r1,M_STK_SIZE + mtlr r0 + bctr + +#endif /* CONFIG_MCOUNT */ patches/rt-time-starvation-fix-update.patch0000664000077200007720000000146610655544576020407 0ustar mingomingo--- kernel/time/timekeeping.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/kernel/time/timekeeping.c =================================================================== --- linux-rt-rebase.q.orig/kernel/time/timekeeping.c +++ linux-rt-rebase.q/kernel/time/timekeeping.c @@ -484,7 +484,7 @@ static void clocksource_adjust(s64 offse */ void update_wall_time(void) { - cycle_t cycle_now; + cycle_t cycle_now, offset; /* Make sure we're fully resumed: */ if (unlikely(timekeeping_suspended)) @@ -495,6 +495,7 @@ void update_wall_time(void) #else cycle_now = clock->cycle_last + clock->cycle_interval; #endif + offset = (cycle_now - clock->cycle_last) & clock->mask; clocksource_accumulate(clock, cycle_now); clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; patches/2.6.21-rc6-lockless6-speculative-get-page.patch0000664000077200007720000002662310655544576021726 0ustar mingomingoFrom: Nick Piggin Subject: [patch 6/9] mm: speculative get page If we can be sure that elevating the page_count on a pagecache page will pin it, we can speculatively run this operation, and subsequently check to see if we hit the right page rather than relying on holding a lock or otherwise pinning a reference to the page. This can be done if get_page/put_page behaves consistently throughout the whole tree (ie. if we "get" the page after it has been used for something else, we must be able to free it with a put_page). Actually, there is a period where the count behaves differently: when the page is free or if it is a constituent page of a compound page. We need an atomic_inc_not_zero operation to ensure we don't try to grab the page in either case. This patch introduces the core locking protocol to the pagecache (ie. adds page_cache_get_speculative, and tweaks some update-side code to make it work). [Hugh notices that PG_nonewrefs might be dispensed with entirely if current set_page_nonewrefs instead atomically save the page count and temporarily set it to zero. This is a nice idea, and simplifies find_get_page very much, but cannot be applied to all current SetPageNoNewRefs sites. Need to verify that add_to_page_cache and add_to_swap_cache can cope without it or make do some other way. Also, migration pages with PagePrivate set means that the filesystem has a ref on the page, so it might muck with page count, which is a big problem. 
] Signed-off-by: Nick Piggin --- include/linux/page-flags.h | 28 ++++++++++++ include/linux/pagemap.h | 105 +++++++++++++++++++++++++++++++++++++++++++++ mm/filemap.c | 2 mm/migrate.c | 7 ++- mm/swap_state.c | 2 mm/vmscan.c | 10 +++- 6 files changed, 149 insertions(+), 5 deletions(-) Index: linux-rt-rebase.q/include/linux/page-flags.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/page-flags.h +++ linux-rt-rebase.q/include/linux/page-flags.h @@ -83,6 +83,8 @@ #define PG_private 11 /* If pagecache, has fs-private data */ #define PG_writeback 12 /* Page is under writeback */ +#define PG_nonewrefs 13 /* Block concurrent pagecache lookups + * while testing refcount */ #define PG_compound 14 /* Part of a compound page */ #define PG_swapcache 15 /* Swap page: swp_entry_t in private */ @@ -260,6 +262,11 @@ static inline void __ClearPageTail(struc #define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags) #define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags) +#define PageNoNewRefs(page) test_bit(PG_nonewrefs, &(page)->flags) +#define SetPageNoNewRefs(page) set_bit(PG_nonewrefs, &(page)->flags) +#define ClearPageNoNewRefs(page) clear_bit(PG_nonewrefs, &(page)->flags) +#define __ClearPageNoNewRefs(page) __clear_bit(PG_nonewrefs, &(page)->flags) + struct page; /* forward declaration */ extern void cancel_dirty_page(struct page *page, unsigned int account_size); @@ -272,4 +279,25 @@ static inline void set_page_writeback(st test_set_page_writeback(page); } +static inline void set_page_nonewrefs(struct page *page) +{ + preempt_disable(); + SetPageNoNewRefs(page); + smp_wmb(); +} + +static inline void __clear_page_nonewrefs(struct page *page) +{ + smp_wmb(); + __ClearPageNoNewRefs(page); + preempt_enable(); +} + +static inline void clear_page_nonewrefs(struct page *page) +{ + smp_wmb(); + ClearPageNoNewRefs(page); + preempt_enable(); +} + #endif /* PAGE_FLAGS_H */ Index: linux-rt-rebase.q/include/linux/pagemap.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/pagemap.h +++ linux-rt-rebase.q/include/linux/pagemap.h @@ -12,6 +12,8 @@ #include #include #include +#include +#include /* for in_interrupt() */ /* * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page @@ -62,6 +64,109 @@ static inline void mapping_set_gfp_mask( #define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, int cold); +/* + * speculatively take a reference to a page. + * If the page is free (_count == 0), then _count is untouched, and 0 + * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * + * This function must be run in the same rcu_read_lock() section as has + * been used to lookup the page in the pagecache radix-tree: this allows + * allocators to use a synchronize_rcu() to stabilize _count. + * + * Unless an RCU grace period has passed, the count of all pages coming out + * of the allocator must be considered unstable. page_count may return higher + * than expected, and put_page must be able to do the right thing when the + * page has been finished with (because put_page is what is used to drop an + * invalid speculative reference). + * + * After incrementing the refcount, this function spins until PageNoNewRefs + * is clear, then a read memory barrier is issued. + * + * This forms the core of the lockless pagecache locking protocol, where + * the lookup-side (eg. 
find_get_page) has the following pattern: + * 1. find page in radix tree + * 2. conditionally increment refcount + * 3. wait for PageNoNewRefs + * 4. check the page is still in pagecache + * + * Remove-side (that cares about _count, eg. reclaim) has the following: + * A. SetPageNoNewRefs + * B. check refcount is correct + * C. remove page + * D. ClearPageNoNewRefs + * + * There are 2 critical interleavings that matter: + * - 2 runs before B: in this case, B sees elevated refcount and bails out + * - B runs before 2: in this case, 3 ensures 4 will not run until *after* C + * (after D, even). In which case, 4 will notice C and lookup side can retry + * + * It is possible that between 1 and 2, the page is removed then the exact same + * page is inserted into the same position in pagecache. That's OK: the + * old find_get_page using tree_lock could equally have run before or after + * the write-side, depending on timing. + * + * Pagecache insertion isn't a big problem: either 1 will find the page or + * it will not. Likewise, the old find_get_page could run either before the + * insertion or afterwards, depending on timing. + */ +static inline int page_cache_get_speculative(struct page *page) +{ + VM_BUG_ON(in_interrupt()); + +#ifndef CONFIG_SMP +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + /* + * Preempt must be disabled here - we rely on rcu_read_lock doing + * this for us. + * + * Pagecache won't be truncated from interrupt context, so if we have + * found a page in the radix tree here, we have pinned its refcount by + * disabling preempt, and hence no need for the "speculative get" that + * SMP requires. + */ + VM_BUG_ON(page_count(page) == 0); + atomic_inc(&page->_count); + +#else + if (unlikely(!get_page_unless_zero(page))) + return 0; /* page has been freed */ + + /* + * Note that get_page_unless_zero provides a memory barrier. + * This is needed to ensure PageNoNewRefs is evaluated after the + * page refcount has been raised. See below comment. + */ + + while (unlikely(PageNoNewRefs(page))) + cpu_relax(); + + /* + * smp_rmb is to ensure the load of page->flags (for PageNoNewRefs()) + * is performed before a future load used to ensure the page is + * the correct on (usually: page->mapping and page->index). + * + * Those places that set PageNoNewRefs have the following pattern: + * SetPageNoNewRefs(page) + * wmb(); + * if (page_count(page) == X) + * remove page from pagecache + * wmb(); + * ClearPageNoNewRefs(page) + * + * If the load was out of order, page->mapping might be loaded before + * the page is removed from pagecache but PageNoNewRefs evaluated + * after the ClearPageNoNewRefs(). 
+ */ + smp_rmb(); + +#endif + VM_BUG_ON(PageCompound(page) && (struct page *)page_private(page) != page); + + return 1; +} + #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else Index: linux-rt-rebase.q/mm/filemap.c =================================================================== --- linux-rt-rebase.q.orig/mm/filemap.c +++ linux-rt-rebase.q/mm/filemap.c @@ -441,6 +441,7 @@ int add_to_page_cache(struct page *page, int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (error == 0) { + set_page_nonewrefs(page); write_lock_irq(&mapping->tree_lock); error = radix_tree_insert(&mapping->page_tree, offset, page); if (!error) { @@ -452,6 +453,7 @@ int add_to_page_cache(struct page *page, __inc_zone_page_state(page, NR_FILE_PAGES); } write_unlock_irq(&mapping->tree_lock); + clear_page_nonewrefs(page); radix_tree_preload_end(); } return error; Index: linux-rt-rebase.q/mm/migrate.c =================================================================== --- linux-rt-rebase.q.orig/mm/migrate.c +++ linux-rt-rebase.q/mm/migrate.c @@ -302,6 +302,7 @@ static int migrate_page_move_mapping(str return 0; } + set_page_nonewrefs(page); write_lock_irq(&mapping->tree_lock); pslot = radix_tree_lookup_slot(&mapping->page_tree, @@ -310,6 +311,7 @@ static int migrate_page_move_mapping(str if (page_count(page) != 2 + !!PagePrivate(page) || (struct page *)radix_tree_deref_slot(pslot) != page) { write_unlock_irq(&mapping->tree_lock); + clear_page_nonewrefs(page); return -EAGAIN; } @@ -325,6 +327,9 @@ static int migrate_page_move_mapping(str #endif radix_tree_replace_slot(pslot, newpage); + page->mapping = NULL; + write_unlock_irq(&mapping->tree_lock); + clear_page_nonewrefs(page); /* * Drop cache reference from old page. @@ -345,8 +350,6 @@ static int migrate_page_move_mapping(str __dec_zone_page_state(page, NR_FILE_PAGES); __inc_zone_page_state(newpage, NR_FILE_PAGES); - write_unlock_irq(&mapping->tree_lock); - return 0; } Index: linux-rt-rebase.q/mm/swap_state.c =================================================================== --- linux-rt-rebase.q.orig/mm/swap_state.c +++ linux-rt-rebase.q/mm/swap_state.c @@ -79,6 +79,7 @@ static int __add_to_swap_cache(struct pa BUG_ON(PagePrivate(page)); error = radix_tree_preload(gfp_mask); if (!error) { + set_page_nonewrefs(page); write_lock_irq(&swapper_space.tree_lock); error = radix_tree_insert(&swapper_space.page_tree, entry.val, page); @@ -90,6 +91,7 @@ static int __add_to_swap_cache(struct pa __inc_zone_page_state(page, NR_FILE_PAGES); } write_unlock_irq(&swapper_space.tree_lock); + clear_page_nonewrefs(page); radix_tree_preload_end(); } return error; Index: linux-rt-rebase.q/mm/vmscan.c =================================================================== --- linux-rt-rebase.q.orig/mm/vmscan.c +++ linux-rt-rebase.q/mm/vmscan.c @@ -369,6 +369,7 @@ int remove_mapping(struct address_space BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); + set_page_nonewrefs(page); write_lock_irq(&mapping->tree_lock); /* * The non racy check for a busy page. 
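Together with the update sites above (add_to_page_cache(), page migration and the swap cache) and remove_mapping() here, all of which bracket their radix tree update with set_page_nonewrefs()/clear_page_nonewrefs(), the page_cache_get_speculative() primitive lets the lookup side run without mapping->tree_lock. A sketch of that lookup side, assuming the RCU-safe radix tree lookups from earlier in this series; the real lockless find_get_page() in this patch set works on radix_tree_lookup_slot()/radix_tree_deref_slot(), so this only shows the shape of steps 1-4 from the comment above:

/* Sketch: "mapping" is the address_space being probed. */
static struct page *speculative_find_get_page(struct address_space *mapping,
					      unsigned long offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* steps 2+3 */
			goto repeat;		/* page was freed, look again */
		/* step 4: is it still the page at this index? */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       offset))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}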
@@ -406,17 +407,20 @@ int remove_mapping(struct address_space __delete_from_swap_cache(page); write_unlock_irq(&mapping->tree_lock); swap_free(swap); - __put_page(page); /* The pagecache ref */ - return 1; + goto free_it; } __remove_from_page_cache(page); write_unlock_irq(&mapping->tree_lock); - __put_page(page); + +free_it: + __clear_page_nonewrefs(page); + __put_page(page); /* The pagecache ref */ return 1; cannot_free: write_unlock_irq(&mapping->tree_lock); + clear_page_nonewrefs(page); return 0; } patches/preempt-realtime-sched.patch0000664000077200007720000010640310655544575017126 0ustar mingomingo--- include/linux/sched.h | 47 +++ kernel/sched.c | 740 +++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 695 insertions(+), 92 deletions(-) Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -89,6 +89,16 @@ struct sched_param { #include +#ifdef CONFIG_PREEMPT +extern int kernel_preemption; +#else +# define kernel_preemption 0 +#endif +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int voluntary_preemption; +#else +# define voluntary_preemption 0 +#endif #ifdef CONFIG_PREEMPT_SOFTIRQS extern int softirq_preemption; #else @@ -193,6 +203,28 @@ print_cfs_rq(struct seq_file *m, int cpu #define set_task_state(tsk, state_value) \ set_mb((tsk)->state, (state_value)) +// #define PREEMPT_DIRECT + +#ifdef CONFIG_X86_LOCAL_APIC +extern void nmi_show_all_regs(void); +#else +# define nmi_show_all_regs() do { } while (0) +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct exec_domain; + /* * set_current_state() includes a barrier so that the write of current->state * is correctly serialised wrt the caller's subsequent test of whether to @@ -408,6 +440,11 @@ extern signed long FASTCALL(schedule_tim extern signed long schedule_timeout_interruptible(signed long timeout); extern signed long schedule_timeout_uninterruptible(signed long timeout); asmlinkage void schedule(void); +/* + * This one can be called with interrupts disabled, only + * to be used by lowlevel arch code! + */ +asmlinkage void __sched __schedule(void); struct nsproxy; struct user_namespace; @@ -1454,6 +1491,15 @@ extern struct pid *cad_pid; extern void free_task(struct task_struct *tsk); #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) +#ifdef CONFIG_PREEMPT_RT +extern void __put_task_struct_cb(struct rcu_head *rhp); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + call_rcu(&t->rcu, __put_task_struct_cb); +} +#else extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) @@ -1461,6 +1507,7 @@ static inline void put_task_struct(struc if (atomic_dec_and_test(&t->usage)) __put_task_struct(t); } +#endif /* * Per process flags Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -4,6 +4,7 @@ * Kernel scheduler and related syscalls * * Copyright (C) 1991-2002 Linus Torvalds + * Copyright (C) 2004 Red Hat, Inc., Ingo Molnar * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe @@ -16,6 +17,7 @@ * by Davide Libenzi, preemptible kernel bits by Robert Love. 
* 2003-09-03 Interactivity tuning by Con Kolivas. * 2004-04-02 Scheduler domains code by Nick Piggin + * 2004-10-13 Real-Time Preemption support by Ingo Molnar * 2007-04-15 Work begun on replacing all interactivity tuning with a * fair scheduling design by Con Kolivas. * 2007-05-05 Load balancing (smp-nice) and other improvements @@ -56,6 +58,7 @@ #include #include #include +#include #include #include #include @@ -105,6 +108,20 @@ unsigned long long __attribute__((weak)) #define NICE_0_LOAD SCHED_LOAD_SCALE #define NICE_0_SHIFT SCHED_LOAD_SHIFT +#if (BITS_PER_LONG < 64) +#define JIFFIES_TO_NS64(TIME) \ + ((unsigned long long)(TIME) * ((unsigned long) (1000000000 / HZ))) + +#define NS64_TO_JIFFIES(TIME) \ + ((((unsigned long long)((TIME)) >> BITS_PER_LONG) * \ + (1 + NS_TO_JIFFIES(~0UL))) + NS_TO_JIFFIES((unsigned long)(TIME))) +#else /* BITS_PER_LONG < 64 */ + +#define NS64_TO_JIFFIES(TIME) NS_TO_JIFFIES(TIME) +#define JIFFIES_TO_NS64(TIME) JIFFIES_TO_NS(TIME) + +#endif /* BITS_PER_LONG < 64 */ + /* * These are the 'tuning knobs' of the scheduler: * @@ -154,6 +171,32 @@ static unsigned int static_prio_timeslic return SCALE_PRIO(DEF_TIMESLICE, static_prio); } +#define TASK_PREEMPTS_CURR(p, rq) \ + ((p)->prio < (rq)->curr->prio) + +/* + * Tweaks for current + */ + +#ifdef CURRENT_PTR +struct task_struct * const ___current = &init_task; +struct task_struct ** const current_ptr = (struct task_struct ** const)&___current; +struct thread_info * const current_ti = &init_thread_union.thread_info; +struct thread_info ** const current_ti_ptr = (struct thread_info ** const)¤t_ti; + +EXPORT_SYMBOL(___current); +EXPORT_SYMBOL(current_ti); + +/* + * The scheduler itself doesnt want 'current' to be cached + * during context-switches: + */ +# undef current +# define current __current() +# undef current_thread_info +# define current_thread_info() __current_thread_info() +#endif + static inline int rt_policy(int policy) { if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR)) @@ -227,7 +270,7 @@ struct rt_rq { * acquire operations must be ordered by ascending &runqueue. */ struct rq { - spinlock_t lock; /* runqueue lock */ + raw_spinlock_t lock; /* runqueue lock */ /* * nr_running and cpu_load should be in the same cacheline because @@ -258,6 +301,13 @@ struct rq { */ unsigned long nr_uninterruptible; +#ifdef CONFIG_PREEMPT_RT + unsigned long rt_nr_running; + unsigned long rt_nr_uninterruptible; +#endif + + unsigned long switch_timestamp; + unsigned long slice_avg; struct task_struct *curr, *idle; unsigned long next_balance; struct mm_struct *prev_mm; @@ -300,6 +350,11 @@ struct rq { /* try_to_wake_up() stats */ unsigned long ttwu_cnt; unsigned long ttwu_local; + + /* RT-overload stats: */ + unsigned long rto_schedule; + unsigned long rto_wakeup; + unsigned long rto_pulled; #endif struct lock_class_key rq_lock_key; }; @@ -412,11 +467,23 @@ static inline void set_task_cfs_rq(struc } #endif +/* + * We really dont want to do anything complex within switch_to() + * on PREEMPT_RT - this check enforces this. 
+ */ +#ifdef prepare_arch_switch +# ifdef CONFIG_PREEMPT_RT +# error FIXME +# else +# define _finish_arch_switch finish_arch_switch +# endif +#endif + #ifndef prepare_arch_switch # define prepare_arch_switch(next) do { } while (0) #endif #ifndef finish_arch_switch -# define finish_arch_switch(prev) do { } while (0) +# define _finish_arch_switch(prev) do { } while (0) #endif #ifndef __ARCH_WANT_UNLOCKED_CTXSW @@ -442,7 +509,7 @@ static inline void finish_lock_switch(st */ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); - spin_unlock_irq(&rq->lock); + spin_unlock(&rq->lock); } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ @@ -483,8 +550,8 @@ static inline void finish_lock_switch(st smp_wmb(); prev->oncpu = 0; #endif -#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW - local_irq_enable(); +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_disable(); #endif } #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ @@ -556,6 +623,53 @@ static inline struct rq *this_rq_lock(vo return rq; } +#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP) +static __cacheline_aligned_in_smp atomic_t rt_overload; +#endif + +static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq) +{ +#ifdef CONFIG_PREEMPT_RT + if (rt_task(p)) { + rq->rt_nr_running++; +# ifdef CONFIG_SMP + if (rq->rt_nr_running == 2) + atomic_inc(&rt_overload); +# endif + } +#endif +} + +static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq) +{ +#ifdef CONFIG_PREEMPT_RT + if (rt_task(p)) { + WARN_ON(!rq->rt_nr_running); + rq->rt_nr_running--; +# ifdef CONFIG_SMP + if (rq->rt_nr_running == 1) + atomic_dec(&rt_overload); +# endif + } +#endif +} + +static inline void incr_rt_nr_uninterruptible(struct task_struct *p, struct rq *rq) +{ +#ifdef CONFIG_PREEMPT_RT + if (rt_task(p)) + rq->rt_nr_uninterruptible++; +#endif +} + +static inline void decr_rt_nr_uninterruptible(struct task_struct *p, struct rq *rq) +{ +#ifdef CONFIG_PREEMPT_RT + if (rt_task(p)) + rq->rt_nr_uninterruptible--; +#endif +} + /* * CPU frequency is/was unstable - start new by setting prev_clock_raw: */ @@ -892,6 +1006,8 @@ static inline int normal_prio(struct tas prio = MAX_RT_PRIO-1 - p->rt_priority; else prio = __normal_prio(p); + + trace_special_pid(p->pid, PRIO(p), __PRIO(prio)); return prio; } @@ -915,6 +1031,13 @@ static int effective_prio(struct task_st return p->prio; } +static inline void trace_start_sched_wakeup(struct task_struct *p, + struct rq *rq) +{ + if (TASK_PREEMPTS_CURR(p, rq) && (p != rq->curr)) + __trace_start_sched_wakeup(p); +} + /* * activate_task - move a task to the runqueue. 
*/ @@ -922,8 +1045,12 @@ static void activate_task(struct rq *rq, { u64 now = rq_clock(rq); - if (p->state == TASK_UNINTERRUPTIBLE) + if (p->state == TASK_UNINTERRUPTIBLE) { rq->nr_uninterruptible--; + decr_rt_nr_uninterruptible(p, rq); + } + + trace_special_pid(p->pid, PRIO(p), rq->nr_running); enqueue_task(rq, p, wakeup, now); inc_nr_running(p, rq, now); @@ -936,8 +1063,12 @@ static inline void activate_idle_task(st { u64 now = rq_clock(rq); - if (p->state == TASK_UNINTERRUPTIBLE) + if (p->state == TASK_UNINTERRUPTIBLE) { rq->nr_uninterruptible--; + decr_rt_nr_uninterruptible(p, rq); + } + + trace_special_pid(p->pid, PRIO(p), rq->nr_running); enqueue_task(rq, p, 0, now); inc_nr_running(p, rq, now); @@ -950,8 +1081,12 @@ static void deactivate_task(struct rq *r { u64 now = rq_clock(rq); - if (p->state == TASK_UNINTERRUPTIBLE) + if (p->state == TASK_UNINTERRUPTIBLE) { rq->nr_uninterruptible++; + incr_rt_nr_uninterruptible(p, rq); + } + + trace_special_pid(p->pid, PRIO(p), rq->nr_running); dequeue_task(rq, p, sleep, now); dec_nr_running(p, rq, now); @@ -972,6 +1107,39 @@ unsigned long weighted_cpuload(const int return cpu_rq(cpu)->ls.load.weight; } +/* + * Pick up the highest-prio task: + */ +static inline struct task_struct * +pick_next_task(struct rq *rq, struct task_struct *prev, u64 now) +{ + struct sched_class *class; + struct task_struct *p; + + /* + * Optimization: we know that if all tasks are in + * the fair class we can call that function directly: + */ + if (likely(rq->nr_running == rq->cfs.nr_running)) { + p = fair_sched_class.pick_next_task(rq, now); + if (likely(p)) + return p; + } + + class = sched_class_highest; + for ( ; ; ) { + p = class->pick_next_task(rq, now); + if (p) + return p; + /* + * Will never be NULL as the idle class always + * returns a non-NULL p: + */ + class = class->next; + } +} + + static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) { #ifdef CONFIG_SMP @@ -1249,6 +1417,119 @@ nextgroup: return idlest; } +#ifdef CONFIG_PREEMPT_RT + +static struct task_struct * pick_rt_task(struct rq *src_rq, int this_cpu) +{ + struct list_head *head, *curr; + struct rt_prio_array *array = &src_rq->rt.active; + struct task_struct *tmp; + int idx; + + WARN_ON(!spin_is_locked(&src_rq->lock)); + + idx = sched_find_first_bit(array->bitmap); +next_in_bitmap: + /* + * Only non-RT tasks available - abort the search: + */ + if (idx >= MAX_RT_PRIO) + return NULL; + + head = array->queue + idx; + curr = head->next; +next_in_queue: + tmp = list_entry(curr, struct task_struct, run_list); + /* + * Return the highest-prio non-running RT task (if task + * may run on this CPU): + */ + if (!task_running(src_rq, tmp) && + cpu_isset(this_cpu, tmp->cpus_allowed)) + return tmp; + + curr = curr->next; + if (curr != head) + goto next_in_queue; + + idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1); + goto next_in_bitmap; +} + +static int double_lock_balance(struct rq *this_rq, struct rq *busiest); + +/* + * Pull RT tasks from other CPUs in the RT-overload + * case. Interrupts are disabled, local rq is locked. 
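balance_rt_tasks() below is only invoked from __schedule() when the global rt_overload counter is non-zero; that counter is maintained by inc_rt_tasks()/dec_rt_tasks() above and counts how many runqueues currently hold two or more runnable RT tasks, i.e. have something worth pulling. A small userspace model of that bookkeeping; NCPUS, inc_rt(), dec_rt() and main() are invented for the illustration:

#include <stdio.h>

#define NCPUS 4

static int rt_nr_running[NCPUS];
static int rt_overload;		/* runqueues with >= 2 runnable RT tasks */

static void inc_rt(int cpu)
{
	if (++rt_nr_running[cpu] == 2)		/* crossed 1 -> 2 */
		rt_overload++;
}

static void dec_rt(int cpu)
{
	if (rt_nr_running[cpu]-- == 2)		/* dropping 2 -> 1 */
		rt_overload--;
}

int main(void)
{
	inc_rt(0);				/* one RT task on CPU0: no overload   */
	inc_rt(0);				/* second RT task on CPU0: overload=1 */
	printf("overload=%d\n", rt_overload);
	dec_rt(0);				/* the balancer pulled one task away  */
	inc_rt(1);				/* ... and it now runs on CPU1        */
	printf("overload=%d\n", rt_overload);	/* back to 0 */
	return 0;
}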
+ */ +static void balance_rt_tasks(struct rq *this_rq, int this_cpu) +{ + struct task_struct *p, *next; + struct rq *src_rq; + int cpu; + + WARN_ON(!irqs_disabled()); + + /* + * No need to do array switching - there can be no + * RT tasks in the expired array and the idle task + * is more than enough for comparing against RT tasks: + */ + next = pick_next_task(this_rq, this_rq->curr, rq_clock(this_rq)); + + for_each_online_cpu(cpu) { + if (cpu == this_cpu) + continue; + src_rq = cpu_rq(cpu); + if (src_rq->rt_nr_running <= 1) + continue; + + /* + * We can potentially drop this_rq's lock in + * double_lock_balance, and another CPU could + * steal our next task - hence we must cause + * the caller to recalculate the next task + * in that case: + */ + if (double_lock_balance(this_rq, src_rq)) + next = pick_next_task(this_rq, this_rq->curr, + rq_clock(this_rq)); + /* + * Are there still pullable RT tasks? + */ + if (src_rq->rt_nr_running <= 1) { + spin_unlock(&src_rq->lock); + continue; + } + + p = pick_rt_task(src_rq, this_cpu); + + /* + * Do we have an RT task that preempts + * the to-be-scheduled task? + */ + if (p && (p->prio < next->prio)) { + WARN_ON(p == src_rq->curr); + WARN_ON(!p->se.on_rq); + schedstat_inc(this_rq, rto_pulled); + + set_task_cpu(p, this_cpu); + + deactivate_task(src_rq, p, 0); + activate_task(this_rq, p, 0); + /* + * We continue with the search, just in + * case there's an even higher prio task + * in another runqueue. (low likelyhood + * but possible) + */ + } + spin_unlock(&src_rq->lock); + } +} + +#endif + /* * find_idlest_cpu - find the idlest cpu among the cpus in group. */ @@ -1417,6 +1698,14 @@ try_to_wake_up(struct task_struct *p, un int new_cpu; #endif + trace_special_sym(); +#ifdef CONFIG_PREEMPT_RT + /* + * sync wakeups can increase wakeup latencies: + */ + if (rt_task(p)) + sync = 0; +#endif rq = task_rq_lock(p, &flags); old_state = p->state; if (!(old_state & state)) @@ -1522,9 +1811,45 @@ out_set_cpu: cpu = task_cpu(p); } + /* + * If a newly woken up RT task cannot preempt the + * current (RT) task (on a target runqueue) then try + * to find another CPU it can preempt: + */ + if (rt_task(p) && !TASK_PREEMPTS_CURR(p, rq)) { + struct rq *this_rq = cpu_rq(this_cpu); + /* + * Special-case: the task on this CPU can be + * preempted. In that case there's no need to + * trigger reschedules on other CPUs, we can + * mark the current task for reschedule. + * + * (Note that it's safe to access this_rq without + * extra locking in this particular case, because + * we are on the current CPU.) + */ + if (TASK_PREEMPTS_CURR(p, this_rq)) + set_tsk_need_resched(this_rq->curr); + else + /* + * Neither the intended target runqueue + * nor the current CPU can take this task. + * Trigger a reschedule on all other CPUs + * nevertheless, maybe one of them can take + * this task: + */ + smp_send_reschedule_allbutself(); + + schedstat_inc(this_rq, rto_wakeup); + } + out_activate: #endif /* CONFIG_SMP */ + activate_task(rq, p, 1); + + trace_start_sched_wakeup(p, rq); + /* * Sync wakeups (i.e. 
those types of wakeups where the waker * has indicated that it will leave the CPU in short order) @@ -1535,10 +1860,20 @@ out_activate: */ if (!sync || cpu != this_cpu) check_preempt_curr(rq, p); + else { + if (TASK_PREEMPTS_CURR(p, rq)) + set_tsk_need_resched_delayed(rq->curr); + } + if (rq->curr && p && rq && _need_resched()) + trace_special_pid(p->pid, PRIO(p), PRIO(rq->curr)); + success = 1; out_running: - p->state = TASK_RUNNING; + if (mutex) + p->state = TASK_RUNNING_MUTEX; + else + p->state = TASK_RUNNING; out: task_rq_unlock(rq, &flags); @@ -1552,7 +1887,6 @@ int fastcall wake_up_process(struct task ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 0); - mcount(); return ret; } EXPORT_SYMBOL(wake_up_process); @@ -1564,7 +1898,6 @@ int fastcall wake_up_process_sync(struct ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 1, 0); - mcount(); return ret; } EXPORT_SYMBOL(wake_up_process_sync); @@ -1574,7 +1907,6 @@ int fastcall wake_up_process_mutex(struc int ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 1); - mcount(); return ret; } EXPORT_SYMBOL(wake_up_process_mutex); @@ -1584,16 +1916,13 @@ int fastcall wake_up_process_mutex_sync( int ret = try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_RUNNING_MUTEX | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 1, 1); - mcount(); return ret; } EXPORT_SYMBOL(wake_up_process_mutex_sync); int fastcall wake_up_state(struct task_struct *p, unsigned int state) { - int ret = try_to_wake_up(p, state | TASK_RUNNING_MUTEX, 0, 0); - mcount(); - return ret; + return try_to_wake_up(p, state | TASK_RUNNING_MUTEX, 0, 0); } /* @@ -1834,13 +2163,24 @@ static inline void finish_task_switch(st * be dropped twice. * Manfred Spraul */ +#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP) + /* + * If we pushed an RT task off the runqueue, + * then kick other CPUs, they might run it: + */ + if (unlikely(rt_task(current) && prev->se.on_rq && rt_task(prev))) { + schedstat_inc(rq, rto_schedule); + smp_send_reschedule_allbutself(); + } +#endif prev_state = prev->state; - finish_arch_switch(prev); + _finish_arch_switch(prev); finish_lock_switch(rq, prev); fire_sched_in_preempt_notifiers(current); trace_stop_sched_switched(current); if (mm) mmdrop(mm); + if (unlikely(prev_state == TASK_DEAD)) { /* * Remove function-return probe instances associated with this @@ -1858,12 +2198,15 @@ static inline void finish_task_switch(st asmlinkage void schedule_tail(struct task_struct *prev) __releases(rq->lock) { - struct rq *rq = this_rq(); - - finish_task_switch(rq, prev); + preempt_disable(); // TODO: move this to fork setup + finish_task_switch(this_rq(), prev); + __preempt_enable_no_resched(); + local_irq_enable(); #ifdef __ARCH_WANT_UNLOCKED_CTXSW /* In this case, finish_task_switch does not reenable preemption */ preempt_enable(); +#else + preempt_check_resched(); #endif if (current->set_child_tid) put_user(current->pid, current->set_child_tid); @@ -1912,6 +2255,11 @@ context_switch(struct rq *rq, struct tas trace_cmdline(); +#ifdef CURRENT_PTR + barrier(); + *current_ptr = next; + *current_ti_ptr = next->thread_info; +#endif /* Here we just switch the register state and the stack. 
*/ switch_to(prev, next, prev); @@ -1959,6 +2307,43 @@ unsigned long nr_uninterruptible(void) return sum; } +unsigned long nr_uninterruptible_cpu(int cpu) +{ + return cpu_rq(cpu)->nr_uninterruptible; +} + +#if defined(CONFIG_PREEMPT_RT) +unsigned long rt_nr_running(void) +{ + unsigned long i, sum = 0; + + for_each_online_cpu(i) + sum += cpu_rq(i)->rt_nr_running; + + return sum; +} + +unsigned long rt_nr_running_cpu(int cpu) +{ + return cpu_rq(cpu)->rt_nr_running; +} + +unsigned long rt_nr_uninterruptible(void) +{ + unsigned long i, sum = 0; + + for_each_online_cpu(i) + sum += cpu_rq(i)->rt_nr_uninterruptible; + + return sum; +} + +unsigned long rt_nr_uninterruptible_cpu(int cpu) +{ + return cpu_rq(cpu)->rt_nr_uninterruptible; +} +#endif + unsigned long long nr_context_switches(void) { int i; @@ -2099,7 +2484,7 @@ static void double_rq_unlock(struct rq * /* * double_lock_balance - lock the busiest runqueue, this_rq is locked already. */ -static void double_lock_balance(struct rq *this_rq, struct rq *busiest) +static int double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) __acquires(busiest->lock) __acquires(this_rq->lock) @@ -2114,9 +2499,12 @@ static void double_lock_balance(struct r spin_unlock(&this_rq->lock); spin_lock(&busiest->lock); spin_lock(&this_rq->lock); + + return 1; } else spin_lock(&busiest->lock); } + return 0; } /* @@ -3352,6 +3740,8 @@ void scheduler_tick(void) struct rq *rq = cpu_rq(cpu); struct task_struct *curr = rq->curr; + BUG_ON(!irqs_disabled()); + spin_lock(&rq->lock); if (curr != rq->idle) /* FIXME: needed? */ curr->sched_class->task_tick(rq, curr); @@ -3369,8 +3759,11 @@ void scheduler_tick(void) */ static noinline void __schedule_bug(struct task_struct *prev) { - printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n", - prev->comm, preempt_count(), prev->pid); + stop_trace(); + + printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d, CPU#%d\n", + prev->comm, preempt_count(), prev->pid, smp_processor_id()); + debug_show_held_locks(prev); if (irqs_disabled()) print_irqtrace_events(prev); @@ -3382,6 +3775,8 @@ static noinline void __schedule_bug(stru */ static inline void schedule_debug(struct task_struct *prev) { + WARN_ON(system_state == SYSTEM_BOOTING); + /* * Test if we are atomic. Since do_exit() needs to call into * schedule() atomically, we ignore that path for now. @@ -3393,44 +3788,16 @@ static inline void schedule_debug(struct profile_hit(SCHED_PROFILING, __builtin_return_address(0)); schedstat_inc(this_rq(), sched_cnt); -} - -/* - * Pick up the highest-prio task: - */ -static inline struct task_struct * -pick_next_task(struct rq *rq, struct task_struct *prev, u64 now) -{ - struct sched_class *class; - struct task_struct *p; - /* - * Optimization: we know that if all tasks are in - * the fair class we can call that function directly: - */ - if (likely(rq->nr_running == rq->cfs.nr_running)) { - p = fair_sched_class.pick_next_task(rq, now); - if (likely(p)) - return p; - } + trace_special_sym(); - class = sched_class_highest; - for ( ; ; ) { - p = class->pick_next_task(rq, now); - if (p) - return p; - /* - * Will never be NULL as the idle class always - * returns a non-NULL p: - */ - class = class->next; - } + schedstat_inc(this_rq(), sched_cnt); } /* * schedule() is the main scheduler function. 
*/ -asmlinkage void __sched schedule(void) +asmlinkage void __sched __schedule(void) { struct task_struct *prev, *next; long *switch_count; @@ -3438,7 +3805,6 @@ asmlinkage void __sched schedule(void) u64 now; int cpu; -need_resched: preempt_disable(); cpu = smp_processor_id(); rq = cpu_rq(cpu); @@ -3447,24 +3813,34 @@ need_resched: switch_count = &prev->nivcsw; release_kernel_lock(prev); -need_resched_nonpreemptible: schedule_debug(prev); spin_lock_irq(&rq->lock); + cpu = smp_processor_id(); clear_tsk_need_resched(prev); clear_tsk_need_resched_delayed(prev); - if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { + if ((prev->state & ~TASK_RUNNING_MUTEX) && + !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely((prev->state & TASK_INTERRUPTIBLE) && unlikely(signal_pending(prev)))) { prev->state = TASK_RUNNING; } else { + touch_softlockup_watchdog(); deactivate_task(rq, prev, 1); } switch_count = &prev->nvcsw; } + if (preempt_count() & PREEMPT_ACTIVE) + sub_preempt_count(PREEMPT_ACTIVE); + +#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP) + if (unlikely(atomic_read(&rt_overload))) + balance_rt_tasks(rq, cpu); +#endif + if (unlikely(!rq->nr_running)) idle_balance(cpu, rq); @@ -3480,24 +3856,93 @@ need_resched_nonpreemptible: ++*switch_count; context_switch(rq, prev, next); /* unlocks the rq */ + __preempt_enable_no_resched(); } else { - spin_unlock_irq(&rq->lock); + __preempt_enable_no_resched(); + spin_unlock(&rq->lock); trace_stop_sched_switched(next); } - if (unlikely(reacquire_kernel_lock(current) < 0)) { - cpu = smp_processor_id(); - rq = cpu_rq(cpu); - goto need_resched_nonpreemptible; + reacquire_kernel_lock(current); + if (!irqs_disabled()) { + static int once = 1; + if (once) { + once = 0; + print_irqtrace_events(current); + WARN_ON(1); + } } - __preempt_enable_no_resched(); - if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || - test_thread_flag(TIF_NEED_RESCHED_DELAYED))) - goto need_resched; +} + +/* + * schedule() is the main scheduler function. + */ +asmlinkage void __sched schedule(void) +{ + WARN_ON(system_state == SYSTEM_BOOTING); + /* + * Test if we have interrupts disabled. 
+ */ + if (unlikely(irqs_disabled())) { + stop_trace(); + printk(KERN_ERR "BUG: scheduling with irqs disabled: " + "%s/0x%08x/%d\n", current->comm, preempt_count(), + current->pid); + print_symbol("caller is %s\n", + (long)__builtin_return_address(0)); + dump_stack(); + } + + if (unlikely(current->flags & PF_NOSCHED)) { + current->flags &= ~PF_NOSCHED; + printk(KERN_ERR "%s:%d userspace BUG: scheduling in " + "user-atomic context!\n", current->comm, current->pid); + dump_stack(); + send_sig(SIGUSR2, current, 1); + } + + local_irq_disable(); + + do { + __schedule(); + } while (unlikely(test_thread_flag(TIF_NEED_RESCHED) || + test_thread_flag(TIF_NEED_RESCHED_DELAYED))); + + local_irq_enable(); } EXPORT_SYMBOL(schedule); + #ifdef CONFIG_PREEMPT + +/* + * Global flag to turn preemption off on a CONFIG_PREEMPT kernel: + */ +int kernel_preemption = 1; + +static int __init preempt_setup (char *str) +{ + if (!strncmp(str, "off", 3)) { + if (kernel_preemption) { + printk(KERN_INFO "turning off kernel preemption!\n"); + kernel_preemption = 0; + } + return 1; + } + if (!strncmp(str, "on", 2)) { + if (!kernel_preemption) { + printk(KERN_INFO "turning on kernel preemption!\n"); + kernel_preemption = 1; + } + return 1; + } + get_option(&str, &kernel_preemption); + + return 1; +} + +__setup("preempt=", preempt_setup); + /* * this is the entry point to schedule() from in-kernel preemption * off of preempt_enable. Kernel preemptions off return from interrupt @@ -3510,6 +3955,8 @@ asmlinkage void __sched preempt_schedule struct task_struct *task = current; int saved_lock_depth; #endif + if (!kernel_preemption) + return; /* * If there is a non-zero preempt_count or interrupts are disabled, * we do not want to preempt the current task. Just return.. @@ -3518,6 +3965,7 @@ asmlinkage void __sched preempt_schedule return; need_resched: + local_irq_disable(); add_preempt_count(PREEMPT_ACTIVE); /* * We keep the big kernel semaphore locked, but we @@ -3528,25 +3976,25 @@ need_resched: saved_lock_depth = task->lock_depth; task->lock_depth = -1; #endif - schedule(); + __schedule(); #ifdef CONFIG_PREEMPT_BKL task->lock_depth = saved_lock_depth; #endif - sub_preempt_count(PREEMPT_ACTIVE); - /* we could miss a preemption opportunity between schedule and now */ barrier(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || test_thread_flag(TIF_NEED_RESCHED_DELAYED))) goto need_resched; + local_irq_enable(); } + EXPORT_SYMBOL(preempt_schedule); /* - * this is the entry point to schedule() from kernel preemption - * off of irq context. - * Note, that this is called and return with irqs disabled. This will - * protect us against recursive calling from irq. + * this is is the entry point for the IRQ return path. Called with + * interrupts disabled. To avoid infinite irq-entry recursion problems + * with fast-paced IRQ sources we do all of this carefully to never + * enable interrupts again. */ asmlinkage void __sched preempt_schedule_irq(void) { @@ -3555,10 +4003,18 @@ asmlinkage void __sched preempt_schedule struct task_struct *task = current; int saved_lock_depth; #endif - /* Catch callers which need to be fixed */ - WARN_ON_ONCE(ti->preempt_count || !irqs_disabled()); + + if (!kernel_preemption) + return; + /* + * If there is a non-zero preempt_count then just return. 
+ * (interrupts are disabled) + */ + if (unlikely(ti->preempt_count)) + return; need_resched: + local_irq_disable(); add_preempt_count(PREEMPT_ACTIVE); /* * We keep the big kernel semaphore locked, but we @@ -3569,14 +4025,13 @@ need_resched: saved_lock_depth = task->lock_depth; task->lock_depth = -1; #endif - local_irq_enable(); - schedule(); + __schedule(); + local_irq_disable(); + #ifdef CONFIG_PREEMPT_BKL task->lock_depth = saved_lock_depth; #endif - sub_preempt_count(PREEMPT_ACTIVE); - /* we could miss a preemption opportunity between schedule and now */ barrier(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || @@ -3926,7 +4381,7 @@ EXPORT_SYMBOL(sleep_on_timeout); void rt_mutex_setprio(struct task_struct *p, int prio) { unsigned long flags; - int oldprio, on_rq; + int oldprio, prev_resched, on_rq; struct rq *rq; u64 now; @@ -3947,6 +4402,9 @@ void rt_mutex_setprio(struct task_struct p->prio = prio; + trace_special_pid(p->pid, __PRIO(oldprio), PRIO(p)); + prev_resched = _need_resched(); + if (on_rq) { enqueue_task(rq, p, 0, now); /* @@ -3961,6 +4419,7 @@ void rt_mutex_setprio(struct task_struct check_preempt_curr(rq, p); } } + trace_special(prev_resched, _need_resched(), 0); task_rq_unlock(rq, &flags); } @@ -4550,14 +5009,17 @@ asmlinkage long sys_sched_yield(void) */ spin_unlock_no_resched(&rq->lock); - schedule(); + __schedule(); + + local_irq_enable(); + preempt_check_resched(); return 0; } static void __cond_resched(void) { -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) __might_sleep(__FILE__, __LINE__); #endif /* @@ -4566,10 +5028,11 @@ static void __cond_resched(void) * cond_resched() call. */ do { + local_irq_disable(); add_preempt_count(PREEMPT_ACTIVE); - schedule(); - sub_preempt_count(PREEMPT_ACTIVE); + __schedule(); } while (need_resched()); + local_irq_enable(); } int __sched cond_resched(void) @@ -4595,7 +5058,7 @@ int __cond_resched_raw_spinlock(raw_spin { int ret = 0; - if (need_lockbreak(lock)) { + if (need_lockbreak_raw(lock)) { spin_unlock(lock); cpu_relax(); ret = 1; @@ -4611,6 +5074,25 @@ int __cond_resched_raw_spinlock(raw_spin } EXPORT_SYMBOL(__cond_resched_raw_spinlock); +#ifdef CONFIG_PREEMPT_RT + +int __cond_resched_spinlock(spinlock_t *lock) +{ +#if (defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)) || defined(CONFIG_PREEMPT_RT) + if (lock->break_lock) { + lock->break_lock = 0; + spin_unlock_no_resched(lock); + __cond_resched(); + spin_lock(lock); + return 1; + } +#endif + return 0; +} +EXPORT_SYMBOL(__cond_resched_spinlock); + +#endif + /* * Voluntarily preempt a process context that has softirqs disabled: */ @@ -4657,29 +5139,73 @@ int cond_resched_hardirq_context(void) WARN_ON_ONCE(!irqs_disabled()); if (hardirq_need_resched()) { +#ifndef CONFIG_PREEMPT_RT irq_exit(); +#endif local_irq_enable(); __cond_resched(); +#ifndef CONFIG_PREEMPT_RT local_irq_disable(); __irq_enter(); - +#endif return 1; } return 0; } EXPORT_SYMBOL(cond_resched_hardirq_context); +#ifdef CONFIG_PREEMPT_VOLUNTARY + +int voluntary_preemption = 1; + +EXPORT_SYMBOL(voluntary_preemption); + +static int __init voluntary_preempt_setup (char *str) +{ + if (!strncmp(str, "off", 3)) + voluntary_preemption = 0; + else + get_option(&str, &voluntary_preemption); + if (!voluntary_preemption) + printk("turning off voluntary preemption!\n"); + + return 1; +} + +__setup("voluntary-preempt=", voluntary_preempt_setup); + +#endif + /** * yield - yield the current processor to other threads. 
* * This is a shortcut for kernel-space yielding - it marks the * thread runnable and calls sys_sched_yield(). */ -void __sched yield(void) +void __sched __yield(void) { set_current_state(TASK_RUNNING); sys_sched_yield(); } + +void __sched yield(void) +{ + static int once = 1; + + /* + * it's a bug to rely on yield() with RT priorities. We print + * the first occurance after bootup ... this will still give + * us an idea about the scope of the problem, without spamming + * the syslog: + */ + if (once && rt_task(current)) { + once = 0; + printk(KERN_ERR "BUG: %s:%d RT task yield()-ing!\n", + current->comm, current->pid); + dump_stack(); + } + __yield(); +} EXPORT_SYMBOL(yield); /* @@ -4844,6 +5370,7 @@ static void show_task(struct task_struct void show_state_filter(unsigned long state_filter) { struct task_struct *g, *p; + int do_unlock = 1; #if BITS_PER_LONG == 32 printk(KERN_INFO @@ -4852,7 +5379,16 @@ void show_state_filter(unsigned long sta printk(KERN_INFO " task PC stack pid father\n"); #endif +#ifdef CONFIG_PREEMPT_RT + if (!read_trylock(&tasklist_lock)) { + printk("hm, tasklist_lock write-locked.\n"); + printk("ignoring ...\n"); + do_unlock = 0; + } +#else read_lock(&tasklist_lock); +#endif + do_each_thread(g, p) { /* * reset the NMI-timeout, listing all files on a slow @@ -4868,7 +5404,9 @@ void show_state_filter(unsigned long sta #ifdef CONFIG_SCHED_DEBUG sysrq_sched_debug_show(); #endif - read_unlock(&tasklist_lock); + if (do_unlock) + read_unlock(&tasklist_lock); + /* * Only show locks if all tasks are dumped: */ @@ -4909,7 +5447,9 @@ void __cpuinit init_idle(struct task_str spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ -#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) +#if defined(CONFIG_PREEMPT) && \ + !defined(CONFIG_PREEMPT_BKL) && \ + !defined(CONFIG_PREEMPT_RT) task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); #else task_thread_info(idle)->preempt_count = 0; @@ -5024,11 +5564,18 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed); static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) { struct rq *rq_dest, *rq_src; + unsigned long flags; int ret = 0, on_rq; if (unlikely(cpu_is_offline(dest_cpu))) return ret; + /* + * PREEMPT_RT: this relies on write_lock_irq(&tasklist_lock) + * disabling interrupts - which on PREEMPT_RT does not do: + */ + local_irq_save(flags); + rq_src = cpu_rq(src_cpu); rq_dest = cpu_rq(dest_cpu); @@ -5043,6 +5590,7 @@ static int __migrate_task(struct task_st on_rq = p->se.on_rq; if (on_rq) deactivate_task(rq_src, p, 0); + set_task_cpu(p, dest_cpu); if (on_rq) { activate_task(rq_dest, p, 0); @@ -5051,6 +5599,8 @@ static int __migrate_task(struct task_st ret = 1; out: double_rq_unlock(rq_src, rq_dest); + local_irq_restore(flags); + return ret; } @@ -6612,6 +7162,9 @@ void __init sched_init(void) atomic_inc(&init_mm.mm_count); enter_lazy_tlb(&init_mm, current); +#ifdef CONFIG_PREEMPT_RT + printk("Real-Time Preemption Support (C) 2004-2007 Ingo Molnar\n"); +#endif /* * Make us the idle thread. 
Technically, schedule() should not be * called from this thread, however somewhere below it might be, @@ -6633,14 +7186,17 @@ void __might_sleep(char *file, int line) if ((in_atomic() || irqs_disabled()) && system_state == SYSTEM_RUNNING && !oops_in_progress) { + if (debug_direct_keyboard && hardirq_count()) + return; if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; prev_jiffy = jiffies; stop_trace(); printk(KERN_ERR "BUG: sleeping function called from invalid" - " context at %s:%d\n", file, line); - printk("in_atomic():%d, irqs_disabled():%d\n", - in_atomic(), irqs_disabled()); + " context %s(%d) at %s:%d\n", + current->comm, current->pid, file, line); + printk("in_atomic():%d [%08x], irqs_disabled():%d\n", + in_atomic(), preempt_count(), irqs_disabled()); debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); patches/sched-rt-stats.patch0000664000077200007720000000242210655544577015431 0ustar mingomingoOn Wed, Jul 25, 2007 at 10:05:04AM +0200, Ingo Molnar wrote: > > * Ankita Garg wrote: > > > Hi, > > > > This patch adds support to display captured -rt stats under > > /proc/schedstat. > > hm, could you add it to /proc/sched_debug instead? That's where all the > runqueue values are showing up normally. I'm also a bit wary about > introducing a new schedstats version for -rt. So, I have merged my previous patch (to display rt_nr_running info in sched_debug.c) with this one. Signed-off-by: Ankita Garg [mingo@elte.hu: fix it to work on !SCHEDSTATS too] Signed-off-by: Ingo Molnar -- kernel/sched_debug.c | 11 +++++++++++ 1 file changed, 11 insertions(+) Index: linux-rt-rebase.q/kernel/sched_debug.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched_debug.c +++ linux-rt-rebase.q/kernel/sched_debug.c @@ -164,6 +164,17 @@ static void print_cpu(struct seq_file *m P(cpu_load[2]); P(cpu_load[3]); P(cpu_load[4]); +#ifdef CONFIG_PREEMPT_RT + /* Print rt related rq stats */ + P(rt_nr_running); + P(rt_nr_uninterruptible); +#ifdef CONFIG_SCHEDSTATS + P(rto_schedule); + P(rto_wakeup); + P(rto_pulled); +#endif +#endif + #undef P print_cfs_stats(m, cpu, now); patches/preempt-realtime-powerpc-missing-raw-spinlocks.patch0000664000077200007720000000765010655544574023763 0ustar mingomingoFrom sshtylyov@ru.mvista.com Thu Jun 21 22:24:22 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from imap.sh.mvista.com (unknown [63.81.120.155]) by mail.tglx.de (Postfix) with ESMTP id 2149065C065 for ; Thu, 21 Jun 2007 22:24:22 +0200 (CEST) Received: from wasted.dev.rtsoft.ru (unknown [10.150.0.9]) by imap.sh.mvista.com (Postfix) with ESMTP id D27113EC9; Thu, 21 Jun 2007 13:24:15 -0700 (PDT) From: Sergei Shtylyov Organization: MontaVista Software Inc. To: tglx@linutronix.de, bruce.ashfield@gmail.com, rostedt@goodmis.org Subject: [PATCH] (2.6.20-rt3) PowerPC: convert spinlocks into raw Date: Thu, 21 Jun 2007 23:25:58 +0300 User-Agent: KMail/1.5 MIME-Version: 1.0 Content-Disposition: inline Content-Type: text/plain; charset="iso-8859-1" Message-Id: <200706220025.58799.sshtylyov@ru.mvista.com> X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Convert the spinlocks in the PowerPC interrupt related code into the raw ones, also convert the PURR and PMC related spinlocks... Signed-off-by: Mark A. 
Greer Signed-off-by: Sergei Shtylyov --- Resending in hopes it still can apply -- if it doesn't, bug me again... :-) --- arch/powerpc/kernel/pmc.c | 2 +- arch/powerpc/sysdev/i8259.c | 2 +- arch/powerpc/sysdev/ipic.c | 2 +- arch/powerpc/sysdev/mpic.c | 2 +- include/asm-powerpc/mpic.h | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/kernel/pmc.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/pmc.c +++ linux-rt-rebase.q/arch/powerpc/kernel/pmc.c @@ -37,7 +37,7 @@ static void dummy_perf(struct pt_regs *r } -static DEFINE_SPINLOCK(pmc_owner_lock); +static DEFINE_RAW_SPINLOCK(pmc_owner_lock); static void *pmc_owner_caller; /* mostly for debugging */ perf_irq_t perf_irq = dummy_perf; Index: linux-rt-rebase.q/arch/powerpc/sysdev/i8259.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/sysdev/i8259.c +++ linux-rt-rebase.q/arch/powerpc/sysdev/i8259.c @@ -23,7 +23,7 @@ static unsigned char cached_8259[2] = { #define cached_A1 (cached_8259[0]) #define cached_21 (cached_8259[1]) -static DEFINE_SPINLOCK(i8259_lock); +static DEFINE_RAW_SPINLOCK(i8259_lock); static struct device_node *i8259_node; static struct irq_host *i8259_host; Index: linux-rt-rebase.q/arch/powerpc/sysdev/ipic.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/sysdev/ipic.c +++ linux-rt-rebase.q/arch/powerpc/sysdev/ipic.c @@ -30,7 +30,7 @@ #include "ipic.h" static struct ipic * primary_ipic; -static DEFINE_SPINLOCK(ipic_lock); +static DEFINE_RAW_SPINLOCK(ipic_lock); static struct ipic_info ipic_info[] = { [9] = { Index: linux-rt-rebase.q/arch/powerpc/sysdev/mpic.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/sysdev/mpic.c +++ linux-rt-rebase.q/arch/powerpc/sysdev/mpic.c @@ -46,7 +46,7 @@ static struct mpic *mpics; static struct mpic *mpic_primary; -static DEFINE_SPINLOCK(mpic_lock); +static DEFINE_RAW_SPINLOCK(mpic_lock); #ifdef CONFIG_PPC32 /* XXX for now */ #ifdef CONFIG_IRQ_ALL_CPUS Index: linux-rt-rebase.q/include/asm-powerpc/mpic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/mpic.h +++ linux-rt-rebase.q/include/asm-powerpc/mpic.h @@ -280,7 +280,7 @@ struct mpic #ifdef CONFIG_MPIC_U3_HT_IRQS /* The fixup table */ struct mpic_irq_fixup *fixups; - spinlock_t fixup_lock; + raw_spinlock_t fixup_lock; #endif /* Register access method */ patches/ppc-fix-clocksource-timebase-shift.patch0000664000077200007720000000224310655544572021342 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Mon May 14 17:23:17 2007 Date: Mon, 14 May 2007 17:23:17 +0900 From: Tsutomu OWA To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [patch 5/5] powerpc 2.6.21-rt1] fix clocksource_timebase.shift value Calculate clocksource_timebase.shift from tb_ticks_per_jiffy to get an accurate translation, though I don't understand why current version of clocksource_timebase.shift could be constant... 
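For reference only (not part of this patch): the shift value matters because clocksource_hz2mult(), used in the hunk below, derives the multiplier from it. The sketch here shows roughly that arithmetic in plain userspace C; the 512 MHz timebase frequency is invented purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Roughly the arithmetic of clocksource_hz2mult(): pick mult so that
 * ns = (ticks * mult) >> shift converts timebase ticks to nanoseconds. */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t tb_hz = 512000000;	/* hypothetical 512 MHz timebase */
	uint32_t shift;

	/* A larger shift gives finer resolution but overflows the
	 * 32-bit mult sooner for a given timebase frequency. */
	for (shift = 16; shift <= 24; shift += 4)
		printf("shift %2u -> mult %u\n", shift, hz2mult(tb_hz, shift));
	return 0;
}
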
Signed-off-by: Tsutomu OWA -- owa --- arch/powerpc/kernel/time.c | 3 +++ 1 file changed, 3 insertions(+) Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -950,6 +950,9 @@ static int __init init_timebase_clocksou if (__USE_RTC()) return -ENODEV; +#ifdef CONFIG_PPC64 + clocksource_timebase.shift = tb_ticks_per_jiffy / 1000000; +#endif clocksource_timebase.mult = clocksource_hz2mult(tb_ticks_per_sec, clocksource_timebase.shift); return clocksource_register(&clocksource_timebase); patches/preempt-realtime-loopback.patch0000664000077200007720000000070210655544575017625 0ustar mingomingo--- drivers/net/loopback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/drivers/net/loopback.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/loopback.c +++ linux-rt-rebase.q/drivers/net/loopback.c @@ -159,7 +159,7 @@ static int loopback_xmit(struct sk_buff lb_stats->packets++; put_cpu(); - netif_rx(skb); + netif_rx_ni(skb); return 0; } patches/acpi-remove-the-useless-ifdef-code.patch0000664000077200007720000000333010655544570021205 0ustar mingomingoSubject: ACPI: remove the now unused ifdef code The conversion of x86-64 to clock events makes the #ifdef CONFIG_GENERIC_CLOCKEVENTS n the timer broadcast functions useless. Remove it. Signed-off-by: Thomas Gleixner Cc: Ingo Molnar Cc: john stultz Cc: Andi Kleen Signed-off-by: Andrew Morton --- drivers/acpi/processor_idle.c | 12 ------------ 1 file changed, 12 deletions(-) Index: linux/drivers/acpi/processor_idle.c =================================================================== --- linux.orig/drivers/acpi/processor_idle.c +++ linux/drivers/acpi/processor_idle.c @@ -203,21 +203,12 @@ static void acpi_timer_check_state(int s static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { -#ifdef CONFIG_GENERIC_CLOCKEVENTS unsigned long reason; reason = pr->power.timer_broadcast_on_state < INT_MAX ? CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; clockevents_notify(reason, &pr->id); -#else - cpumask_t mask = cpumask_of_cpu(pr->id); - - if (pr->power.timer_broadcast_on_state < INT_MAX) - on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1); - else - on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1); -#endif } /* Power(C) State timer broadcast control */ @@ -225,8 +216,6 @@ static void acpi_state_timer_broadcast(s struct acpi_processor_cx *cx, int broadcast) { -#ifdef CONFIG_GENERIC_CLOCKEVENTS - int state = cx - pr->power.states; if (state >= pr->power.timer_broadcast_on_state) { @@ -236,7 +225,6 @@ static void acpi_state_timer_broadcast(s CLOCK_EVT_NOTIFY_BROADCAST_EXIT; clockevents_notify(reason, &pr->id); } -#endif } #else patches/2.6.21-rc6-lockless3-radix-tree-gang-slot-lookups.patch0000664000077200007720000002420310655544576023327 0ustar mingomingoFrom: Nick Piggin Subject: [patch 3/9] radix-tree: gang slot lookups Introduce gang_lookup_slot and gang_lookup_slot_tag functions, which are used by lockless pagecache. 
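As an illustration only (not part of this patch): a lockless caller might use the new slot variant along the lines sketched below. The wrapper function, its name and the fixed batch size of 16 are invented for the example; the radix-tree calls themselves are the ones added or relied on by this series.

/* Illustrative sketch: scan up to 16 present pagecache slots starting at
 * 'start' under rcu_read_lock(), re-reading each slot through
 * radix_tree_deref_slot() because it may change underneath us. */
static unsigned int example_scan(struct address_space *mapping, pgoff_t start)
{
	void **slots[16];
	struct page *page;
	unsigned int i, nr, found = 0;

	rcu_read_lock();
	nr = radix_tree_gang_lookup_slot(&mapping->page_tree, slots, start, 16);
	for (i = 0; i < nr; i++) {
		page = radix_tree_deref_slot(slots[i]);
		if (unlikely(!page))
			continue;	/* raced with removal, skip the slot */
		found++;		/* a real caller would try to pin 'page' here */
	}
	rcu_read_unlock();

	return found;
}
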
Signed-off-by: Nick Piggin --- include/linux/radix-tree.h | 12 ++- lib/radix-tree.c | 176 +++++++++++++++++++++++++++++++++++++++------ 2 files changed, 166 insertions(+), 22 deletions(-) Index: linux-rt-rebase.q/include/linux/radix-tree.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/radix-tree.h +++ linux-rt-rebase.q/include/linux/radix-tree.h @@ -99,12 +99,15 @@ do { \ * * The notable exceptions to this rule are the following functions: * radix_tree_lookup + * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup + * radix_tree_gang_lookup_slot * radix_tree_gang_lookup_tag + * radix_tree_gang_lookup_tag_slot * radix_tree_tagged * - * The first 4 functions are able to be called locklessly, using RCU. The + * The first 7 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. Other readers (lock-free or otherwise) and modifications may be * running concurrently. @@ -159,6 +162,9 @@ void *radix_tree_delete(struct radix_tre unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); +unsigned int +radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items); /* * On a mutex based kernel we can freely schedule within the radix code: */ @@ -182,6 +188,10 @@ unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag); +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag); int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); static inline void radix_tree_preload_end(void) Index: linux-rt-rebase.q/lib/radix-tree.c =================================================================== --- linux-rt-rebase.q.orig/lib/radix-tree.c +++ linux-rt-rebase.q/lib/radix-tree.c @@ -343,18 +343,17 @@ EXPORT_SYMBOL(radix_tree_insert); * Returns: the slot corresponding to the position @index in the * radix tree @root. This is useful for update-if-exists operations. * - * This function cannot be called under rcu_read_lock, it must be - * excluded from writers, as must the returned slot for subsequent - * use by radix_tree_deref_slot() and radix_tree_replace slot. - * Caller must hold tree write locked across slot lookup and - * replace. + * This function can be called under rcu_read_lock iff the slot is not + * modified by radix_tree_replace_slot, otherwise it must be called + * exclusive from other writers. Any dereference of the slot must be done + * using radix_tree_deref_slot. 
*/ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) { unsigned int height, shift; struct radix_tree_node *node, **slot; - node = root->rnode; + node = rcu_dereference(root->rnode); if (node == NULL) return NULL; @@ -374,7 +373,7 @@ void **radix_tree_lookup_slot(struct rad do { slot = (struct radix_tree_node **) (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = *slot; + node = rcu_dereference(*slot); if (node == NULL) return NULL; @@ -611,7 +610,7 @@ EXPORT_SYMBOL(radix_tree_tag_get); #endif static unsigned int -__lookup(struct radix_tree_node *slot, void **results, unsigned long index, +__lookup(struct radix_tree_node *slot, void ***results, unsigned long index, unsigned int max_items, unsigned long *next_index) { unsigned int nr_found = 0; @@ -645,11 +644,9 @@ __lookup(struct radix_tree_node *slot, v /* Bottom level: grab some items */ for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { - struct radix_tree_node *node; index++; - node = slot->slots[i]; - if (node) { - results[nr_found++] = rcu_dereference(node); + if (slot->slots[i]) { + results[nr_found++] = &(slot->slots[i]); if (nr_found == max_items) goto out; } @@ -703,13 +700,22 @@ radix_tree_gang_lookup(struct radix_tree ret = 0; while (ret < max_items) { - unsigned int nr_found; + unsigned int nr_found, slots_found, i; unsigned long next_index; /* Index of next search */ if (cur_index > max_index) break; - nr_found = __lookup(node, results + ret, cur_index, + slots_found = __lookup(node, (void ***)results + ret, cur_index, max_items - ret, &next_index); + nr_found = 0; + for (i = 0; i < slots_found; i++) { + struct radix_tree_node *slot; + slot = *(((void ***)results)[ret + i]); + if (!slot) + continue; + results[ret + nr_found] = rcu_dereference(slot); + nr_found++; + } ret += nr_found; if (next_index == 0) break; @@ -720,12 +726,71 @@ radix_tree_gang_lookup(struct radix_tree } EXPORT_SYMBOL(radix_tree_gang_lookup); +/** + * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree + * @root: radix tree root + * @results: where the results of the lookup are placed + * @first_index: start the lookup from this key + * @max_items: place up to this many items at *results + * + * Performs an index-ascending scan of the tree for present items. Places + * their slots at *@results and returns the number of items which were + * placed at *@results. + * + * The implementation is naive. + * + * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must + * be dereferenced with radix_tree_deref_slot, and if using only RCU + * protection, radix_tree_deref_slot may fail requiring a retry. 
+ */ +unsigned int +radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items) +{ + unsigned long max_index; + struct radix_tree_node *node; + unsigned long cur_index = first_index; + unsigned int ret; + + node = rcu_dereference(root->rnode); + if (!node) + return 0; + + if (!radix_tree_is_indirect_ptr(node)) { + if (first_index > 0) + return 0; + results[0] = (void **)&root->rnode; + return 1; + } + node = radix_tree_indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while (ret < max_items) { + unsigned int slots_found; + unsigned long next_index; /* Index of next search */ + + if (cur_index > max_index) + break; + slots_found = __lookup(node, results + ret, cur_index, + max_items - ret, &next_index); + ret += slots_found; + if (next_index == 0) + break; + cur_index = next_index; + } + + return ret; +} +EXPORT_SYMBOL(radix_tree_gang_lookup_slot); + /* * FIXME: the two tag_get()s here should use find_next_bit() instead of * open-coding the search. */ static unsigned int -__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, +__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, unsigned int max_items, unsigned long *next_index, unsigned int tag) { unsigned int nr_found = 0; @@ -770,9 +835,8 @@ __lookup_tag(struct radix_tree_node *slo * lookup ->slots[x] without a lock (ie. can't * rely on its value remaining the same). */ - if (node) { - node = rcu_dereference(node); - results[nr_found++] = node; + if (slot->slots[j]) { + results[nr_found++] = &slot->slots[j]; if (nr_found == max_items) goto out; } @@ -831,13 +895,22 @@ radix_tree_gang_lookup_tag(struct radix_ ret = 0; while (ret < max_items) { - unsigned int nr_found; + unsigned int slots_found, nr_found, i; unsigned long next_index; /* Index of next search */ if (cur_index > max_index) break; - nr_found = __lookup_tag(node, results + ret, cur_index, - max_items - ret, &next_index, tag); + slots_found = __lookup_tag(node, (void ***)results + ret, + cur_index, max_items - ret, &next_index, tag); + nr_found = 0; + for (i = 0; i < slots_found; i++) { + struct radix_tree_node *slot; + slot = *((void ***)results)[ret + i]; + if (!slot) + continue; + results[ret + nr_found] = rcu_dereference(slot); + nr_found++; + } ret += nr_found; if (next_index == 0) break; @@ -849,6 +922,67 @@ radix_tree_gang_lookup_tag(struct radix_ EXPORT_SYMBOL(radix_tree_gang_lookup_tag); /** + * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a + * radix tree based on a tag + * @root: radix tree root + * @results: where the results of the lookup are placed + * @first_index: start the lookup from this key + * @max_items: place up to this many items at *results + * @tag: the tag index (< RADIX_TREE_MAX_TAGS) + * + * Performs an index-ascending scan of the tree for present items which + * have the tag indexed by @tag set. Places the slots at *@results and + * returns the number of slots which were placed at *@results. 
+ */ +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag) +{ + struct radix_tree_node *node; + unsigned long max_index; + unsigned long cur_index = first_index; + unsigned int ret; + + /* check the root's tag bit */ + if (!root_tag_get(root, tag)) + return 0; + + node = rcu_dereference(root->rnode); + if (!node) + return 0; + + if (!radix_tree_is_indirect_ptr(node)) { + if (first_index > 0) + return 0; + results[0] = (void **)&root->rnode; + return 1; + } + node = radix_tree_indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while (ret < max_items) { + unsigned int slots_found; + unsigned long next_index; /* Index of next search */ + + if (cur_index > max_index) + break; + slots_found = __lookup_tag(node, results + ret, + cur_index, max_items - ret, &next_index, tag); + ret += slots_found; + if (next_index == 0) + break; + cur_index = next_index; + } + + return ret; +} +EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); + + +/** * radix_tree_shrink - shrink height of a radix tree to minimal * @root radix tree root */ patches/qrcu.patch0000664000077200007720000001463010655544576013541 0ustar mingomingoFrom: "Paul E. McKenney" Subject: [PATCH] QRCU with lockless fastpath Hello! This is an updated version of Oleg Nesterov's QRCU that avoids the earlier lock acquisition on the synchronize_qrcu() fastpath. This passes rcutorture on x86 and the weakly ordered POWER. A promela model of the code passes as noted before for 2 readers and 3 updaters and for 3 readers and 2 updaters. 3 readers and 3 updaters runs every machine that I have access to out of memory -- nothing like a little combinatorial explosion! However, after some thought, the proof ended up being simple enough: 1. If synchronize_qrcu() exits too soon, then by definition there has been a reader present during synchronize_srcu()'s full execution. 2. The counter corresponding to this reader will be at least 1 at all times. 3. The synchronize_qrcu() code forces at least one of the counters to be at least one at all times -- if there is a reader, the sum will be at least two. (Unfortunately, we cannot fetch the pair of counters atomically.) 4. Therefore, the only way that synchronize_qrcu()s fastpath can see a sum of 1 is if it races with another synchronize_qrcu() -- the first synchronize_qrcu() must read one of the counters before the second synchronize_qrcu() increments it, and must read the other counter after the second synchronize_qrcu() decrements it. There can be at most one reader present through this entire operation -- otherwise, the first synchronize_qrcu() will see a sum of 2 or greater. 5. But the second synchronize_qrcu() will not release the mutex until after the reader is done. During this time, the first synchronize_qrcu() will always see a sum of at least 2, and therefore cannot take the remainder of the fastpath until the reader is done. 6. Because the second synchronize_qrcu() holds the mutex, no other synchronize_qrcu() can manipulate the counters until the reader is done. A repeat of the race called out in #4 above therefore cannot happen until after the reader is done, in which case it is safe for the first synchronize_qrcu() to proceed. Therefore, two summations of the counter separated by a memory barrier suffices and the implementation shown below also suffices. 
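Purely as a usage illustration (not part of the patch): a reader/updater pair built on this API might look like the sketch below. The shared pointer, the function names and the kfree() of the old object are invented for the example; init_qrcu_struct(), qrcu_read_lock()/qrcu_read_unlock() and synchronize_qrcu() are the calls introduced here.

static struct qrcu_struct example_qrcu;	/* init_qrcu_struct() at setup time */
static void *example_ptr;

static void example_reader(void)
{
	int idx = qrcu_read_lock(&example_qrcu);
	void *p = rcu_dereference(example_ptr);

	if (p) {
		/* use *p; it cannot be freed until we drop the read side */
	}
	qrcu_read_unlock(&example_qrcu, idx);
}

static void example_update(void *new)
{
	void *old = example_ptr;

	rcu_assign_pointer(example_ptr, new);
	synchronize_qrcu(&example_qrcu);	/* sleeps only if a reader is active */
	kfree(old);
}
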
(And, yes, the fastpath -could- check for a sum of zero and exit immediately, but this would help only in case of a three-way race between two synchronize_qrcu()s and a qrcu_read_unlock(), would add another compare, so is not worth it.) Signed-off-by: Paul E. McKenney --- include/linux/srcu.h | 22 +++++++++++++ kernel/srcu.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+) Index: linux-rt-rebase.q/include/linux/srcu.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/srcu.h +++ linux-rt-rebase.q/include/linux/srcu.h @@ -27,6 +27,8 @@ #ifndef _LINUX_SRCU_H #define _LINUX_SRCU_H +#include + struct srcu_struct_array { int c[2]; }; @@ -50,4 +52,24 @@ void srcu_read_unlock(struct srcu_struct void synchronize_srcu(struct srcu_struct *sp); long srcu_batches_completed(struct srcu_struct *sp); +/* + * fully compatible with srcu, but optimized for writers. + */ + +struct qrcu_struct { + int completed; + atomic_t ctr[2]; + wait_queue_head_t wq; + struct mutex mutex; +}; + +int init_qrcu_struct(struct qrcu_struct *qp); +int qrcu_read_lock(struct qrcu_struct *qp); +void qrcu_read_unlock(struct qrcu_struct *qp, int idx); +void synchronize_qrcu(struct qrcu_struct *qp); + +static inline void cleanup_qrcu_struct(struct qrcu_struct *qp) +{ +} + #endif Index: linux-rt-rebase.q/kernel/srcu.c =================================================================== --- linux-rt-rebase.q.orig/kernel/srcu.c +++ linux-rt-rebase.q/kernel/srcu.c @@ -256,3 +256,89 @@ EXPORT_SYMBOL_GPL(srcu_read_unlock); EXPORT_SYMBOL_GPL(synchronize_srcu); EXPORT_SYMBOL_GPL(srcu_batches_completed); EXPORT_SYMBOL_GPL(srcu_readers_active); + +int init_qrcu_struct(struct qrcu_struct *qp) +{ + qp->completed = 0; + atomic_set(qp->ctr + 0, 1); + atomic_set(qp->ctr + 1, 0); + init_waitqueue_head(&qp->wq); + mutex_init(&qp->mutex); + + return 0; +} + +int qrcu_read_lock(struct qrcu_struct *qp) +{ + for (;;) { + int idx = qp->completed & 0x1; + if (likely(atomic_inc_not_zero(qp->ctr + idx))) + return idx; + } +} + +void qrcu_read_unlock(struct qrcu_struct *qp, int idx) +{ + if (atomic_dec_and_test(qp->ctr + idx)) + wake_up(&qp->wq); +} + +void synchronize_qrcu(struct qrcu_struct *qp) +{ + int idx; + + smp_mb(); /* Force preceding change to happen before fastpath check. */ + + /* + * Fastpath: If the two counters sum to "1" at a given point in + * time, there are no readers. However, it takes two separate + * loads to sample both counters, which won't occur simultaneously. + * So we might race with a counter switch, so that we might see + * ctr[0]==0, then the counter might switch, then we might see + * ctr[1]==1 (unbeknownst to us because there is a reader still + * there). So we do a read memory barrier and recheck. If the + * same race happens again, there must have been a second counter + * switch. This second counter switch could not have happened + * until all preceding readers finished, so if the condition + * is true both times, we may safely proceed. + * + * This relies critically on the atomic increment and atomic + * decrement being seen as executing in order. + */ + + if (atomic_read(&qp->ctr[0]) + atomic_read(&qp->ctr[1]) <= 1) { + smp_rmb(); /* Keep two checks independent. 
*/ + if (atomic_read(&qp->ctr[0]) + atomic_read(&qp->ctr[1]) <= 1) + goto out; + } + + mutex_lock(&qp->mutex); + + idx = qp->completed & 0x1; + if (atomic_read(qp->ctr + idx) == 1) + goto out_unlock; + + atomic_inc(qp->ctr + (idx ^ 0x1)); + + /* + * Prevent subsequent decrement from being seen before previous + * increment -- such an inversion could cause the fastpath + * above to falsely conclude that there were no readers. Also, + * reduce the likelihood that qrcu_read_lock() will loop. + */ + + smp_mb__after_atomic_inc(); + qp->completed++; + + atomic_dec(qp->ctr + idx); + __wait_event(qp->wq, !atomic_read(qp->ctr + idx)); +out_unlock: + mutex_unlock(&qp->mutex); +out: + smp_mb(); /* force subsequent free after qrcu_read_unlock(). */ +} + +EXPORT_SYMBOL_GPL(init_qrcu_struct); +EXPORT_SYMBOL_GPL(qrcu_read_lock); +EXPORT_SYMBOL_GPL(qrcu_read_unlock); +EXPORT_SYMBOL_GPL(synchronize_qrcu); patches/rt-mutex-x86-64.patch0000664000077200007720000004171410655544573015226 0ustar mingomingo--- arch/x86_64/Kconfig | 15 ++++--- arch/x86_64/kernel/entry.S | 18 ++++----- arch/x86_64/kernel/tsc_sync.c | 2 - arch/x86_64/kernel/vsyscall.c | 2 - arch/x86_64/kernel/x8664_ksyms.c | 10 +++-- arch/x86_64/lib/thunk.S | 12 +++--- include/asm-x86_64/semaphore.h | 69 +++++++++++++++++++++--------------- include/asm-x86_64/spinlock.h | 28 +++++++------- include/asm-x86_64/spinlock_types.h | 4 +- include/asm-x86_64/thread_info.h | 2 + 10 files changed, 91 insertions(+), 71 deletions(-) Index: linux-rt-rebase.q/arch/x86_64/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/Kconfig +++ linux-rt-rebase.q/arch/x86_64/Kconfig @@ -86,13 +86,6 @@ config ISA config SBUS bool -config RWSEM_GENERIC_SPINLOCK - bool - default y - -config RWSEM_XCHGADD_ALGORITHM - bool - config GENERIC_HWEIGHT bool default y @@ -369,6 +362,14 @@ config NUMA If the system is EM64T, you should say N unless your system is EM64T NUMA. +config RWSEM_GENERIC_SPINLOCK + bool + default y + +config RWSEM_XCHGADD_ALGORITHM + depends on !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT + bool + config K8_NUMA bool "Old style AMD Opteron NUMA detection" depends on NUMA && PCI Index: linux-rt-rebase.q/arch/x86_64/kernel/entry.S =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/entry.S +++ linux-rt-rebase.q/arch/x86_64/kernel/entry.S @@ -310,8 +310,8 @@ sysret_check: /* Handle reschedules */ /* edx: work, edi: workmask */ sysret_careful: - bt $TIF_NEED_RESCHED,%edx - jnc sysret_signal + testl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED),%edx + jz sysret_signal TRACE_IRQS_ON sti pushq %rdi @@ -334,7 +334,7 @@ sysret_signal: leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 xorl %esi,%esi # oldset -> arg2 call ptregscall_common -1: movl $_TIF_NEED_RESCHED,%edi +1: movl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED),%edi /* Use IRET because user could have changed frame. This works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ cli @@ -389,8 +389,8 @@ int_with_check: /* First do a reschedule test. 
*/ /* edx: work, edi: workmask */ int_careful: - bt $TIF_NEED_RESCHED,%edx - jnc int_very_careful + testl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED),%edx + jz int_very_careful TRACE_IRQS_ON sti pushq %rdi @@ -425,7 +425,7 @@ int_signal: movq %rsp,%rdi # &ptregs -> arg1 xorl %esi,%esi # oldset -> arg2 call do_notify_resume -1: movl $_TIF_NEED_RESCHED,%edi +1: movl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED),%edi int_restore_rest: RESTORE_REST cli @@ -629,8 +629,8 @@ bad_iret: /* edi: workmask, edx: work */ retint_careful: CFI_RESTORE_STATE - bt $TIF_NEED_RESCHED,%edx - jnc retint_signal + testl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED),%edx + jz retint_signal TRACE_IRQS_ON sti pushq %rdi @@ -656,7 +656,7 @@ retint_signal: RESTORE_REST cli TRACE_IRQS_OFF - movl $_TIF_NEED_RESCHED,%edi + movl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED),%edi GET_THREAD_INFO(%rcx) jmp retint_check Index: linux-rt-rebase.q/arch/x86_64/kernel/tsc_sync.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/tsc_sync.c +++ linux-rt-rebase.q/arch/x86_64/kernel/tsc_sync.c @@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count * we want to have the fastest, inlined, non-debug version * of a critical section, to be able to prove TSC time-warps: */ -static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; +static __cpuinitdata __raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; static __cpuinitdata int nr_warps; Index: linux-rt-rebase.q/arch/x86_64/kernel/vsyscall.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/vsyscall.c +++ linux-rt-rebase.q/arch/x86_64/kernel/vsyscall.c @@ -62,7 +62,7 @@ int __vgetcpu_mode __section_vgetcpu_mod struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data = { - .lock = SEQLOCK_UNLOCKED, + .lock = __RAW_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), .sysctl_enabled = 1, }; Index: linux-rt-rebase.q/arch/x86_64/kernel/x8664_ksyms.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/x8664_ksyms.c +++ linux-rt-rebase.q/arch/x86_64/kernel/x8664_ksyms.c @@ -11,10 +11,12 @@ EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(__down_failed); -EXPORT_SYMBOL(__down_failed_interruptible); -EXPORT_SYMBOL(__down_failed_trylock); -EXPORT_SYMBOL(__up_wakeup); +#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +EXPORT_SYMBOL(__compat_down_failed); +EXPORT_SYMBOL(__compat_down_failed_interruptible); +EXPORT_SYMBOL(__compat_down_failed_trylock); +EXPORT_SYMBOL(__compat_up_wakeup); +#endif EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); Index: linux-rt-rebase.q/arch/x86_64/lib/thunk.S =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/lib/thunk.S +++ linux-rt-rebase.q/arch/x86_64/lib/thunk.S @@ -40,11 +40,13 @@ thunk rwsem_wake_thunk,rwsem_wake thunk rwsem_downgrade_thunk,rwsem_downgrade_wake #endif - - thunk __down_failed,__down - thunk_retrax __down_failed_interruptible,__down_interruptible - thunk_retrax __down_failed_trylock,__down_trylock - thunk __up_wakeup,__up + +#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK + thunk __compat_down_failed,__compat_down + thunk_retrax __compat_down_failed_interruptible,__compat_down_interruptible + thunk_retrax __compat_down_failed_trylock,__compat_down_trylock + thunk __compat_up_wakeup,__compat_up +#endif 
#ifdef CONFIG_TRACE_IRQFLAGS /* put return address in rdi (arg1) */ Index: linux-rt-rebase.q/include/asm-x86_64/semaphore.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/semaphore.h +++ linux-rt-rebase.q/include/asm-x86_64/semaphore.h @@ -5,6 +5,10 @@ #ifdef __KERNEL__ +#ifndef CONFIG_PREEMPT_RT +# define compat_semaphore semaphore +#endif + /* * SMP- and interrupt-safe semaphores.. * @@ -43,29 +47,34 @@ #include #include -struct semaphore { +struct compat_semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; }; -#define __SEMAPHORE_INITIALIZER(name, n) \ +#define __COMPAT_SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .sleepers = 0, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) +#define __COMPAT_MUTEX_INITIALIZER(name) \ + __COMPAT_SEMAPHORE_INITIALIZER(name,1) + +#define __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct compat_semaphore name = __COMPAT_SEMAPHORE_INITIALIZER(name,count) -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) -#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) +#define COMPAT_DECLARE_MUTEX(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,1) +#define COMPAT_DECLARE_MUTEX_LOCKED(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,0) -static inline void sema_init (struct semaphore *sem, int val) +#define compat_sema_count(sem) atomic_read(&(sem)->count) + +static inline void compat_sema_init (struct compat_semaphore *sem, int val) { /* - * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); + * *sem = (struct compat_semaphore)__SEMAPHORE_INITIALIZER((*sem),val); * * i'd rather use the more flexible initialization above, but sadly * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. 
@@ -75,32 +84,33 @@ static inline void sema_init (struct sem init_waitqueue_head(&sem->wait); } -static inline void init_MUTEX (struct semaphore *sem) +static inline void compat_init_MUTEX (struct compat_semaphore *sem) { - sema_init(sem, 1); + compat_sema_init(sem, 1); } -static inline void init_MUTEX_LOCKED (struct semaphore *sem) +static inline void compat_init_MUTEX_LOCKED (struct compat_semaphore *sem) { - sema_init(sem, 0); + compat_sema_init(sem, 0); } -asmlinkage void __down_failed(void /* special register calling convention */); -asmlinkage int __down_failed_interruptible(void /* params in registers */); -asmlinkage int __down_failed_trylock(void /* params in registers */); -asmlinkage void __up_wakeup(void /* special register calling convention */); +asmlinkage void __compat_down_failed(void /* special register calling convention */); +asmlinkage int __compat_down_failed_interruptible(void /* params in registers */); +asmlinkage int __compat_down_failed_trylock(void /* params in registers */); +asmlinkage void __compat_up_wakeup(void /* special register calling convention */); -asmlinkage void __down(struct semaphore * sem); -asmlinkage int __down_interruptible(struct semaphore * sem); -asmlinkage int __down_trylock(struct semaphore * sem); -asmlinkage void __up(struct semaphore * sem); +asmlinkage void __compat_down(struct compat_semaphore * sem); +asmlinkage int __compat_down_interruptible(struct compat_semaphore * sem); +asmlinkage int __compat_down_trylock(struct compat_semaphore * sem); +asmlinkage void __compat_up(struct compat_semaphore * sem); +asmlinkage int compat_sem_is_locked(struct compat_semaphore *sem); /* * This is ugly, but we want the default case to fall through. * "__down_failed" is a special asm handler that calls the C * routine that actually waits. See arch/x86_64/kernel/semaphore.c */ -static inline void down(struct semaphore * sem) +static inline void compat_down(struct compat_semaphore * sem) { might_sleep(); @@ -108,7 +118,7 @@ static inline void down(struct semaphore "# atomic down operation\n\t" LOCK_PREFIX "decl %0\n\t" /* --sem->count */ "jns 1f\n\t" - "call __down_failed\n" + "call __compat_down_failed\n" "1:" :"=m" (sem->count) :"D" (sem) @@ -119,7 +129,7 @@ static inline void down(struct semaphore * Interruptible try to acquire a semaphore. If we obtained * it, return zero. If we were interrupted, returns -EINTR */ -static inline int down_interruptible(struct semaphore * sem) +static inline int compat_down_interruptible(struct compat_semaphore * sem) { int result; @@ -130,7 +140,7 @@ static inline int down_interruptible(str "xorl %0,%0\n\t" LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "jns 2f\n\t" - "call __down_failed_interruptible\n" + "call __compat_down_failed_interruptible\n" "2:\n" :"=&a" (result), "=m" (sem->count) :"D" (sem) @@ -142,7 +152,7 @@ static inline int down_interruptible(str * Non-blockingly attempt to down() a semaphore. * Returns zero if we acquired it */ -static inline int down_trylock(struct semaphore * sem) +static inline int compat_down_trylock(struct compat_semaphore * sem) { int result; @@ -151,7 +161,7 @@ static inline int down_trylock(struct se "xorl %0,%0\n\t" LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "jns 2f\n\t" - "call __down_failed_trylock\n\t" + "call __compat_down_failed_trylock\n\t" "2:\n" :"=&a" (result), "=m" (sem->count) :"D" (sem) @@ -165,17 +175,20 @@ static inline int down_trylock(struct se * The default case (no contention) will result in NO * jumps for both down() and up(). 
*/ -static inline void up(struct semaphore * sem) +static inline void compat_up(struct compat_semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ "jg 1f\n\t" - "call __up_wakeup\n" + "call __compat_up_wakeup\n" "1:" :"=m" (sem->count) :"D" (sem) :"memory"); } + +#include + #endif /* __KERNEL__ */ #endif Index: linux-rt-rebase.q/include/asm-x86_64/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/spinlock.h +++ linux-rt-rebase.q/include/asm-x86_64/spinlock.h @@ -17,12 +17,12 @@ * (the type definitions are in asm/spinlock_types.h) */ -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(__raw_spinlock_t *lock) { return *(volatile signed int *)(&(lock)->slock) <= 0; } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(__raw_spinlock_t *lock) { asm volatile( "\n1:\t" @@ -40,7 +40,7 @@ static inline void __raw_spin_lock(raw_s * Same as __raw_spin_lock, but reenable interrupts during spinning. */ #ifndef CONFIG_PROVE_LOCKING -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +static inline void __raw_spin_lock_flags(__raw_spinlock_t *lock, unsigned long flags) { asm volatile( "\n1:\t" @@ -65,7 +65,7 @@ static inline void __raw_spin_lock_flags } #endif -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(__raw_spinlock_t *lock) { int oldval; @@ -77,12 +77,12 @@ static inline int __raw_spin_trylock(raw return oldval > 0; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(__raw_spinlock_t *lock) { asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory"); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(__raw_spinlock_t *lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); @@ -102,17 +102,17 @@ static inline void __raw_spin_unlock_wai * with the high bit (sign) being the "contended" bit. 
*/ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int __raw_read_can_lock(__raw_rwlock_t *lock) { return (int)(lock)->lock > 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int __raw_write_can_lock(__raw_rwlock_t *lock) { return (lock)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" "jns 1f\n" @@ -121,7 +121,7 @@ static inline void __raw_read_lock(raw_r ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t" "jz 1f\n" @@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_ ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int __raw_read_trylock(__raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; atomic_dec(count); @@ -140,7 +140,7 @@ static inline int __raw_read_trylock(raw return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int __raw_write_trylock(__raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) @@ -149,12 +149,12 @@ static inline int __raw_write_trylock(ra return 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0" : "=m" (rw->lock) : : "memory"); Index: linux-rt-rebase.q/include/asm-x86_64/spinlock_types.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/spinlock_types.h +++ linux-rt-rebase.q/include/asm-x86_64/spinlock_types.h @@ -7,13 +7,13 @@ typedef struct { unsigned int slock; -} raw_spinlock_t; +} __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } typedef struct { unsigned int lock; -} raw_rwlock_t; +} __raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } Index: linux-rt-rebase.q/include/asm-x86_64/thread_info.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/thread_info.h +++ linux-rt-rebase.q/include/asm-x86_64/thread_info.h @@ -111,6 +111,7 @@ static inline struct thread_info *stack_ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ #define TIF_IRET 5 /* force IRET */ +#define TIF_NEED_RESCHED_DELAYED 6 /* reschedul on return to userspace */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ @@ -133,6 +134,7 @@ static inline struct thread_info *stack_ #define _TIF_SECCOMP (1< -- owa --- arch/powerpc/kernel/entry_64.S | 5 +++++ 1 file changed, 5 insertions(+) Index: linux-rt-rebase.q/arch/powerpc/kernel/entry_64.S =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/entry_64.S +++ linux-rt-rebase.q/arch/powerpc/kernel/entry_64.S @@ -580,6 +580,11 @@ do_work: user_work: #endif + /* here we are preempting the current task */ + li r0,1 + stb r0,PACASOFTIRQEN(r13) + stb 
r0,PACAHARDIRQEN(r13) + /* Enable interrupts */ ori r10,r10,MSR_EE mtmsrd r10,1 patches/RT_utsname.patch0000664000077200007720000000254710655544577014655 0ustar mingomingo--- init/Makefile | 2 +- scripts/mkcompile_h | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/init/Makefile =================================================================== --- linux-rt-rebase.q.orig/init/Makefile +++ linux-rt-rebase.q/init/Makefile @@ -30,4 +30,4 @@ $(obj)/version.o: include/linux/compile. include/linux/compile.h: FORCE @echo ' CHK $@' $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(CFLAGS)" + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT)" "$(CC) $(CFLAGS)" Index: linux-rt-rebase.q/scripts/mkcompile_h =================================================================== --- linux-rt-rebase.q.orig/scripts/mkcompile_h +++ linux-rt-rebase.q/scripts/mkcompile_h @@ -2,7 +2,8 @@ TARGET=$1 ARCH=$2 SMP=$3 PREEMPT=$4 -CC=$5 +PREEMPT_RT=$5 +CC=$6 # If compile.h exists already and we don't own autoconf.h # (i.e. we're not the same user who did make *config), don't @@ -43,6 +44,7 @@ UTS_VERSION="#$VERSION" CONFIG_FLAGS="" if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi +if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length patches/Allocate-RTSJ-memory-for-TCK-conformance-test.patch0000664000077200007720000001171210655544576023047 0ustar mingomingoAllocate RTSJ memory for TCK conformance test. From: Theodore Ts'o This kernel message allocates memory which is required by the real-time TCK conformance test which tests the JVM's RTSJ implementation. Unfortunately, RTSJ requires that Java programs have direct access to physical memory. This kernel reserves memory which can then be used by an external /dev/rmem loadable kernel module. Signed-off-by: "Theodore Ts'o" --- drivers/char/Kconfig | 7 +++ drivers/char/Makefile | 2 drivers/char/alloc_rtsj_mem.c | 88 ++++++++++++++++++++++++++++++++++++++++++ init/main.c | 7 +++ 4 files changed, 104 insertions(+) Index: linux-rt-rebase.q/drivers/char/Kconfig =================================================================== --- linux-rt-rebase.q.orig/drivers/char/Kconfig +++ linux-rt-rebase.q/drivers/char/Kconfig @@ -1107,6 +1107,13 @@ config RMEM patching /dev/mem because we don't expect this functionality to ever be accepted into mainline. +config ALLOC_RTSJ_MEM + tristate "RTSJ-specific hack to reserve memory" + default m + help + The RTSJ TCK conformance test requires reserving some physical + memory for testing /dev/rmem. 
+ config DEVPORT bool depends on !M68K Index: linux-rt-rebase.q/drivers/char/Makefile =================================================================== --- linux-rt-rebase.q.orig/drivers/char/Makefile +++ linux-rt-rebase.q/drivers/char/Makefile @@ -114,6 +114,8 @@ obj-$(CONFIG_PS3_FLASH) += ps3flash.o obj-$(CONFIG_JS_RTC) += js-rtc.o js-rtc-y = rtc.o +obj-$(CONFIG_ALLOC_RTSJ_MEM) += alloc_rtsj_mem.o + # Files generated that shall be removed upon make clean clean-files := consolemap_deftbl.c defkeymap.c Index: linux-rt-rebase.q/drivers/char/alloc_rtsj_mem.c =================================================================== --- /dev/null +++ linux-rt-rebase.q/drivers/char/alloc_rtsj_mem.c @@ -0,0 +1,88 @@ +/* + * alloc_rtsj_mem.c -- Hack to allocate some memory + * + * Copyright (C) 2005 by Theodore Ts'o + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include + +#include + +MODULE_AUTHOR("Theodore Tso"); +MODULE_DESCRIPTION("RTSJ alloc memory"); +MODULE_LICENSE("GPL"); + +static void *mem = 0; +int size = 0, addr = 0; + +module_param(size, int, 0444); +module_param(addr, int, 0444); + +static void __exit shutdown_module(void) +{ + kfree(mem); +} + +#ifndef MODULE +void __init alloc_rtsj_mem_early_setup(void) +{ + if (size > PAGE_SIZE*2) { + mem = alloc_bootmem(size); + if (mem) { + printk(KERN_INFO "alloc_rtsj_mem: got %d bytes " + "using alloc_bootmem\n", size); + } else { + printk(KERN_INFO "alloc_rtsj_mem: failed to " + "get %d bytes from alloc_bootmem\n", size); + } + } +} +#endif + +static int __init startup_module(void) +{ + static char test_string[] = "The BOFH: Servicing users the way the " + "military\n\tservices targets for 15 years.\n"; + + if (!size) + return 0; + + if (!mem) { + mem = kmalloc(size, GFP_KERNEL); + if (mem) { + printk(KERN_INFO "alloc_rtsj_mem: got %d bytes " + "using kmalloc\n", size); + } else { + printk(KERN_ERR "alloc_rtsj_mem: failed to get " + "%d bytes using kmalloc\n", size); + return -ENOMEM; + } + } + memcpy(mem, test_string, min(sizeof(test_string), (size_t) size)); + addr = virt_to_phys(mem); + return 0; +} + +module_init(startup_module); +module_exit(shutdown_module); + Index: linux-rt-rebase.q/init/main.c =================================================================== --- linux-rt-rebase.q.orig/init/main.c +++ linux-rt-rebase.q/init/main.c @@ -105,6 +105,12 @@ static inline void acpi_early_init(void) #ifndef CONFIG_DEBUG_RODATA static inline void mark_rodata_ro(void) { } #endif +#ifdef CONFIG_ALLOC_RTSJ_MEM +extern void alloc_rtsj_mem_early_setup(void); +#else +static inline void alloc_rtsj_mem_early_setup(void) { } +#endif + #ifdef CONFIG_TC extern void tc_init(void); @@ -619,6 +625,7 @@ asmlinkage void __init start_kernel(void #endif vfs_caches_init_early(); cpuset_init_early(); + alloc_rtsj_mem_early_setup(); 
mem_init(); kmem_cache_init(); setup_per_cpu_pageset(); patches/preempt-realtime-compile-fixes.patch0000664000077200007720000000114110655544575020575 0ustar mingomingo--- drivers/block/paride/pseudo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/drivers/block/paride/pseudo.h =================================================================== --- linux-rt-rebase.q.orig/drivers/block/paride/pseudo.h +++ linux-rt-rebase.q/drivers/block/paride/pseudo.h @@ -43,7 +43,7 @@ static unsigned long ps_timeout; static int ps_tq_active = 0; static int ps_nice = 0; -static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); +static __attribute__((unused)) DEFINE_SPINLOCK(ps_spinlock); static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int); patches/preempt-realtime-arm-integrator.patch0000664000077200007720000000211710655544574020767 0ustar mingomingo--- arch/arm/mach-integrator/core.c | 2 +- arch/arm/mach-integrator/pci_v3.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/arch/arm/mach-integrator/core.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mach-integrator/core.c +++ linux-rt-rebase.q/arch/arm/mach-integrator/core.c @@ -164,7 +164,7 @@ static struct amba_pl010_data integrator #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_BASE) + INTEGRATOR_HDR_CTRL_OFFSET -static DEFINE_SPINLOCK(cm_lock); +static DEFINE_RAW_SPINLOCK(cm_lock); /** * cm_control - update the CM_CTRL register. Index: linux-rt-rebase.q/arch/arm/mach-integrator/pci_v3.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mach-integrator/pci_v3.c +++ linux-rt-rebase.q/arch/arm/mach-integrator/pci_v3.c @@ -162,7 +162,7 @@ * 7:2 register number * */ -static DEFINE_SPINLOCK(v3_lock); +static DEFINE_RAW_SPINLOCK(v3_lock); #define PCI_BUS_NONMEM_START 0x00000000 #define PCI_BUS_NONMEM_SIZE SZ_256M patches/preempt-realtime-arm.patch0000664000077200007720000001527310655544574016622 0ustar mingomingo--- arch/arm/kernel/dma.c | 2 +- arch/arm/kernel/irq.c | 2 +- arch/arm/kernel/signal.c | 8 ++++++++ arch/arm/kernel/smp.c | 2 +- arch/arm/kernel/traps.c | 4 ++-- arch/arm/mm/consistent.c | 2 +- arch/arm/mm/copypage-v4mc.c | 2 +- arch/arm/mm/copypage-v6.c | 2 +- arch/arm/mm/copypage-xscale.c | 2 +- arch/arm/mm/mmu.c | 2 +- include/asm-arm/dma.h | 2 +- include/asm-arm/tlb.h | 9 ++++++--- 12 files changed, 25 insertions(+), 14 deletions(-) Index: linux-rt-rebase.q/arch/arm/kernel/dma.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/dma.c +++ linux-rt-rebase.q/arch/arm/kernel/dma.c @@ -20,7 +20,7 @@ #include -DEFINE_SPINLOCK(dma_spin_lock); +DEFINE_RAW_SPINLOCK(dma_spin_lock); EXPORT_SYMBOL(dma_spin_lock); static dma_t dma_chan[MAX_DMA_CHANNELS]; Index: linux-rt-rebase.q/arch/arm/kernel/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/irq.c +++ linux-rt-rebase.q/arch/arm/kernel/irq.c @@ -100,7 +100,7 @@ unlock: /* Handle bad interrupts */ static struct irq_desc bad_irq_desc = { .handle_irq = handle_bad_irq, - .lock = SPIN_LOCK_UNLOCKED + .lock = RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock) }; /* Index: linux-rt-rebase.q/arch/arm/kernel/signal.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/signal.c +++ linux-rt-rebase.q/arch/arm/kernel/signal.c @@ -623,6 +623,14 @@ static int do_signal(sigset_t 
*oldset, s siginfo_t info; int signr; +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + local_irq_enable(); + preempt_check_resched(); +#endif + /* * We want the common case to go fast, which * is why we may in certain cases get here from Index: linux-rt-rebase.q/arch/arm/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/smp.c +++ linux-rt-rebase.q/arch/arm/kernel/smp.c @@ -522,7 +522,7 @@ static void ipi_call_function(unsigned i cpu_clear(cpu, data->unfinished); } -static DEFINE_SPINLOCK(stop_lock); +static DEFINE_RAW_SPINLOCK(stop_lock); /* * ipi_cpu_stop - handle IPI from smp_send_stop() Index: linux-rt-rebase.q/arch/arm/kernel/traps.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/traps.c +++ linux-rt-rebase.q/arch/arm/kernel/traps.c @@ -233,7 +233,7 @@ static void __die(const char *str, int e } } -DEFINE_SPINLOCK(die_lock); +DEFINE_RAW_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. @@ -276,7 +276,7 @@ void arm_notify_die(const char *str, str } static LIST_HEAD(undef_hook); -static DEFINE_SPINLOCK(undef_lock); +static DEFINE_RAW_SPINLOCK(undef_lock); void register_undef_hook(struct undef_hook *hook) { Index: linux-rt-rebase.q/arch/arm/mm/consistent.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mm/consistent.c +++ linux-rt-rebase.q/arch/arm/mm/consistent.c @@ -40,7 +40,7 @@ * These are the page tables (2MB each) covering uncached, DMA consistent allocations */ static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; -static DEFINE_SPINLOCK(consistent_lock); +static DEFINE_RAW_SPINLOCK(consistent_lock); /* * VM region handling support. Index: linux-rt-rebase.q/arch/arm/mm/copypage-v4mc.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mm/copypage-v4mc.c +++ linux-rt-rebase.q/arch/arm/mm/copypage-v4mc.c @@ -30,7 +30,7 @@ #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ L_PTE_CACHEABLE) -static DEFINE_SPINLOCK(minicache_lock); +static DEFINE_RAW_SPINLOCK(minicache_lock); /* * ARMv4 mini-dcache optimised copy_user_page Index: linux-rt-rebase.q/arch/arm/mm/copypage-v6.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mm/copypage-v6.c +++ linux-rt-rebase.q/arch/arm/mm/copypage-v6.c @@ -26,7 +26,7 @@ #define from_address (0xffff8000) #define to_address (0xffffc000) -static DEFINE_SPINLOCK(v6_lock); +static DEFINE_RAW_SPINLOCK(v6_lock); /* * Copy the user page. 
No aliasing to deal with so we can just Index: linux-rt-rebase.q/arch/arm/mm/copypage-xscale.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mm/copypage-xscale.c +++ linux-rt-rebase.q/arch/arm/mm/copypage-xscale.c @@ -32,7 +32,7 @@ #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ L_PTE_CACHEABLE) -static DEFINE_SPINLOCK(minicache_lock); +static DEFINE_RAW_SPINLOCK(minicache_lock); /* * XScale mini-dcache optimised copy_user_page Index: linux-rt-rebase.q/arch/arm/mm/mmu.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mm/mmu.c +++ linux-rt-rebase.q/arch/arm/mm/mmu.c @@ -25,7 +25,7 @@ #include "mm.h" -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); +DEFINE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); extern void _stext, _etext, __data_start, _end; extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; Index: linux-rt-rebase.q/include/asm-arm/dma.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/dma.h +++ linux-rt-rebase.q/include/asm-arm/dma.h @@ -27,7 +27,7 @@ typedef unsigned int dmamode_t; #define DMA_MODE_CASCADE 2 #define DMA_AUTOINIT 4 -extern spinlock_t dma_spin_lock; +extern raw_spinlock_t dma_spin_lock; static inline unsigned long claim_dma_lock(void) { Index: linux-rt-rebase.q/include/asm-arm/tlb.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-arm/tlb.h +++ linux-rt-rebase.q/include/asm-arm/tlb.h @@ -36,15 +36,18 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; + int cpu; }; -DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); +DECLARE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); static inline struct mmu_gather * tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) { - struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); + int cpu; + struct mmu_gather *tlb = &get_cpu_var_locked(mmu_gathers, &cpu); + tlb->cpu = cpu; tlb->mm = mm; tlb->fullmm = full_mm_flush; @@ -60,7 +63,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, u /* keep the page table cache within bounds */ check_pgt_cache(); - put_cpu_var(mmu_gathers); + put_cpu_var_locked(mmu_gathers, tlb->cpu); } #define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0) patches/ich-force-hpet-ich5-quirk-to-force-detect-enable.patch0000664000077200007720000001230610655544570023541 0ustar mingomingoFrom: Venki Pallipadi force_enable hpet for ICH5. 
Signed-off-by: Venkatesh Pallipadi Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Andi Kleen Cc: john stultz Cc: Greg KH Signed-off-by: Andrew Morton --- arch/i386/kernel/hpet.c | 2 arch/i386/kernel/quirks.c | 101 +++++++++++++++++++++++++++++++++++++++++++++- include/asm-i386/hpet.h | 2 include/linux/pci_ids.h | 1 4 files changed, 103 insertions(+), 3 deletions(-) Index: linux/arch/i386/kernel/hpet.c =================================================================== --- linux.orig/arch/i386/kernel/hpet.c +++ linux/arch/i386/kernel/hpet.c @@ -181,7 +181,7 @@ static void hpet_start_counter(void) static void hpet_resume_device(void) { - ich_force_hpet_resume(); + force_hpet_resume(); } static void hpet_restart_counter(void) Index: linux/arch/i386/kernel/quirks.c =================================================================== --- linux.orig/arch/i386/kernel/quirks.c +++ linux/arch/i386/kernel/quirks.c @@ -53,9 +53,15 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IN #if defined(CONFIG_HPET_TIMER) unsigned long force_hpet_address; +static enum { + NONE_FORCE_HPET_RESUME, + OLD_ICH_FORCE_HPET_RESUME, + ICH_FORCE_HPET_RESUME +} force_hpet_resume_type; + static void __iomem *rcba_base; -void ich_force_hpet_resume(void) +static void ich_force_hpet_resume(void) { u32 val; @@ -133,6 +139,7 @@ static void ich_force_enable_hpet(struct iounmap(rcba_base); printk(KERN_DEBUG "Failed to force enable HPET\n"); } else { + force_hpet_resume_type = ICH_FORCE_HPET_RESUME; printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", force_hpet_address); } @@ -148,4 +155,96 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, ich_force_enable_hpet); + + +static struct pci_dev *cached_dev; + +static void old_ich_force_hpet_resume(void) +{ + u32 val, gen_cntl; + + if (!force_hpet_address || !cached_dev) + return; + + pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); + gen_cntl &= (~(0x7 << 15)); + gen_cntl |= (0x4 << 15); + + pci_write_config_dword(cached_dev, 0xD0, gen_cntl); + pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); + val = gen_cntl >> 15; + val &= 0x7; + if (val == 0x4) + printk(KERN_DEBUG "Force enabled HPET at resume\n"); + else + BUG(); +} + +static void old_ich_force_enable_hpet(struct pci_dev *dev) +{ + u32 val, gen_cntl; + + if (hpet_address || force_hpet_address) + return; + + pci_read_config_dword(dev, 0xD0, &gen_cntl); + /* + * Bit 17 is HPET enable bit. + * Bit 16:15 control the HPET base address. + */ + val = gen_cntl >> 15; + val &= 0x7; + if (val & 0x4) { + val &= 0x3; + force_hpet_address = 0xFED00000 | (val << 12); + printk(KERN_DEBUG "HPET at base address 0x%lx\n", + force_hpet_address); + cached_dev = dev; + return; + } + + /* + * HPET is disabled. Trying enabling at FED00000 and check + * whether it sticks + */ + gen_cntl &= (~(0x7 << 15)); + gen_cntl |= (0x4 << 15); + pci_write_config_dword(dev, 0xD0, gen_cntl); + + pci_read_config_dword(dev, 0xD0, &gen_cntl); + + val = gen_cntl >> 15; + val &= 0x7; + if (val & 0x4) { + /* HPET is enabled in HPTC. 
Just not reported by BIOS */ + val &= 0x3; + force_hpet_address = 0xFED00000 | (val << 12); + printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", + force_hpet_address); + force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME; + return; + } + + printk(KERN_DEBUG "Failed to force enable HPET\n"); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, + old_ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, + old_ich_force_enable_hpet); + +void force_hpet_resume(void) +{ + switch (force_hpet_resume_type) { + case ICH_FORCE_HPET_RESUME: + return ich_force_hpet_resume(); + + case OLD_ICH_FORCE_HPET_RESUME: + return old_ich_force_hpet_resume(); + + default: + break; + } +} + #endif Index: linux/include/asm-i386/hpet.h =================================================================== --- linux.orig/include/asm-i386/hpet.h +++ linux/include/asm-i386/hpet.h @@ -68,7 +68,7 @@ extern unsigned long force_hpet_address; extern int is_hpet_enabled(void); extern int hpet_enable(void); extern unsigned long hpet_readl(unsigned long a); -extern void ich_force_hpet_resume(void); +extern void force_hpet_resume(void); #ifdef CONFIG_HPET_EMULATE_RTC Index: linux/include/linux/pci_ids.h =================================================================== --- linux.orig/include/linux/pci_ids.h +++ linux/include/linux/pci_ids.h @@ -2215,6 +2215,7 @@ #define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5 #define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6 #define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db +#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc #define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd #define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1 #define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2 patches/preempt-realtime-powerpc-update.patch0000664000077200007720000000420110655544574020767 0ustar mingomingo--- arch/powerpc/Kconfig.debug | 4 ++++ arch/powerpc/kernel/idle.c | 2 +- include/asm-powerpc/hw_irq.h | 2 +- include/asm-powerpc/pmac_feature.h | 2 +- 4 files changed, 7 insertions(+), 3 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/Kconfig.debug =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/Kconfig.debug +++ linux-rt-rebase.q/arch/powerpc/Kconfig.debug @@ -2,6 +2,10 @@ menu "Kernel hacking" source "lib/Kconfig.debug" +config TRACE_IRQFLAGS_SUPPORT + bool + default y + config DEBUG_STACKOVERFLOW bool "Check for stack overflows" depends on DEBUG_KERNEL Index: linux-rt-rebase.q/arch/powerpc/kernel/idle.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/idle.c +++ linux-rt-rebase.q/arch/powerpc/kernel/idle.c @@ -100,7 +100,7 @@ void cpu_idle(void) tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); schedule(); preempt_disable(); } Index: linux-rt-rebase.q/include/asm-powerpc/hw_irq.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/hw_irq.h +++ linux-rt-rebase.q/include/asm-powerpc/hw_irq.h @@ -120,7 +120,7 @@ static inline void raw_local_irq_save_pt #define hard_irq_enable() local_irq_enable() #define hard_irq_disable() local_irq_disable() -#include +#include #endif /* CONFIG_PPC64 */ Index: linux-rt-rebase.q/include/asm-powerpc/pmac_feature.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/pmac_feature.h +++ linux-rt-rebase.q/include/asm-powerpc/pmac_feature.h @@ 
-378,7 +378,7 @@ extern struct macio_chip* macio_find(str * Those are exported by pmac feature for internal use by arch code * only like the platform function callbacks, do not use directly in drivers */ -extern spinlock_t feature_lock; +extern raw_spinlock_t feature_lock; extern struct device_node *uninorth_node; extern u32 __iomem *uninorth_base; patches/preempt-realtime-cfs-accounting-fix.patch0000664000077200007720000000217210655544575021525 0ustar mingomingoSubject: [Patch RT] Fix CFS load balancing for RT tasks From: Sébastien Dugué The RT overload mechanism of the O(1) scheduler has not been activated in the new CFS. This patch fixes that by inserting calls to inc_rt_tasks() and dec_rt_tasks() in enqueue_task_rt() and dequeue_task_rt() respectively, which enables the balance_rt_tasks() to be run in the rt_overload case. Signed-off-by: Sébastien Dugué --- kernel/sched_rt.c | 4 ++++ 1 file changed, 4 insertions(+) Index: linux-rt-rebase.q/kernel/sched_rt.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched_rt.c +++ linux-rt-rebase.q/kernel/sched_rt.c @@ -32,6 +32,8 @@ enqueue_task_rt(struct rq *rq, struct ta list_add_tail(&p->run_list, array->queue + p->prio); __set_bit(p->prio, array->bitmap); + + inc_rt_tasks(p, rq); } /* @@ -44,6 +46,8 @@ dequeue_task_rt(struct rq *rq, struct ta update_curr_rt(rq, now); + dec_rt_tasks(p, rq); + list_del(&p->run_list); if (list_empty(array->queue + p->prio)) __clear_bit(p->prio, array->bitmap); patches/rt-page_alloc.patch0000664000077200007720000001465510655544573015304 0ustar mingomingoSubject: rt-friendly per-cpu pages From: Ingo Molnar rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method into a preemptible, explicit-per-cpu-locks method. 
Signed-off-by: Ingo Molnar --- mm/page_alloc.c | 111 +++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 79 insertions(+), 32 deletions(-) Index: linux-rt-rebase.q/mm/page_alloc.c =================================================================== --- linux-rt-rebase.q.orig/mm/page_alloc.c +++ linux-rt-rebase.q/mm/page_alloc.c @@ -145,6 +145,53 @@ static unsigned long __meminitdata dma_r EXPORT_SYMBOL(movable_zone); #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ +#ifdef CONFIG_PREEMPT_RT +static DEFINE_PER_CPU_LOCKED(int, pcp_locks); +#endif + +static inline void __lock_cpu_pcp(unsigned long *flags, int cpu) +{ +#ifdef CONFIG_PREEMPT_RT + spin_lock(&__get_cpu_lock(pcp_locks, cpu)); + flags = 0; +#else + local_irq_save(*flags); +#endif +} + +static inline void lock_cpu_pcp(unsigned long *flags, int *this_cpu) +{ +#ifdef CONFIG_PREEMPT_RT + (void)get_cpu_var_locked(pcp_locks, this_cpu); + flags = 0; +#else + local_irq_save(*flags); + *this_cpu = smp_processor_id(); +#endif +} + +static inline void unlock_cpu_pcp(unsigned long flags, int this_cpu) +{ +#ifdef CONFIG_PREEMPT_RT + put_cpu_var_locked(pcp_locks, this_cpu); +#else + local_irq_restore(flags); +#endif +} + +static struct per_cpu_pageset * +get_zone_pcp(struct zone *zone, unsigned long *flags, int *this_cpu) +{ + lock_cpu_pcp(flags, this_cpu); + return zone_pcp(zone, *this_cpu); +} + +static void +put_zone_pcp(struct zone *zone, unsigned long flags, int this_cpu) +{ + unlock_cpu_pcp(flags, this_cpu); +} + #if MAX_NUMNODES > 1 int nr_node_ids __read_mostly = MAX_NUMNODES; EXPORT_SYMBOL(nr_node_ids); @@ -399,8 +446,8 @@ static inline int page_is_buddy(struct p * -- wli */ -static inline void __free_one_page(struct page *page, - struct zone *zone, unsigned int order) +static inline void +__free_one_page(struct page *page, struct zone *zone, unsigned int order) { unsigned long page_idx; int order_size = 1 << order; @@ -504,8 +551,9 @@ static void free_one_page(struct zone *z static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; - int i; int reserved = 0; + int this_cpu; + int i; for (i = 0 ; i < (1 << order) ; ++i) reserved += free_pages_check(page + i); @@ -517,10 +565,10 @@ static void __free_pages_ok(struct page arch_free_page(page, order); kernel_map_pages(page, 1 << order, 0); - local_irq_save(flags); - __count_vm_events(PGFREE, 1 << order); + lock_cpu_pcp(&flags, &this_cpu); + count_vm_events(PGFREE, 1 << order); free_one_page(page_zone(page), page, order); - local_irq_restore(flags); + unlock_cpu_pcp(flags, this_cpu); } /* @@ -687,23 +735,19 @@ static int rmqueue_bulk(struct zone *zon */ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { - unsigned long flags; int to_drain; - local_irq_save(flags); if (pcp->count >= pcp->batch) to_drain = pcp->batch; else to_drain = pcp->count; free_pages_bulk(zone, to_drain, &pcp->list, 0); pcp->count -= to_drain; - local_irq_restore(flags); } #endif static void __drain_pages(unsigned int cpu) { - unsigned long flags; struct zone *zone; int i; @@ -714,14 +758,16 @@ static void __drain_pages(unsigned int c continue; pset = zone_pcp(zone, cpu); + if (!pset) { + WARN_ON(1); + continue; + } for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { struct per_cpu_pages *pcp; pcp = &pset->pcp[i]; - local_irq_save(flags); free_pages_bulk(zone, pcp->count, &pcp->list, 0); pcp->count = 0; - local_irq_restore(flags); } } } @@ -767,10 +813,11 @@ void mark_free_pages(struct zone *zone) void drain_local_pages(void) { unsigned long flags; + int 
this_cpu; - local_irq_save(flags); - __drain_pages(smp_processor_id()); - local_irq_restore(flags); + lock_cpu_pcp(&flags, &this_cpu); + __drain_pages(this_cpu); + unlock_cpu_pcp(flags, this_cpu); } #endif /* CONFIG_HIBERNATION */ @@ -780,8 +827,10 @@ void drain_local_pages(void) static void fastcall free_hot_cold_page(struct page *page, int cold) { struct zone *zone = page_zone(page); + struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; unsigned long flags; + int this_cpu; if (PageAnon(page)) page->mapping = NULL; @@ -793,24 +842,25 @@ static void fastcall free_hot_cold_page( arch_free_page(page, 0); kernel_map_pages(page, 1, 0); - pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; - local_irq_save(flags); - __count_vm_event(PGFREE); + pset = get_zone_pcp(zone, &flags, &this_cpu); + pcp = &pset->pcp[cold]; + + count_vm_event(PGFREE); + list_add(&page->lru, &pcp->list); pcp->count++; if (pcp->count >= pcp->high) { free_pages_bulk(zone, pcp->batch, &pcp->list, 0); pcp->count -= pcp->batch; } - local_irq_restore(flags); - put_cpu(); + put_zone_pcp(zone, flags, this_cpu); } void fastcall free_hot_page(struct page *page) { free_hot_cold_page(page, 0); } - + void fastcall free_cold_page(struct page *page) { free_hot_cold_page(page, 1); @@ -842,18 +892,17 @@ void split_page(struct page *page, unsig static struct page *buffered_rmqueue(struct zonelist *zonelist, struct zone *zone, int order, gfp_t gfp_flags) { + int cold = !!(gfp_flags & __GFP_COLD); + struct per_cpu_pageset *pset; unsigned long flags; struct page *page; - int cold = !!(gfp_flags & __GFP_COLD); - int cpu; + int this_cpu; again: - cpu = get_cpu(); + pset = get_zone_pcp(zone, &flags, &this_cpu); if (likely(order == 0)) { - struct per_cpu_pages *pcp; + struct per_cpu_pages *pcp = &pset->pcp[cold]; - pcp = &zone_pcp(zone, cpu)->pcp[cold]; - local_irq_save(flags); if (!pcp->count) { pcp->count = rmqueue_bulk(zone, 0, pcp->batch, &pcp->list); @@ -864,7 +913,7 @@ again: list_del(&page->lru); pcp->count--; } else { - spin_lock_irqsave(&zone->lock, flags); + spin_lock(&zone->lock); page = __rmqueue(zone, order); spin_unlock(&zone->lock); if (!page) @@ -873,8 +922,7 @@ again: __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(zonelist, zone); - local_irq_restore(flags); - put_cpu(); + put_zone_pcp(zone, flags, this_cpu); VM_BUG_ON(bad_range(zone, page)); if (prep_new_page(page, order, gfp_flags)) @@ -882,8 +930,7 @@ again: return page; failed: - local_irq_restore(flags); - put_cpu(); + put_zone_pcp(zone, flags, this_cpu); return NULL; } patches/lockdep_lock_set_subclass_fix.patch0000664000077200007720000000101410655544576020630 0ustar mingomingo Signed-off-by: Peter Zijlstra --- kernel/lockdep.c | 3 +++ 1 file changed, 3 insertions(+) Index: linux-rt-rebase.q/kernel/lockdep.c =================================================================== --- linux-rt-rebase.q.orig/kernel/lockdep.c +++ linux-rt-rebase.q/kernel/lockdep.c @@ -2750,6 +2750,9 @@ lock_set_subclass(struct lockdep_map *lo { unsigned long flags; + if (unlikely(!lock_stat && !prove_locking)) + return; + if (unlikely(current->lockdep_recursion)) return; patches/softlockup-better-printout.patch0000664000077200007720000000757310655544576020135 0ustar mingomingoSubject: softlockup: improve debug output From: Ingo Molnar improve the debuggability of kernel lockups by enhancing the debug output of the softlockup detector: print the task that causes the lockup and try to print a more intelligent backtrace. the old format was: BUG: soft lockup detected on CPU#1! 
[] show_trace_log_lvl+0x19/0x2e [] show_trace+0x12/0x14 [] dump_stack+0x14/0x16 [] softlockup_tick+0xbe/0xd0 [] run_local_timers+0x12/0x14 [] update_process_times+0x3e/0x63 [] tick_sched_timer+0x7c/0xc0 [] hrtimer_interrupt+0x135/0x1ba [] smp_apic_timer_interrupt+0x6e/0x80 [] apic_timer_interrupt+0x33/0x38 [] syscall_call+0x7/0xb ======================= the new format is: BUG: soft lockup detected on CPU#1! [prctl:2363] Pid: 2363, comm: prctl EIP: 0060:[] CPU: 1 EIP is at sys_prctl+0x24/0x18c EFLAGS: 00000213 Not tainted (2.6.22-cfs-v20 #26) EAX: 00000001 EBX: 000003e7 ECX: 00000001 EDX: f6df0000 ESI: 000003e7 EDI: 000003e7 EBP: f6df0fb0 DS: 007b ES: 007b FS: 00d8 CR0: 8005003b CR2: 4d8c3340 CR3: 3731d000 CR4: 000006d0 [] show_trace_log_lvl+0x19/0x2e [] show_trace+0x12/0x14 [] show_regs+0x1ab/0x1b3 [] softlockup_tick+0xef/0x108 [] run_local_timers+0x12/0x14 [] update_process_times+0x3e/0x63 [] tick_sched_timer+0x7c/0xc0 [] hrtimer_interrupt+0x135/0x1ba [] smp_apic_timer_interrupt+0x6e/0x80 [] apic_timer_interrupt+0x33/0x38 [] syscall_call+0x7/0xb ======================= note that in the old format we only knew that some system call locked up, we didnt know _which_. With the new format we know that it's at a specific place in sys_prctl(). [which was where i created an artificial kernel lockup to test the new format.] this is also useful if the lockup happens in user-space - the user-space EIP (and other registers) will be printed too. (such a lockup would either suggest that the task was running at SCHED_FIFO:99 and looping for more than 10 seconds, or that the softlockup detector has a false-positive.) the task name is printed too first, just in case we dont manage to print a useful backtrace. Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) Index: linux-rt-rebase.q/kernel/softlockup.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softlockup.c +++ linux-rt-rebase.q/kernel/softlockup.c @@ -15,6 +15,8 @@ #include #include +#include + static DEFINE_RAW_SPINLOCK(print_lock); static DEFINE_PER_CPU(unsigned long, touch_timestamp); @@ -70,6 +72,7 @@ void softlockup_tick(void) int this_cpu = smp_processor_id(); unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu); unsigned long print_timestamp; + struct pt_regs *regs; unsigned long now; if (touch_timestamp == 0) { @@ -99,15 +102,21 @@ void softlockup_tick(void) wake_up_process(per_cpu(watchdog_task, this_cpu)); /* Warn about unreasonable 10+ seconds delays: */ - if (now > (touch_timestamp + 10)) { - per_cpu(print_timestamp, this_cpu) = touch_timestamp; + if (now <= (touch_timestamp + 10)) + return; + + regs = get_irq_regs(); - spin_lock(&print_lock); - printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n", - this_cpu); + per_cpu(print_timestamp, this_cpu) = touch_timestamp; + + spin_lock(&print_lock); + printk(KERN_ERR "BUG: soft lockup detected on CPU#%d! 
[%s:%d]\n", + this_cpu, current->comm, current->pid); + if (regs) + show_regs(regs); + else dump_stack(); - spin_unlock(&print_lock); - } + spin_unlock(&print_lock); } /* patches/fix-emac-locking-2.6.16.patch0000664000077200007720000000556310655544574016437 0ustar mingomingo drivers/net/ibm_emac/ibm_emac_core.c | 11 +++++++++++ drivers/net/ibm_emac/ibm_emac_core.h | 2 ++ 2 files changed, 13 insertions(+) Index: linux-rt-rebase.q/drivers/net/ibm_emac/ibm_emac_core.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/ibm_emac/ibm_emac_core.c +++ linux-rt-rebase.q/drivers/net/ibm_emac/ibm_emac_core.c @@ -1059,6 +1059,8 @@ static inline int emac_xmit_finish(struc ++dev->stats.tx_packets; dev->stats.tx_bytes += len; + spin_unlock(&dev->tx_lock); + return 0; } @@ -1072,6 +1074,7 @@ static int emac_start_xmit(struct sk_buf u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY | MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb); + spin_lock(&dev->tx_lock); slot = dev->tx_slot++; if (dev->tx_slot == NUM_TX_BUFF) { dev->tx_slot = 0; @@ -1134,6 +1137,8 @@ static int emac_start_xmit_sg(struct sk_ if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE)) return emac_start_xmit(skb, ndev); + spin_lock(&dev->tx_lock); + len -= skb->data_len; /* Note, this is only an *estimation*, we can still run out of empty @@ -1202,6 +1207,7 @@ static int emac_start_xmit_sg(struct sk_ stop_queue: netif_stop_queue(ndev); DBG2("%d: stopped TX queue" NL, dev->def->index); + spin_unlock(&dev->tx_lock); return 1; } #else @@ -1241,6 +1247,7 @@ static void emac_poll_tx(void *param) DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt, dev->ack_slot); + spin_lock(&dev->tx_lock); if (dev->tx_cnt) { u16 ctrl; int slot = dev->ack_slot, n = 0; @@ -1250,6 +1257,7 @@ static void emac_poll_tx(void *param) struct sk_buff *skb = dev->tx_skb[slot]; ++n; + spin_unlock(&dev->tx_lock); if (skb) { dev_kfree_skb(skb); dev->tx_skb[slot] = NULL; @@ -1259,6 +1267,7 @@ static void emac_poll_tx(void *param) if (unlikely(EMAC_IS_BAD_TX(ctrl))) emac_parse_tx_error(dev, ctrl); + spin_lock(&dev->tx_lock); if (--dev->tx_cnt) goto again; } @@ -1271,6 +1280,7 @@ static void emac_poll_tx(void *param) DBG2("%d: tx %d pkts" NL, dev->def->index, n); } } + spin_unlock(&dev->tx_lock); } static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot, @@ -1963,6 +1973,7 @@ static int __init emac_probe(struct ocp_ dev->ldev = &ocpdev->dev; dev->def = ocpdev->def; SET_MODULE_OWNER(ndev); + spin_lock_init(&dev->tx_lock); /* Find MAL device we are connected to */ maldev = Index: linux-rt-rebase.q/drivers/net/ibm_emac/ibm_emac_core.h =================================================================== --- linux-rt-rebase.q.orig/drivers/net/ibm_emac/ibm_emac_core.h +++ linux-rt-rebase.q/drivers/net/ibm_emac/ibm_emac_core.h @@ -193,6 +193,8 @@ struct ocp_enet_private { struct ibm_emac_error_stats estats; struct net_device_stats nstats; + spinlock_t tx_lock; + struct device* ldev; }; patches/softirq-per-cpu-assumptions-fixes.patch0000664000077200007720000001303410655544576021323 0ustar mingomingo--- kernel/hrtimer.c | 38 +++++++++++++++++++++----------------- kernel/sched.c | 2 +- kernel/softirq.c | 5 +++-- kernel/timer.c | 2 +- 4 files changed, 26 insertions(+), 21 deletions(-) Index: linux-rt-rebase.q/kernel/hrtimer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/hrtimer.c +++ linux-rt-rebase.q/kernel/hrtimer.c @@ -336,9 +336,9 @@ static 
inline int hrtimer_is_hres_enable /* * Is the high resolution mode active ? */ -static inline int hrtimer_hres_active(void) +static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base) { - return __get_cpu_var(hrtimer_bases).hres_active; + return cpu_base->hres_active; } /* @@ -415,11 +415,12 @@ static int hrtimer_reprogram(struct hrti */ static void retrigger_next_event(void *arg) { - struct hrtimer_cpu_base *base; + struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); + struct timespec realtime_offset; unsigned long seq; - if (!hrtimer_hres_active()) + if (!hrtimer_hres_active(base)) return; do { @@ -429,8 +430,6 @@ static void retrigger_next_event(void *a -wall_to_monotonic.tv_nsec); } while (read_seqretry(&xtime_lock, seq)); - base = &__get_cpu_var(hrtimer_bases); - /* Adjust CLOCK_REALTIME offset */ spin_lock(&base->lock); base->clock_base[CLOCK_REALTIME].offset = @@ -552,10 +551,8 @@ static inline int hrtimer_enqueue_reprog /* * Switch to high resolution mode */ -static int hrtimer_switch_to_hres(void) +static int hrtimer_switch_to_hres(struct hrtimer_cpu_base *base) { - int cpu = smp_processor_id(); - struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu); unsigned long flags; if (base->hres_active) @@ -566,7 +563,7 @@ static int hrtimer_switch_to_hres(void) if (tick_init_highres()) { local_irq_restore(flags); printk(KERN_WARNING "Could not switch to high resolution " - "mode on CPU %d\n", cpu); + "mode on CPU %d\n", raw_smp_processor_id()); return 0; } base->hres_active = 1; @@ -584,9 +581,15 @@ static int hrtimer_switch_to_hres(void) #else -static inline int hrtimer_hres_active(void) { return 0; } +static inline int hrtimer_hres_active(struct hrtimer_cpu_base *base) +{ + return 0; +} static inline int hrtimer_is_hres_enabled(void) { return 0; } -static inline int hrtimer_switch_to_hres(void) { return 0; } +static inline int hrtimer_switch_to_hres(struct hrtimer_cpu_base *base) +{ + return 0; +} static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, struct hrtimer_clock_base *base) @@ -778,7 +781,7 @@ static void __remove_hrtimer(struct hrti if (base->first == &timer->node) { base->first = rb_next(&timer->node); /* Reprogram the clock event device. if enabled */ - if (reprogram && hrtimer_hres_active()) + if (reprogram && hrtimer_hres_active(base->cpu_base)) hrtimer_force_reprogram(base->cpu_base); } rb_erase(&timer->node, &base->active); @@ -950,7 +953,7 @@ ktime_t hrtimer_get_next_event(void) spin_lock_irqsave(&cpu_base->lock, flags); - if (!hrtimer_hres_active()) { + if (!hrtimer_hres_active(cpu_base)) { for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { struct hrtimer *timer; @@ -1247,10 +1250,11 @@ static inline void run_hrtimer_queue(str */ void hrtimer_run_queues(void) { - struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + struct hrtimer_cpu_base *cpu_base; int i; - if (hrtimer_hres_active()) + cpu_base = &per_cpu(hrtimer_bases, raw_smp_processor_id()); + if (hrtimer_hres_active(cpu_base)) return; /* @@ -1262,7 +1266,7 @@ void hrtimer_run_queues(void) * deadlock vs. xtime_lock. 
*/ if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) - if (hrtimer_switch_to_hres()) + if (hrtimer_switch_to_hres(cpu_base)) return; hrtimer_get_softirq_time(cpu_base); Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -3504,7 +3504,7 @@ out: */ static void run_rebalance_domains(struct softirq_action *h) { - int this_cpu = smp_processor_id(); + int this_cpu = raw_smp_processor_id(); struct rq *this_rq = cpu_rq(this_cpu); enum cpu_idle_type idle = this_rq->idle_at_tick ? CPU_IDLE : CPU_NOT_IDLE; Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -411,12 +411,12 @@ void do_softirq_from_hardirq(void) { unsigned long p_flags; - if (!local_softirq_pending()) - return; /* * 'immediate' softirq execution, from hardirq context: */ local_irq_disable(); + if (!local_softirq_pending()) + goto out; __local_bh_disable((unsigned long)__builtin_return_address(0)); #ifndef CONFIG_PREEMPT_SOFTIRQS trace_softirq_enter(); @@ -436,6 +436,7 @@ void do_softirq_from_hardirq(void) current->flags &= ~PF_SOFTIRQ; _local_bh_enable(); +out: local_irq_enable(); } Index: linux-rt-rebase.q/kernel/timer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/timer.c +++ linux-rt-rebase.q/kernel/timer.c @@ -1013,7 +1013,7 @@ static inline void update_times(void) */ static void run_timer_softirq(struct softirq_action *h) { - tvec_base_t *base = __get_cpu_var(tvec_bases); + tvec_base_t *base = per_cpu(tvec_bases, raw_smp_processor_id()); update_times(); hrtimer_run_queues(); patches/latency-tracer-disable-across-trace-cmdline.patch0000664000077200007720000000422610655544571023075 0ustar mingomingoFrom jan.altenberg@linutronix.de Tue Jun 19 16:07:25 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from [192.168.0.182] (unknown [91.89.185.36]) (using SSLv3 with cipher RC4-MD5 (128/128 bits)) (No client certificate requested) by mail.tglx.de (Postfix) with ESMTP id C80AC65C065; Tue, 19 Jun 2007 16:07:25 +0200 (CEST) Subject: freeze with mcount_enabled=1 From: Jan Altenberg To: Ingo Molnar Cc: tglx@linutronix.de Content-Type: text/plain Date: Tue, 19 Jun 2007 16:07:25 +0200 Message-Id: <1182262045.3793.111.camel@bender> Mime-Version: 1.0 X-Mailer: Evolution 2.8.3 (2.8.3-2.fc6) X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Hi Ingo, Hi Thomas, I've seen reproducable freezes on ARM for user triggered latency traces with mcount_enabled = 1: echo 1 > /proc/sys/kernel/mcount_enabled cyclictest -p80 -n -b 300 ends up in a frozen system. I added some instrumentation to the latency tracer code and it looks like the freeze happens when _trace_cmdline() is called from user_trace_start(). _trace_cmdline() calls ____trace() and after that we freeze... With the following Patch, the freezes seem to disappear (tested on a SAM926x and an EP93). I think, this isn't the correct solution, but it might give you an idea, what goes wrong. Let me know, if you have something new for testing. 
Regards, Jan --- --- kernel/latency_trace.c | 2 ++ 1 file changed, 2 insertions(+) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -2365,10 +2365,12 @@ long user_trace_start(void) #endif reset_trace_idx(cpu, tr); + atomic_inc(&tr->disabled); tr->critical_sequence = max_sequence; tr->preempt_timestamp = get_monotonic_cycles(); tr->critical_start = CALLER_ADDR0; _trace_cmdline(cpu, tr); + atomic_dec(&tr->disabled); mcount(); WARN_ON(!irqs_disabled()); patches/tasklet-redesign.patch0000664000077200007720000002113010655544574016023 0ustar mingomingoFrom: Ingo Molnar tasklet redesign: make it saner and make it easier to thread. Signed-off-by: Ingo Molnar ---- include/linux/interrupt.h | 39 ++++++----- kernel/softirq.c | 155 +++++++++++++++++++++++++++++++--------------- 2 files changed, 128 insertions(+), 66 deletions(-) Index: linux-rt-rebase.q/include/linux/interrupt.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/interrupt.h +++ linux-rt-rebase.q/include/linux/interrupt.h @@ -323,8 +323,9 @@ extern void wait_for_softirq(int softirq to be executed on some cpu at least once after this. * If the tasklet is already scheduled, but its excecution is still not started, it will be executed only once. - * If this tasklet is already running on another CPU (or schedule is called - from tasklet itself), it is rescheduled for later. + * If this tasklet is already running on another CPU, it is rescheduled + for later. + * Schedule must not be called from the tasklet itself (a lockup occurs) * Tasklet is strictly serialized wrt itself, but not wrt another tasklets. If client needs some intertask synchronization, he makes it with spinlocks. 
@@ -349,15 +350,25 @@ struct tasklet_struct name = { NULL, 0, enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ - TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ + TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ + TASKLET_STATE_PENDING /* Tasklet is pending */ }; -#ifdef CONFIG_SMP +#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) +#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) + +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } +static inline int tasklet_tryunlock(struct tasklet_struct *t) +{ + return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; +} + static inline void tasklet_unlock(struct tasklet_struct *t) { smp_mb__before_clear_bit(); @@ -369,9 +380,10 @@ static inline void tasklet_unlock_wait(s while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } } #else -#define tasklet_trylock(t) 1 -#define tasklet_unlock_wait(t) do { } while (0) -#define tasklet_unlock(t) do { } while (0) +# define tasklet_trylock(t) 1 +# define tasklet_tryunlock(t) 1 +# define tasklet_unlock_wait(t) do { } while (0) +# define tasklet_unlock(t) do { } while (0) #endif extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t)); @@ -404,17 +416,8 @@ static inline void tasklet_disable(struc smp_mb(); } -static inline void tasklet_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic_dec(); - atomic_dec(&t->count); -} - -static inline void tasklet_hi_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic_dec(); - atomic_dec(&t->count); -} +extern fastcall void tasklet_enable(struct tasklet_struct *t); +extern fastcall void tasklet_hi_enable(struct tasklet_struct *t); extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -457,14 +457,24 @@ struct tasklet_head static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL }; static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL }; +static void inline +__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) +{ + if (tasklet_trylock(t)) { + WARN_ON(t->next != NULL); + t->next = head->list; + head->list = t; + raise_softirq_irqoff(nr); + tasklet_unlock(t); + } +} + void fastcall __tasklet_schedule(struct tasklet_struct *t) { unsigned long flags; local_irq_save(flags); - t->next = __get_cpu_var(tasklet_vec).list; - __get_cpu_var(tasklet_vec).list = t; - raise_softirq_irqoff(TASKLET_SOFTIRQ); + __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ); local_irq_restore(flags); } @@ -475,81 +485,130 @@ void fastcall __tasklet_hi_schedule(stru unsigned long flags; local_irq_save(flags); - t->next = __get_cpu_var(tasklet_hi_vec).list; - __get_cpu_var(tasklet_hi_vec).list = t; - raise_softirq_irqoff(HI_SOFTIRQ); + __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(__tasklet_hi_schedule); -static void tasklet_action(struct softirq_action *a) +void fastcall tasklet_enable(struct tasklet_struct *t) { - struct tasklet_struct *list; + if (!atomic_dec_and_test(&t->count)) + return; + if 
(test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) + tasklet_schedule(t); +} - local_irq_disable(); - list = __get_cpu_var(tasklet_vec).list; - __get_cpu_var(tasklet_vec).list = NULL; - local_irq_enable(); +EXPORT_SYMBOL(tasklet_enable); + +void fastcall tasklet_hi_enable(struct tasklet_struct *t) +{ + if (!atomic_dec_and_test(&t->count)) + return; + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) + tasklet_hi_schedule(t); +} + +EXPORT_SYMBOL(tasklet_hi_enable); + +static void +__tasklet_action(struct softirq_action *a, struct tasklet_struct *list) +{ + int loops = 1000000; while (list) { struct tasklet_struct *t = list; list = list->next; + /* + * Should always succeed - after a tasklist got on the + * list (after getting the SCHED bit set from 0 to 1), + * nothing but the tasklet softirq it got queued to can + * lock it: + */ + if (!tasklet_trylock(t)) { + WARN_ON(1); + continue; + } + + t->next = NULL; + + /* + * If we cannot handle the tasklet because it's disabled, + * mark it as pending. tasklet_enable() will later + * re-schedule the tasklet. + */ + if (unlikely(atomic_read(&t->count))) { +out_disabled: + /* implicit unlock: */ + wmb(); + t->state = TASKLET_STATEF_PENDING; + continue; + } - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) - BUG(); - t->func(t->data); + /* + * After this point on the tasklet might be rescheduled + * on another CPU, but it can only be added to another + * CPU's tasklet list if we unlock the tasklet (which we + * dont do yet). + */ + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + WARN_ON(1); + +again: + t->func(t->data); + + /* + * Try to unlock the tasklet. We must use cmpxchg, because + * another CPU might have scheduled or disabled the tasklet. + * We only allow the STATE_RUN -> 0 transition here. 
+ */ + while (!tasklet_tryunlock(t)) { + /* + * If it got disabled meanwhile, bail out: + */ + if (atomic_read(&t->count)) + goto out_disabled; + /* + * If it got scheduled meanwhile, re-execute + * the tasklet function: + */ + if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + goto again; + if (!--loops) { + printk("hm, tasklet state: %08lx\n", t->state); + WARN_ON(1); tasklet_unlock(t); - continue; + break; } - tasklet_unlock(t); } - - local_irq_disable(); - t->next = __get_cpu_var(tasklet_vec).list; - __get_cpu_var(tasklet_vec).list = t; - __do_raise_softirq_irqoff(TASKLET_SOFTIRQ); - local_irq_enable(); } } -static void tasklet_hi_action(struct softirq_action *a) +static void tasklet_action(struct softirq_action *a) { struct tasklet_struct *list; local_irq_disable(); - list = __get_cpu_var(tasklet_hi_vec).list; - __get_cpu_var(tasklet_hi_vec).list = NULL; + list = __get_cpu_var(tasklet_vec).list; + __get_cpu_var(tasklet_vec).list = NULL; local_irq_enable(); - while (list) { - struct tasklet_struct *t = list; + __tasklet_action(a, list); +} - list = list->next; +static void tasklet_hi_action(struct softirq_action *a) +{ + struct tasklet_struct *list; - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) - BUG(); - t->func(t->data); - tasklet_unlock(t); - continue; - } - tasklet_unlock(t); - } + local_irq_disable(); + list = __get_cpu_var(tasklet_hi_vec).list; + __get_cpu_var(tasklet_hi_vec).list = NULL; + local_irq_enable(); - local_irq_disable(); - t->next = __get_cpu_var(tasklet_hi_vec).list; - __get_cpu_var(tasklet_hi_vec).list = t; - __do_raise_softirq_irqoff(HI_SOFTIRQ); - local_irq_enable(); - } + __tasklet_action(a, list); } - void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data) { patches/tasklet-more-fixes.patch0000664000077200007720000001575210655544574016316 0ustar mingomingoFrom linux-kernel-owner@vger.kernel.org Thu Jun 14 23:21:31 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=none autolearn=unavailable version=3.1.7-deb Received: from vger.kernel.org (vger.kernel.org [209.132.176.167]) by mail.tglx.de (Postfix) with ESMTP id F2D8065C3D9 for ; Thu, 14 Jun 2007 23:21:31 +0200 (CEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1756447AbXFNVVF (ORCPT ); Thu, 14 Jun 2007 17:21:05 -0400 Received: (majordomo@vger.kernel.org) by vger.kernel.org id S1753441AbXFNVUw (ORCPT ); Thu, 14 Jun 2007 17:20:52 -0400 Received: from e33.co.us.ibm.com ([32.97.110.151]:53331 "EHLO e33.co.us.ibm.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752693AbXFNVUv (ORCPT ); Thu, 14 Jun 2007 17:20:51 -0400 Received: from d03relay02.boulder.ibm.com (d03relay02.boulder.ibm.com [9.17.195.227]) by e33.co.us.ibm.com (8.13.8/8.13.8) with ESMTP id l5ELKnM3030113 for ; Thu, 14 Jun 2007 17:20:49 -0400 Received: from d03av01.boulder.ibm.com (d03av01.boulder.ibm.com [9.17.195.167]) by d03relay02.boulder.ibm.com (8.13.8/8.13.8/NCO v8.3) with ESMTP id l5ELKniv268710 for ; Thu, 14 Jun 2007 15:20:49 -0600 Received: from d03av01.boulder.ibm.com (loopback [127.0.0.1]) by d03av01.boulder.ibm.com (8.12.11.20060308/8.13.3) with ESMTP id l5ELKm9A010919 for ; Thu, 14 Jun 2007 15:20:49 -0600 Received: from [9.67.41.186] (wecm-9-67-41-186.wecm.ibm.com [9.67.41.186]) by d03av01.boulder.ibm.com (8.12.11.20060308/8.12.11) with ESMTP id l5ELKl3X010835; Thu, 14 Jun 2007 
15:20:47 -0600 Subject: Re: [PATCH -rt] Fix TASKLET_STATE_SCHED WARN_ON() From: john stultz To: Ingo Molnar Cc: Thomas Gleixner , Steven Rostedt , "Paul E. McKenney" , lkml In-Reply-To: <1181096244.6018.20.camel@localhost> References: <1181096244.6018.20.camel@localhost> Content-Type: text/plain Date: Thu, 14 Jun 2007 14:20:20 -0700 Message-Id: <1181856020.6276.14.camel@localhost.localdomain> Mime-Version: 1.0 X-Mailer: Evolution 2.10.1 Sender: linux-kernel-owner@vger.kernel.org Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org X-Filter-To: .Kernel.LKML X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit On Tue, 2007-06-05 at 19:17 -0700, john stultz wrote: > Hey Ingo, > So we've been seeing the following trace fairly frequently on our SMP > boxes when running kernbench: > > BUG: at kernel/softirq.c:639 __tasklet_action() > > Call Trace: > [] dump_trace+0xaa/0x32a > [] show_trace+0x41/0x5c > [] dump_stack+0x15/0x17 > [] __tasklet_action+0xdf/0x12e > [] tasklet_action+0x27/0x29 > [] ksoftirqd+0x16c/0x271 > [] kthread+0xf5/0x128 > [] child_rip+0xa/0x12 > > > Paul also pointed this out awhile back: http://lkml.org/lkml/2007/2/25/1 > > > Anyway, I think I finally found the issue. Its a bit hard to explain, > but the idea is while __tasklet_action is running the tasklet function > on CPU1, if a call to tasklet_schedule() on CPU2 is made, and if right > after we mark the TASKLET_STATE_SCHED bit we are preempted, > __tasklet_action on CPU1 might be able to re-run the function, clear the > bit and unlock the tasklet before CPU2 enters __tasklet_common_schedule. > Once __tasklet_common_schedule locks the tasklet, we will add the > tasklet to the list with the TASKLET_STATE_SCHED *unset*. > > I've verified this race occurs w/ a WARN_ON in > __tasklet_common_schedule(). > > > This fix avoids this race by making sure *after* we've locked the > tasklet that the STATE_SCHED bit is set before adding it to the list. > > Does it look ok to you? > > thanks > -john > > Signed-off-by: John Stultz > > Index: 2.6-rt/kernel/softirq.c > =================================================================== > --- 2.6-rt.orig/kernel/softirq.c 2007-06-05 18:30:54.000000000 -0700 > +++ 2.6-rt/kernel/softirq.c 2007-06-05 18:36:44.000000000 -0700 > @@ -544,10 +544,17 @@ static void inline > __tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) > { > if (tasklet_trylock(t)) { > - WARN_ON(t->next != NULL); > - t->next = head->list; > - head->list = t; > - raise_softirq_irqoff(nr); > + /* We may have been preempted before tasklet_trylock > + * and __tasklet_action may have already run. > + * So double check the sched bit while the takslet > + * is locked before adding it to the list. > + */ > + if (test_bit(TASKLET_STATE_SCHED, &t->state)) { > + WARN_ON(t->next != NULL); > + t->next = head->list; > + head->list = t; > + raise_softirq_irqoff(nr); > + } > tasklet_unlock(t); > } > } So while digging on a strange OOM issue we were seeing (which actually ended up being fixed by Steven's softirq patch), I noticed that the fix above is incomplete. With only the patch above, we may no longer have unscheduled tasklets added to the list, but we may end up with scheduled tasklets that are not on the list (and will stay that way!). The following additional patch should correct this issue. Although since we weren't actually hitting it, the issue is a bit theoretical, so I've not been able to prove it really fixes anything. 
thanks -john --- kernel/softirq.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -462,6 +462,7 @@ static void inline __tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) { if (tasklet_trylock(t)) { +again: /* We may have been preempted before tasklet_trylock * and __tasklet_action may have already run. * So double check the sched bit while the takslet @@ -472,8 +473,21 @@ __tasklet_common_schedule(struct tasklet t->next = head->list; head->list = t; raise_softirq_irqoff(nr); + tasklet_unlock(t); + } else { + /* This is subtle. If we hit the corner case above + * It is possible that we get preempted right here, + * and another task has successfully called + * tasklet_schedule(), then this function, and + * failed on the trylock. Thus we must be sure + * before releasing the tasklet lock, that the + * SCHED_BIT is clear. Otherwise the tasklet + * may get its SCHED_BIT set, but not added to the + * list + */ + if (!tasklet_tryunlock(t)) + goto again; } - tasklet_unlock(t); } } patches/schedule_on_each_cpu-enhance-rt.patch0000664000077200007720000000140310655544577020723 0ustar mingomingo--- kernel/workqueue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/kernel/workqueue.c =================================================================== --- linux-rt-rebase.q.orig/kernel/workqueue.c +++ linux-rt-rebase.q/kernel/workqueue.c @@ -620,7 +620,7 @@ int schedule_on_each_cpu(void (*func)(vo } } - preempt_disable(); /* CPU hotplug */ + lock_cpu_hotplug(); for_each_online_cpu(cpu) { struct schedule_on_each_cpu_work *work; @@ -633,7 +633,7 @@ int schedule_on_each_cpu(void (*func)(vo set_bit(WORK_STRUCT_PENDING, work_data_bits(&work->work)); __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), &work->work); } - preempt_enable(); + unlock_cpu_hotplug(); out: for_each_possible_cpu(cpu) { patches/series0000644000077200007720000003343410655544577012765 0ustar mingomingo# # base tree: 2.6.23-rc1 # # # Linus latest # #origin.patch # # CFS queue: # # # Futex-fixes in -mm # # # hrt: 2.6.23-rc1-hrt1 queue # # Basic cpuidle patches git-acpi.patch # # Generic hrtimer/time/clockevent/source patches # #i386-hpet-check-if-the-counter-works.patch #clockevents-remove-unused-code.patch nohz-fix-nohz-x86-dyntick-idle-handling.patch acpi-move-timer-broadcast-and-pmtimer-access-before-c3-arbiter-shutdown.patch # # x86-64 # clockevents-remove-unused-inline-function.patch clockevents-allow-build-without-runtime-use.patch x86_64-consolidate-tsc-calibration.patch i386-prepare-sharing-hpet-code.patch i386-hpet-add-x8664-hpet-bits.patch i386-prepare-sharing-pit-code.patch x86_64-use-i386-i8253-h.patch x86_64-preparatory-apic-set-lvtt.patch x86_64-apic-remove-bogus-pit-synchronization.patch x86_64-apic-shuffle-calibration-around.patch x86_64-apic-calibration-remove-divisor.patch x86_64-apic-change-setup-calling-convention.patch x86_64-apic-remove-nested-irq-disable.patch x86_64-prep-idle-loop-for-dynticks.patch x86_64-apic-add-clockevents-functions.patch x86_64-convert-to-clockevents.patch x86_64-remove-unused-code.patch x86_64-cleanup-apic-c.patch jiffies-remove-unused-macros.patch acpi-remove-the-useless-ifdef-code.patch i386-pit-remove-the-useless-ifdefs.patch i386-hpet-sharing-optimize.patch # # Venki's HPET series # 
ich-force-hpet-make-generic-time-capable-of-switching-broadcast-timer.patch ich-force-hpet-restructure-hpet-generic-clock-code.patch ich-force-hpet-ich7-or-later-quirk-to-force-detect-enable.patch ich-force-hpet-ich7-or-later-quirk-to-force-detect-enable-fix.patch ich-force-hpet-late-initialization-of-hpet-after-quirk.patch ich-force-hpet-ich5-quirk-to-force-detect-enable.patch ich-force-hpet-ich5-quirk-to-force-detect-enable-fix.patch ich-force-hpet-ich5-fix-a-bug-with-suspend-resume.patch ich-force-hpet-add-ich7_0-pciid-to-quirk-list.patch hpet-force-enable-on-ich34.patch hpet-force-enable-on-vt8235-37-chipsets.patch # # end of the -hrt queue # # # ARM clock events & co # ep93xx-timer-accuracy.patch ep93xx-clockevents.patch ep93xx-clockevents-fix.patch # arm-imx.patch - already upstream? # CHECKME arm-leds-timer.patch # Upstream submitted changes # # Check what's in mainline / mm or might be # upstream material. # spinlock-trylock-cleanup-sungem.patch x86_64-tsc-sync-irqflags-fix.patch neptune-no-at-keyboard.patch rtmutex-debug.h-cleanup.patch netpoll-8139too-fix.patch kprobes-preempt-fix.patch replace-bugon-by-warn-on.patch # Suspend / resume fixups i386-mark-atomic-irq-ops-raw.patch msi-suspend-resume-workaround.patch floppy-resume-fix.patch # # assorted fixlets from -mm: # # Check if they are really in -mm or should be submitted # hrtimers-overrun-api.patch mm-fix-latency.patch ioapic-fix-too-fast-clocks.patch fix-acpi-build-weirdness.patch write-try-lock-irqsave.patch move-native-irq.patch dont-unmask-io_apic.patch # # misc build beautification patches: # x86-64-smpboot-whitespace.patch gcc-warnings-shut-up.patch # # Various fixlets # # # Debugging patches # apic-dumpstack.patch netfilter-more-debugging.patch # # Latency tracer # nmi-profiling-base.patch add-notrace.patch redo-regparm-option.patch latency-tracing.patch latency-tracing-remove-trace-array.patch latency-tracer-disable-across-trace-cmdline.patch ns2cyc-result-fix.patch latency-tracing-i386-paravirt-fastcall.patch latency-tracing-i386.patch latency-tracing-x86_64.patch latency-tracing-ppc.patch ppc-remove-last-cpukhz.patch ppc-rename-xmon-mcount.patch ppc-add-mcount.patch ppc-mcount-dummy-functions.patch ppc-mark-notrace-mainline.patch ppc-add-ppc32-mcount.patch latency-tracer-printk-fix.patch latency-tracing-arm.patch latency-tracing-exclude-printk.patch latency-tracing-prctl-api-hack.patch latency-tracing-raw-spinlock-hack.patch latency-tracer-one-off-fix.patch smaller-trace.patch trace-name-plus.patch trace-with-caller-addr.patch trace-sti-mwait.patch latency-tracer-optimize-a-bit.patch idle-stop-critical-timing.patch arm-latency-tracer-support.patch latency-tracer-variable-threshold.patch # Needs to be rewritten to trigger on the procfs variable ! 
reset-latency-histogram.patch # # x86-64 unwinder # # not applied - it's a large chunk of code and Linus is very sceptical about it # #redo-unwinder.patch #unwinder-fix.patch # # lockdep queue: # latency-trace-fix.patch trace-cpuidle.patch lockdep-show-held-locks.patch lockdep-lock_set_subclass.patch lockdep-prettify.patch lockdep-more-entries.patch # # Revert loopback bh assumption patch # loopback-revert.patch # # hrtimer # hrtimer-trace.patch #hrtimer-no-getnstimeofday.patch #time-warp-detect.patch # # PPC gtod and highres support # ppc-gtod-support.patch ppc-gtod-support-fix.patch ppc-a-2.patch ppc-fix-clocksource-timebase-shift.patch ppc-remove-broken-vsyscall.patch ppc-read-persistent-clock.patch ppc-gtod-notrace-fix.patch ppc-clockevents.patch ppc-clockevents-fix.patch ppc-highres-dyntick.patch # # -rt queue: # inet_hash_bits.patch # tracing inet-hash-bits-ipv6-fix.patch undo-latency-tracing-raw-spinlock-hack.patch random-driver-latency-fix.patch latency-measurement-drivers.patch latency-measurement-drivers-fix.patch # # RCU preempt patches from Paul: # rcu-1.patch rcu-2.patch rcu-3.patch rcu-4.patch rcu-preempt-fix-nmi-watchdog.patch rcu-preempt-fix-rcu-torture.patch rcu-hrt-fixups.patch dynticks-rcu-rt-fixlet.patch rcu-tasklet-softirq.patch rcu-classic-fixup.patch # # ARM preperatory patches # rcu-warn-underflow.patch arm-cmpxchg.patch arm-fix-atomic-cmpxchg.patch arm-cmpxchg-support-armv6.patch arm-preempt-config.patch # # IRQ threading # preempt-softirqs-core.patch preempt-irqs-core.patch preempt-irqs-softirq-in-hardirq.patch preempt-irqs-direct-debug-keyboard.patch preempt-irqs-timer.patch preempt-irqs-hrtimer.patch preempt-irqs-i386.patch preempt-irqs-i386-ioapic-mask-quirk.patch preempt-irqs-mips.patch preempt-irqs-x86-64.patch preempt-irqs-x86-64-ioapic-mask-quirk.patch preempt-irqs-arm.patch preempt-irqs-arm-fix-oprofile.patch preempt-irqs-ppc.patch preempt-irqs-ppc-ack-irq-fixups.patch preempt-irqs-ppc-fix-b5.patch preempt-irqs-ppc-fix-b6.patch preempt-irqs-ppc-celleb-beatic-eoi.patch preempt-irqs-ppc-fix-more-fasteoi.patch preempt-irqs-ppc-preempt-schedule-irq-entry-fix.patch preempt-irqs-Kconfig.patch # # Real real time stuff :) # rt-apis.patch rt-slab-new.patch rt-page_alloc.patch # # rt-mutexes # rt-mutex-preempt-debugging.patch rt-mutex-irq-flags-checking.patch rt-mutex-trivial-tcp-preempt-fix.patch rt-mutex-trivial-route-cast-fix.patch rt-mutex-delayed-resched.patch rt-mutex-core.patch rt-mutex-trylock-export.patch rt-mutex-spinlock-might-sleep.patch rt-mutex-i386.patch rt-mutex-mips.patch rt-mutex-ppc.patch rt-mutex-ppc-fix-a5.patch rt-mutex-x86-64.patch rt-mutex-arm.patch rt-mutex-arm-fix.patch rt-mutex-drop-generic-TIF_NEED_RESCHED_DELAYED.patch rt-mutex-compat-semaphores.patch # # Per-CPU locking assumption cleanups: # percpu-locked-mm.patch percpu-locked-netfilter.patch percpu-locked-netfilter2.patch percpu-locked-powerpc-fixups.patch percpu-locked-powerpc-fixups-a6.patch # # Various preempt fixups # net-core-preempt-fix.patch bh-uptodate-lock.patch bh-state-lock.patch jbd_assertions_smp_only.patch # # Tasklet redesign # tasklet-redesign.patch tasklet-busy-loop-hack.patch # # Diable irq poll on -rt # tasklet-fix-preemption-race.patch tasklet-more-fixes.patch disable-irqpoll.patch # # Inaccurate -rt stats (should be replaced by CFS) # kstat-add-rt-stats.patch # # Posix-cpu-timers in a thread # preempt-realtime-warn-and-bug-on.patch cputimer-thread-rt_A0.patch cputimer-thread-rt-fix.patch posix-cpu-timers-fix.patch # # Various broken drivers # 
preempt-rt-cs5530-lock-ide-fix.patch vortex-fix.patch serial-locking-rt-cleanup.patch fix-emac-locking-2.6.16.patch # # Serial optimizing # serial-slow-machines.patch # # Realtime patches # # ARM: preempt-realtime-arm.patch arm-trace-preempt-idle.patch preempt-realtime-arm-bagde4.patch preempt-realtime-arm-footbridge.patch preempt-realtime-arm-integrator.patch preempt-realtime-arm-ixp4xx.patch preempt-realtime-arm-pxa.patch preempt-realtime-arm-shark.patch # MIPS: needs splitting preempt-realtime-mips.patch mips-gtod_clocksource.patch # X86-64: needs splitting preempt-realtime-x86_64.patch # IA64: needs splitting preempt-realtime-ia64.patch # PowerPC preempt-realtime-ppc-need-resched-delayed.patch preempt-realtime-ppc-more-resched-fixups.patch preempt-realtime-powerpc.patch preempt-realtime-powerpc-update.patch preempt-realtime-powerpc-a7.patch preempt-realtime-powerpc-b2.patch preempt-realtime-powerpc-b3.patch preempt-realtime-powerpc-b4.patch preempt-realtime-powerpc-add-raw-relax-macros.patch preempt-realtime-powerpc-tlb-batching.patch preempt-realtime-powerpc-celleb-raw-spinlocks.patch # # SuperH: needs splitting # preempt-realtime-powerpc-missing-raw-spinlocks.patch preempt-realtime-sh.patch # # i386 # preempt-realtime-i386.patch remove-check-pgt-cache-calls.patch preempt-irqs-i386-idle-poll-loop-fix.patch # # Core patch # # Note this is a convenience split up it is not supposed to compile # step by step. Needs some care, but it is way easier to handle than # the previous touch all in one patch # preempt-realtime-sched.patch preempt-realtime-mmdrop-delayed.patch preempt-realtime-sched-i386.patch preempt-realtime-prevent-idle-boosting.patch preempt-realtime-cfs-accounting-fix.patch preempt-realtime-core.patch preempt-realtime-fs-block.patch preempt-realtime-acpi.patch preempt-realtime-ipc.patch preempt-realtime-sound.patch preempt-realtime-mm.patch preempt-realtime-init-show-enabled-debugs.patch preempt-realtime-compile-fixes.patch preempt-realtime-console.patch preempt-realtime-debug-sysctl.patch preempt-realtime-ide.patch preempt-realtime-input.patch preempt-realtime-irqs.patch preempt-realtime-net-drivers.patch preempt-realtime-netconsole.patch preempt-realtime-printk.patch preempt-realtime-profiling.patch preempt-realtime-rawlocks.patch preempt-realtime-rcu.patch preempt-realtime-timer.patch preempt-realtime-usb.patch preempt-realtime-warn-and-bug-on-fix.patch # # Various -rt fixups # preempt-realtime-gtod-fixups.patch preempt-realtime-supress-cpulock-warning.patch preempt-realtime-supress-nohz-softirq-warning.patch preempt-realtime-net.patch preempt-realtime-net-softirq-fixups.patch preempt-realtime-loopback.patch preempt-realtime-drivers-pci-hotplug.patch preempt-realtime-8139too-rt-irq-flags-fix.patch # # Utility patches (not for upstream inclusion): # preempt-realtime-supress-rtc-printk.patch hrtimer-no-printk.patch nmi-profiling.patch panic-dont-stop-box.patch nmi-watchdog-disable.patch # # soft watchdog queue: # softlockup-fix.patch softlockup-add-irq-regs-h.patch softlockup-better-printout.patch softlockup-cleanups.patch softlockup-use-cpu-clock.patch # # Not yet reviewed # gtod-optimize.patch realtime-lsm.patch # # Futex updates # rcu-various-fixups.patch futex-performance-hack.patch futex-performance-hack-sysctl-fix.patch # # Pete's file locking scalability changes: # s_files-schedule_on_each_cpu_wq.patch schedule_on_cpu.patch s_files-pipe-fix.patch # # Pete's file locking scalability changes: # lockdep_lock_set_subclass_fix.patch qrcu.patch lock_list.patch 
percpu_list.patch s_files.patch # # START of Pete's ccur-pagecache queue # # # lockless pagecache # 2.6.21-rc6-lockless1-prep-find_lock_page.patch 2.6.21-rc6-lockless2-radix-tree-use-indirect-bit.patch 2.6.21-rc6-lockless3-radix-tree-gang-slot-lookups.patch 2.6.21-rc6-lockless4-__add_to_swap_cache-stuff.patch 2.6.21-rc6-lockless5-lockless-probe.patch 2.6.21-rc6-lockless6-speculative-get-page.patch 2.6.21-rc6-lockless7-lockless-pagecache-lookups.patch 2.6.21-rc6-lockless8-spinlock-tree_lock.patch # # concurrent (write side) page cache # radix-tree-concurrent.patch mapping_nrpages.patch lock_page_ref.patch mm-concurrent-pagecache.patch radix-tree-optimistic.patch radix-tree-optimistic-hist.patch radix-concurrent-lockdep.patch #radix-tree-path-compression.patch # # -rt bits # mm-concurrent-pagecache-rt.patch # # END of Pete's ccur-pagecache queue # # # kmap atomix fixes # kmap-atomic-prepare.patch pagefault-disable-cleanup.patch kmap-atomic-i386-fix.patch # # Not yet reviewed # select-error-leak-fix.patch #module-pde-race-fixes.patch fix-emergency-reboot.patch timer-freq-tweaks.patch # # Highmem modifications # highmem-revert-mainline.patch highmem_rewrite.patch highmem-redo-mainline.patch rt-kmap-scale-fix.patch # # Debug patches: # pause-on-oops-head-tail.patch i386-nmi-watchdog-show-regs.patch x86-64-traps-move-held-locks-output.patch # # x86-64 vsyscall modifications # x86-64-tscless-vgettimeofday.patch rt-time-starvation-fix.patch # # RT-Java testing stuff # rt-time-starvation-fix-update.patch Add-dev-rmem-device-driver-for-real-time-JVM-testing.patch Allocate-RTSJ-memory-for-TCK-conformance-test.patch # # Softirq modifications # new-softirq-code.patch softirq-per-cpu-assumptions-fixes.patch smp-processor-id-fixups.patch fix-migrating-softirq.patch vsyscall-add-notrace.patch fix-softirq-checks-for-non-rt-preempt-hardirq.patch # # Weird crap unearthed by -rt which needs to be investigated # irda-fix.patch nf_conntrack-weird-crash-fix.patch # # Needs proper fix # nf_conntrack-fix-smp-processor-id.patch print-might-sleep-hack.patch lockdep-rt-mutex.patch lockstat-rt-hooks.patch lockstat_bounce_rt.patch # # KVM: # kvm-rt.patch # # Add RT to uname and apply the version # RT_utsname.patch # # not yet backmerged tail patches: # preempt-rt-no-slub.patch paravirt-function-pointer-fix.patch quicklist-release-before-free-page.patch quicklist-release-before-free-page-fix.patch sched-rt-balance-fix.patch disable-lpptest-on-nonlinux.patch sched-rt-stats.patch mitigate-resched-flood.patch genirq-soft-resend.patch rcu-preempt-hotplug-hackaround.patch relay-fix.patch schedule_on_each_cpu-enhance.patch schedule_on_each_cpu-enhance-rt.patch atl-flags-fix.patch lockdep-rt-recursion-limit-fix.patch cond_resched_softirq-WARN-fix.patch irq-mask-fix.patch version.patch patches/rt-kmap-scale-fix.patch0000664000077200007720000001447610655544576016023 0ustar mingomingoHi Ingo, Apply on top of what is still in -rt. This seems to survive a kbuild -j64 & -j512 (although with that latter the machine goes off for a while, but does return with a kernel). If you can spare a cycle between hacking syslets and -rt, could you have a look at the logic this patch adds? --- Solve 2 deadlocks in the current kmap code. 1) akpm spotted a race in the waitqueue usage that could deadlock the machine. the very unlikely scenario was what we would not find a usable map in LAST_PKMAP tries but right before we hit schedule the very last returns. Solve this by keeping a free count. 
2) akpm told about the kmap deadlock where multiple processes each require 2 maps (src, dst). When they deplete the maps for the src maps they will be stuck waiting for their dst maps. Solve this by by tracking (and limiting) kmap users and account two maps for each. This all adds more atomic globals, this will bounce like mad on real large smp. (perhaps add some __cacheline_aligned_on_smp) Signed-off-by: Peter Zijlstra --- include/linux/sched.h | 1 mm/highmem.c | 96 ++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 87 insertions(+), 10 deletions(-) Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -1529,6 +1529,7 @@ static inline void put_task_struct(struc #define PF_MEMALLOC 0x00000800 /* Allocating memory */ #define PF_FLUSHER 0x00001000 /* responsible for disk writeback */ #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ +#define PF_KMAP 0x00004000 /* this context has a kmap */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ Index: linux-rt-rebase.q/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/mm/highmem.c +++ linux-rt-rebase.q/mm/highmem.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -67,10 +68,12 @@ unsigned int nr_free_highpages (void) */ static atomic_t pkmap_count[LAST_PKMAP]; static atomic_t pkmap_hand; +static atomic_t pkmap_free; +static atomic_t pkmap_users; pte_t * pkmap_page_table; -static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait); +static DECLARE_WAIT_QUEUE_HEAD(pkmap_wait); /* * Try to free a given kmap slot. @@ -85,6 +88,7 @@ static int pkmap_try_free(int pos) if (atomic_cmpxchg(&pkmap_count[pos], 1, 0) != 1) return -1; + atomic_dec(&pkmap_free); /* * TODO: add a young bit to make it CLOCK */ @@ -113,7 +117,8 @@ static inline void pkmap_put(atomic_t *c BUG(); case 1: - wake_up(&pkmap_map_wait); + atomic_inc(&pkmap_free); + wake_up(&pkmap_wait); } } @@ -122,11 +127,10 @@ static inline void pkmap_put(atomic_t *c static int pkmap_get_free(void) { int i, pos, flush; - DECLARE_WAITQUEUE(wait, current); restart: for (i = 0; i < LAST_PKMAP; i++) { - pos = atomic_inc_return(&pkmap_hand) % LAST_PKMAP; + pos = atomic_inc_return(&pkmap_hand) & LAST_PKMAP_MASK; flush = pkmap_try_free(pos); if (flush >= 0) goto got_one; @@ -135,10 +139,8 @@ restart: /* * wait for somebody else to unmap their entries */ - __set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&pkmap_map_wait, &wait); - schedule(); - remove_wait_queue(&pkmap_map_wait, &wait); + if (likely(!in_interrupt())) + wait_event(pkmap_wait, atomic_read(&pkmap_free) != 0); goto restart; @@ -147,7 +149,7 @@ got_one: #if 0 flush_tlb_kernel_range(PKMAP_ADDR(pos), PKMAP_ADDR(pos+1)); #else - int pos2 = (pos + 1) % LAST_PKMAP; + int pos2 = (pos + 1) & LAST_PKMAP_MASK; int nr; int entries[TLB_BATCH]; @@ -157,7 +159,7 @@ got_one: * Scan ahead of the hand to minimise search distances. 
*/ for (i = 0, nr = 0; i < LAST_PKMAP && nr < TLB_BATCH; - i++, pos2 = (pos2 + 1) % LAST_PKMAP) { + i++, pos2 = (pos2 + 1) & LAST_PKMAP_MASK) { flush = pkmap_try_free(pos2); if (flush < 0) @@ -222,9 +224,79 @@ void kmap_flush_unused(void) WARN_ON_ONCE(1); } +/* + * Avoid starvation deadlock by limiting the number of tasks that can obtain a + * kmap to (LAST_PKMAP - KM_TYPE_NR*NR_CPUS)/2. + */ +static void kmap_account(void) +{ + int weight; + +#ifndef CONFIG_PREEMPT_RT + if (in_interrupt()) { + /* irqs can always get them */ + weight = -1; + } else +#endif + if (current->flags & PF_KMAP) { + current->flags &= ~PF_KMAP; + /* we already accounted the second */ + weight = 0; + } else { + /* mark 1, account 2 */ + current->flags |= PF_KMAP; + weight = 2; + } + + if (weight > 0) { + /* + * reserve KM_TYPE_NR maps per CPU for interrupt context + */ + const int target = LAST_PKMAP +#ifndef CONFIG_PREEMPT_RT + - KM_TYPE_NR*NR_CPUS +#endif + ; + +again: + wait_event(pkmap_wait, + atomic_read(&pkmap_users) + weight <= target); + + if (atomic_add_return(weight, &pkmap_users) > target) { + atomic_sub(weight, &pkmap_users); + goto again; + } + } +} + +static void kunmap_account(void) +{ + int weight; + +#ifndef CONFIG_PREEMPT_RT + if (in_irq()) { + weight = -1; + } else +#endif + if (current->flags & PF_KMAP) { + /* there was only 1 kmap, un-account both */ + current->flags &= ~PF_KMAP; + weight = 2; + } else { + /* there were two kmaps, un-account per kunmap */ + weight = 1; + } + + if (weight > 0) + atomic_sub(weight, &pkmap_users); + wake_up(&pkmap_wait); +} + fastcall void *kmap_high(struct page *page) { unsigned long vaddr; + + kmap_account(); again: vaddr = (unsigned long)page_address(page); if (vaddr) { @@ -265,6 +337,7 @@ fastcall void kunmap_high(struct page *p unsigned long vaddr = (unsigned long)page_address(page); BUG_ON(!vaddr); pkmap_put(&pkmap_count[PKMAP_NR(vaddr)]); + kunmap_account(); } EXPORT_SYMBOL(kunmap_high); @@ -409,6 +482,9 @@ void __init page_address_init(void) for (i = 0; i < ARRAY_SIZE(pkmap_count); i++) atomic_set(&pkmap_count[i], 1); + atomic_set(&pkmap_hand, 0); + atomic_set(&pkmap_free, LAST_PKMAP); + atomic_set(&pkmap_users, 0); #endif #ifdef HASHED_PAGE_VIRTUAL patches/i386-hpet-add-x8664-hpet-bits.patch0000664000077200007720000000737410655544570017437 0ustar mingomingoSubject: i386: prepare sharing the hpet code with x86_64 Add the x8664 specific bits (mapping) to share the hpet code later. Move the reserve_platform_timer call to late init. This is necessary for x86_64, as hpet enable() is called before memory is setup. i386 calls it in late_time_init, but it does not hurt to do it later for both. Pull in the x8664 hpet disable command line option as well. Signed-off-by: Thomas Gleixner --- arch/i386/kernel/hpet.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) Index: linux/arch/i386/kernel/hpet.c =================================================================== --- linux.orig/arch/i386/kernel/hpet.c +++ linux/arch/i386/kernel/hpet.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -7,6 +8,7 @@ #include #include +#include #include #include #include @@ -23,6 +25,10 @@ unsigned long hpet_address; static void __iomem *hpet_virt_address; +/* Temporary hack. 
Cleanup after x86_64 clock events conversion */ +#undef hpet_readl +#undef hpet_writel + static inline unsigned long hpet_readl(unsigned long a) { return readl(hpet_virt_address + a); @@ -33,6 +39,24 @@ static inline void hpet_writel(unsigned writel(d, hpet_virt_address + a); } +#ifdef CONFIG_X86_64 + +#include + +static inline void hpet_set_mapping(void) +{ + set_fixmap_nocache(FIX_HPET_BASE, hpet_address); + __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE); + hpet_virt_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE); +} + +static inline void hpet_clear_mapping(void) +{ + hpet_virt_address = NULL; +} + +#else + static inline void hpet_set_mapping(void) { hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); @@ -43,6 +67,7 @@ static inline void hpet_clear_mapping(vo iounmap(hpet_virt_address); hpet_virt_address = NULL; } +#endif /* * HPET command line enable / disable @@ -59,6 +84,13 @@ static int __init hpet_setup(char* str) } __setup("hpet=", hpet_setup); +static int __init disable_hpet(char *str) +{ + boot_hpet_disable = 1; + return 1; +} +__setup("nohpet", disable_hpet); + static inline int is_hpet_capable(void) { return (!boot_hpet_disable && hpet_address); @@ -225,6 +257,13 @@ static cycle_t read_hpet(void) return (cycle_t)hpet_readl(HPET_COUNTER); } +#ifdef CONFIG_X86_64 +static cycle_t __vsyscall_fn vread_hpet(void) +{ + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); +} +#endif + static struct clocksource clocksource_hpet = { .name = "hpet", .rating = 250, @@ -233,6 +272,9 @@ static struct clocksource clocksource_hp .shift = HPET_SHIFT, .flags = CLOCK_SOURCE_IS_CONTINUOUS, .resume = hpet_start_counter, +#ifdef CONFIG_X86_64 + .vread = vread_hpet, +#endif }; /* @@ -331,7 +373,6 @@ int __init hpet_enable(void) if (id & HPET_ID_LEGSUP) { hpet_enable_int(); - hpet_reserve_platform_timers(id); /* * Start hpet with the boot cpu mask and make it * global after the IO_APIC has been initialized. @@ -349,6 +390,22 @@ out_nohpet: return 0; } +/* + * Needs to be late, as the reserve_timer code calls kalloc ! + * + * Not a problem on i386 as hpet_enable is called from late_time_init, + * but on x86_64 it is necessary ! + */ +static __init int hpet_late_init(void) +{ + if (!is_hpet_capable()) + return -ENODEV; + + hpet_reserve_platform_timers(hpet_readl(HPET_ID)); + return 0; +} +fs_initcall(hpet_late_init); + #ifdef CONFIG_HPET_EMULATE_RTC /* HPET in LegacyReplacement Mode eats up RTC interrupt line. 
When, HPET patches/trace-sti-mwait.patch0000664000077200007720000000714010655544572015573 0ustar mingomingo--- arch/i386/kernel/process.c | 2 ++ arch/x86_64/kernel/process.c | 7 +++++-- include/linux/irqflags.h | 9 +++++++++ kernel/latency_trace.c | 17 ++++++++++++++++- 4 files changed, 32 insertions(+), 3 deletions(-) Index: linux/arch/i386/kernel/process.c =================================================================== --- linux.orig/arch/i386/kernel/process.c +++ linux/arch/i386/kernel/process.c @@ -199,10 +199,12 @@ void cpu_idle(void) __get_cpu_var(irq_stat).idle_timestamp = jiffies; idle(); } + trace_preempt_exit_idle(); tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); + trace_preempt_enter_idle(); } } Index: linux/arch/x86_64/kernel/process.c =================================================================== --- linux.orig/arch/x86_64/kernel/process.c +++ linux/arch/x86_64/kernel/process.c @@ -232,10 +232,12 @@ void cpu_idle (void) __exit_idle(); } + trace_preempt_exit_idle(); tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); + trace_preempt_enter_idle(); } } @@ -265,9 +267,10 @@ static void mwait_idle(void) if (!need_resched()) { __monitor((void *)¤t_thread_info()->flags, 0, 0); smp_mb(); - if (!need_resched()) + if (!need_resched()) { + trace_hardirqs_on(); __sti_mwait(0, 0); - else + } else local_irq_enable(); } else { local_irq_enable(); Index: linux/include/linux/irqflags.h =================================================================== --- linux.orig/include/linux/irqflags.h +++ linux/include/linux/irqflags.h @@ -16,6 +16,13 @@ extern void trace_hardirqs_off(void); extern void trace_softirqs_on(unsigned long ip); extern void trace_softirqs_off(unsigned long ip); +# ifdef CONFIG_CRITICAL_PREEMPT_TIMING + extern void trace_preempt_enter_idle(void); + extern void trace_preempt_exit_idle(void); +# else +# define trace_preempt_enter_idle() do { } while (0) +# define trace_preempt_exit_idle() do { } while (0) +# endif # define trace_hardirq_context(p) ((p)->hardirq_context) # define trace_softirq_context(p) ((p)->softirq_context) # define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) @@ -26,6 +33,8 @@ # define trace_softirq_exit() do { current->softirq_context--; } while (0) # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, #else +# define trace_preempt_enter_idle() do { } while (0) +# define trace_preempt_exit_idle() do { } while (0) # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) # define trace_softirqs_on(ip) do { } while (0) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -91,7 +91,8 @@ static inline int DEBUG_WARN_ON(int cond #ifdef CONFIG_CRITICAL_IRQSOFF_TIMING # ifdef CONFIG_CRITICAL_PREEMPT_TIMING -# define irqs_off_preempt_count() preempt_count() + static DEFINE_PER_CPU(int, trace_cpu_idle); +# define irqs_off_preempt_count() (!__get_cpu_var(trace_cpu_idle) && preempt_count()) # else # define irqs_off_preempt_count() 0 # endif @@ -2151,6 +2152,20 @@ void notrace unmask_preempt_count(unsign } EXPORT_SYMBOL(unmask_preempt_count); +#ifdef CONFIG_CRITICAL_PREEMPT_TIMING + +/* Some archs do their cpu_idle with preemption on. 
Don't measure it */ +void notrace trace_preempt_enter_idle(void) +{ + __get_cpu_var(trace_cpu_idle) = 1; +} + +void notrace trace_preempt_exit_idle(void) +{ + __get_cpu_var(trace_cpu_idle) = 0; +} + +#endif /* CONFIG_CRITICAL_PREEMPT_TIMING */ #endif patches/ppc-clockevents.patch0000664000077200007720000002070310655544572015661 0ustar mingomingoFrom sshtylyov@ru.mvista.com Thu May 17 19:40:58 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from imap.sh.mvista.com (unknown [63.81.120.155]) by mail.tglx.de (Postfix) with ESMTP id 88FCD65C3EA for ; Thu, 17 May 2007 19:40:58 +0200 (CEST) Received: from wasted.dev.rtsoft.ru (unknown [10.150.0.9]) by imap.sh.mvista.com (Postfix) with ESMTP id 7571C3EC9; Thu, 17 May 2007 10:40:54 -0700 (PDT) From: Sergei Shtylyov Organization: MontaVista Software Inc. To: tglx@linutronix.de, mingo@elte.hu Subject: [PATCH 2.6.21-rt2] PowerPC: decrementer clockevent driver Date: Thu, 17 May 2007 21:42:26 +0400 User-Agent: KMail/1.5 Cc: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org MIME-Version: 1.0 Content-Disposition: inline Content-Type: text/plain; charset="iso-8859-1" Message-Id: <200705172142.26739.sshtylyov@ru.mvista.com> X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Add PowerPC decrementer clock event driver. Every effort has been made to support the different implementations of the decrementer: the classic one (with 970 series variation), 40x and Book E specific ones. I had to make CONFIG_GENERIC_CLOCKEVENTS option selectable for the compatibility reasons -- this option is not compatible with the PPC64 deterministic time accounting. Thanks to Daniel Walker and Thomas Gleixner for the suggestions they made... Signed-off-by: Sergei Shtylyov --- This patch has been reworked against the 2.6.21 clockevents framework. It has only been tested on the Book E 32-bit CPU this time, so re-testing on "classic" PowerPC CPUs is needed (used to work as of 2.6.18-rt7)... CONFIG_PPC_MULTIPLATFORM was the best option I was able to come up with to cover machines built on 970 series CPUs... arch/powerpc/Kconfig | 12 +++- arch/powerpc/kernel/time.c | 124 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 134 insertions(+), 2 deletions(-) Index: linux/arch/powerpc/Kconfig =================================================================== --- linux.orig/arch/powerpc/Kconfig +++ linux/arch/powerpc/Kconfig @@ -161,6 +161,16 @@ config HIGHMEM depends on PPC32 source kernel/Kconfig.hz + +config GENERIC_CLOCKEVENTS + bool "Clock event devices support" + default n + help + Enable support for the clock event devices necessary for the + high-resolution timers and the tickless system support. + NOTE: This is not compatible with the deterministic time accounting + option on PPC64. 
+ source kernel/Kconfig.preempt source "fs/Kconfig.binfmt" @@ -169,7 +179,7 @@ source "fs/Kconfig.binfmt" # max order + 1 config FORCE_MAX_ZONEORDER int - depends on PPC64 + depends on PPC64 && !GENERIC_CLOCKEVENTS default "9" if PPC_64K_PAGES default "13" Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -126,6 +127,83 @@ unsigned long ppc_tb_freq; static u64 tb_last_jiffy __cacheline_aligned_in_smp; static DEFINE_PER_CPU(u64, last_jiffy); +#ifdef CONFIG_GENERIC_CLOCKEVENTS + +#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) +#define DECREMENTER_MAX 0xffffffff +#else +#define DECREMENTER_MAX 0x7fffffff /* setting MSB triggers an interrupt */ +#endif + +static int decrementer_set_next_event(unsigned long evt, + struct clock_event_device *dev) +{ +#if defined(CONFIG_40x) + mtspr(SPRN_PIT, evt); /* 40x has a hidden PIT auto-reload register */ +#elif defined(CONFIG_BOOKE) + mtspr(SPRN_DECAR, evt); /* Book E has separate auto-reload register */ + set_dec(evt); +#else + set_dec(evt - 1); /* Classic decrementer interrupts at -1 */ +#endif + return 0; +} + +static void decrementer_set_mode(enum clock_event_mode mode, + struct clock_event_device *dev) +{ +#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) + u32 tcr = mfspr(SPRN_TCR); + + tcr |= TCR_DIE; + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + tcr |= TCR_ARE; + break; + case CLOCK_EVT_MODE_ONESHOT: + tcr &= ~TCR_ARE; + break; + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + tcr &= ~TCR_DIE; + break; + } + mtspr(SPRN_TCR, tcr); +#endif + if (mode == CLOCK_EVT_MODE_PERIODIC) + decrementer_set_next_event(tb_ticks_per_jiffy, dev); +} + +static struct clock_event_device decrementer_clockevent = { + .name = "decrementer", +#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, +#else + .features = CLOCK_EVT_FEAT_ONESHOT, +#endif + .shift = 32, + .rating = 200, + .irq = -1, + .set_next_event = decrementer_set_next_event, + .set_mode = decrementer_set_mode, +}; + +static DEFINE_PER_CPU(struct clock_event_device, decrementers); + +static void register_decrementer(void) +{ + int cpu = smp_processor_id(); + struct clock_event_device *decrementer = &per_cpu(decrementers, cpu); + + memcpy(decrementer, &decrementer_clockevent, sizeof(*decrementer)); + + decrementer->cpumask = cpumask_of_cpu(cpu); + + clockevents_register_device(decrementer); +} + +#endif /* CONFIG_GENERIC_CLOCKEVENTS */ + #ifdef CONFIG_VIRT_CPU_ACCOUNTING /* * Factors for converting from cputime_t (timebase ticks) to @@ -317,6 +395,9 @@ void snapshot_timebase(void) { __get_cpu_var(last_jiffy) = get_tb(); snapshot_purr(); +#ifdef CONFIG_GENERIC_CLOCKEVENTS + register_decrementer(); +#endif } void __delay(unsigned long loops) @@ -489,7 +570,31 @@ void timer_interrupt(struct pt_regs * re old_regs = set_irq_regs(regs); irq_enter(); +#ifdef CONFIG_GENERIC_CLOCKEVENTS +#ifdef CONFIG_PPC_MULTIPLATFORM + /* + * We must write a positive value to the decrementer to clear + * the interrupt on the IBM 970 CPU series. In periodic mode, + * this happens when the decrementer gets reloaded later, but + * in one-shot mode, we have to do it here since an event handler + * may skip loading the new value... 
+ */ + if (per_cpu(decrementers, cpu).mode != CLOCK_EVT_MODE_PERIODIC) + set_dec(DECREMENTER_MAX); +#endif + /* + * We can't disable the decrementer, so in the period between + * CPU being marked offline and calling stop-self, it's taking + * timer interrupts... + */ + if (!cpu_is_offline(cpu)) { + struct clock_event_device *dev = &per_cpu(decrementers, cpu); + + dev->event_handler(dev); + } +#else profile_tick(CPU_PROFILING); +#endif calculate_steal_time(); #ifdef CONFIG_PPC_ISERIES @@ -505,6 +610,7 @@ void timer_interrupt(struct pt_regs * re if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000) per_cpu(last_jiffy, cpu) -= 1000000000; +#ifndef CONFIG_GENERIC_CLOCKEVENTS /* * We cannot disable the decrementer, so in the period * between this cpu's being marked offline in cpu_online_map @@ -514,6 +620,7 @@ void timer_interrupt(struct pt_regs * re */ if (!cpu_is_offline(cpu)) account_process_time(regs); +#endif /* * No need to check whether cpu is offline here; boot_cpuid @@ -526,15 +633,19 @@ void timer_interrupt(struct pt_regs * re tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy; if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) { tb_last_jiffy = tb_next_jiffy; +#ifndef CONFIG_GENERIC_CLOCKEVENTS do_timer(1); +#endif /*timer_recalc_offset(tb_last_jiffy);*/ timer_check_rtc(); } write_sequnlock(&xtime_lock); } - + +#ifndef CONFIG_GENERIC_CLOCKEVENTS next_dec = tb_ticks_per_jiffy - ticks; set_dec(next_dec); +#endif #ifdef CONFIG_PPC_ISERIES if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) @@ -769,8 +880,19 @@ void __init time_init(void) /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ boot_tb = get_tb(); +#ifdef CONFIG_GENERIC_CLOCKEVENTS + decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC, + decrementer_clockevent.shift); + decrementer_clockevent.max_delta_ns = + clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); + decrementer_clockevent.min_delta_ns = + clockevent_delta2ns(0xf, &decrementer_clockevent); + + register_decrementer(); +#else /* Not exact, but the timer interrupt takes care of this */ set_dec(tb_ticks_per_jiffy); +#endif } #define FEBRUARY 2 patches/ich-force-hpet-add-ich7_0-pciid-to-quirk-list.patch0000664000077200007720000000227210655544570022762 0ustar mingomingoFrom: Venki Pallipadi Add another PCI ID for ICH7 force hpet. Signed-off-by: Venkatesh Pallipadi Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Andi Kleen Cc: john stultz Cc: Greg KH Signed-off-by: Andrew Morton --- arch/i386/kernel/quirks.c | 2 ++ 1 file changed, 2 insertions(+) Index: linux/arch/i386/kernel/quirks.c =================================================================== --- linux.orig/arch/i386/kernel/quirks.c +++ linux/arch/i386/kernel/quirks.c @@ -149,6 +149,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, + ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, patches/rt-apis.patch0000664000077200007720000000621410655544573014142 0ustar mingomingo add new, -rt specific IRQ API variants. Maps to the same as before on non-PREEMPT_RT. 
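To illustrate the intent from a caller's point of view, here is a minimal, hypothetical driver snippet (not part of this patch -- demo_lock, demo_hw_state and demo_update_hw_state are made-up names). It only uses the *_nort() helpers introduced by the hunks below: on !PREEMPT_RT they expand to the usual local_irq_save()/local_irq_restore(), while on PREEMPT_RT they become no-ops, since the spinlock is a sleeping rt-mutex there and the section is allowed to be preempted:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static int demo_hw_state;

	static void demo_update_hw_state(int val)
	{
		unsigned long flags;

		/* real irq-off section on mainline, no-op under PREEMPT_RT */
		local_irq_save_nort(flags);
		spin_lock(&demo_lock);

		demo_hw_state = val;

		spin_unlock(&demo_lock);
		local_irq_restore_nort(flags);
	}
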
include/linux/bottom_half.h | 8 ++++++++ include/linux/interrupt.h | 35 ++++++++++++++++++++++++++++++++++- 2 files changed, 42 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/include/linux/bottom_half.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/bottom_half.h +++ linux-rt-rebase.q/include/linux/bottom_half.h @@ -1,9 +1,17 @@ #ifndef _LINUX_BH_H #define _LINUX_BH_H +#ifdef CONFIG_PREEMPT_RT +# define local_bh_disable() do { } while (0) +# define __local_bh_disable(ip) do { } while (0) +# define _local_bh_enable() do { } while (0) +# define local_bh_enable() do { } while (0) +# define local_bh_enable_ip(ip) do { } while (0) +#else extern void local_bh_disable(void); extern void _local_bh_enable(void); extern void local_bh_enable(void); extern void local_bh_enable_ip(unsigned long ip); +#endif #endif /* _LINUX_BH_H */ Index: linux-rt-rebase.q/include/linux/interrupt.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/interrupt.h +++ linux-rt-rebase.q/include/linux/interrupt.h @@ -119,7 +119,7 @@ extern void devm_free_irq(struct device #ifdef CONFIG_LOCKDEP # define local_irq_enable_in_hardirq() do { } while (0) #else -# define local_irq_enable_in_hardirq() local_irq_enable() +# define local_irq_enable_in_hardirq() local_irq_enable_nort() #endif extern void disable_irq_nosync(unsigned int irq); @@ -478,4 +478,37 @@ static inline void init_irq_proc(void) } #endif +#ifdef CONFIG_PREEMPT_RT +# define local_irq_disable_nort() do { } while (0) +# define local_irq_enable_nort() do { } while (0) +# define local_irq_enable_rt() local_irq_enable() +# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0) +# define local_irq_restore_nort(flags) do { (void)(flags); } while (0) +# define spin_lock_nort(lock) do { } while (0) +# define spin_unlock_nort(lock) do { } while (0) +# define spin_lock_bh_nort(lock) do { } while (0) +# define spin_unlock_bh_nort(lock) do { } while (0) +# define spin_lock_rt(lock) spin_lock(lock) +# define spin_unlock_rt(lock) spin_unlock(lock) +# define smp_processor_id_rt(cpu) (cpu) +# define in_atomic_rt() (!oops_in_progress && \ + (in_atomic() || irqs_disabled())) +# define read_trylock_rt(lock) ({read_lock(lock); 1; }) +#else +# define local_irq_disable_nort() local_irq_disable() +# define local_irq_enable_nort() local_irq_enable() +# define local_irq_enable_rt() do { } while (0) +# define local_irq_save_nort(flags) local_irq_save(flags) +# define local_irq_restore_nort(flags) local_irq_restore(flags) +# define spin_lock_rt(lock) do { } while (0) +# define spin_unlock_rt(lock) do { } while (0) +# define spin_lock_nort(lock) spin_lock(lock) +# define spin_unlock_nort(lock) spin_unlock(lock) +# define spin_lock_bh_nort(lock) spin_lock_bh(lock) +# define spin_unlock_bh_nort(lock) spin_unlock_bh(lock) +# define smp_processor_id_rt(cpu) smp_processor_id() +# define in_atomic_rt() 0 +# define read_trylock_rt(lock) read_trylock(lock) +#endif + #endif patches/disable-lpptest-on-nonlinux.patch0000664000077200007720000000262110655544577020143 0ustar mingomingo Sadly people keep wanting to build kernels on non-Linux hosts (cygwin & solaris) and testlpp really doesn't like to build on those. I have a separate patch to testlpp.c that fixes this, but it really makes no sense to build the tool to run on your cygwin host as it's meant to be run on Linux with the testlpp module loaded. 
Even this patch isn't really the right solution b/c you really want to cross-build the may be cross-building for another architecture from Linux you want cross-compile, not host compile but there's no really easy way to cross-compile a userland binary from the kernel build w/o some makefile uglyiness AFAICT. Is there some sort of -rt userland package this could move to instead of being in the kernel itself...? Signed-off-by: Deepak Saxena --- scripts/Makefile | 3 +++ 1 file changed, 3 insertions(+) Index: linux-rt-rebase.q/scripts/Makefile =================================================================== --- linux-rt-rebase.q.orig/scripts/Makefile +++ linux-rt-rebase.q/scripts/Makefile @@ -13,9 +13,12 @@ hostprogs-$(CONFIG_LOGO) += pnmt hostprogs-$(CONFIG_VT) += conmakehash hostprogs-$(CONFIG_PROM_CONSOLE) += conmakehash hostprogs-$(CONFIG_IKCONFIG) += bin2c +HOST_OS := $(shell uname) +ifeq ($(HOST_OS),Linux) ifdef CONFIG_LPPTEST hostprogs-y += testlpp endif +endif always := $(hostprogs-y) $(hostprogs-m) patches/genirq-soft-resend.patch0000664000077200007720000000345010655544577016302 0ustar mingomingoSubject: x86: activate HARDIRQS_SW_RESEND From: Ingo Molnar activate the software-triggered IRQ-resend logic. it appears some chipsets/cpus do not handle local-APIC driven IRQ resends all that well, so always use the soft mechanism to trigger the execution of pending interrupts. Signed-off-by: Ingo Molnar --- arch/i386/Kconfig | 4 ++++ arch/x86_64/Kconfig | 4 ++++ kernel/irq/manage.c | 8 ++++++++ 3 files changed, 16 insertions(+) Index: linux-rt-rebase.q/arch/i386/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/i386/Kconfig +++ linux-rt-rebase.q/arch/i386/Kconfig @@ -1283,6 +1283,10 @@ config GENERIC_PENDING_IRQ depends on GENERIC_HARDIRQS && SMP default y +config HARDIRQS_SW_RESEND + bool + default y + config X86_SMP bool depends on SMP && !X86_VOYAGER Index: linux-rt-rebase.q/arch/x86_64/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/Kconfig +++ linux-rt-rebase.q/arch/x86_64/Kconfig @@ -721,6 +721,10 @@ config GENERIC_PENDING_IRQ depends on GENERIC_HARDIRQS && SMP default y +config HARDIRQS_SW_RESEND + bool + default y + menu "Power management options" source kernel/power/Kconfig Index: linux-rt-rebase.q/kernel/irq/manage.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/manage.c +++ linux-rt-rebase.q/kernel/irq/manage.c @@ -175,6 +175,14 @@ void enable_irq(unsigned int irq) desc->depth--; } spin_unlock_irqrestore(&desc->lock, flags); +#ifdef CONFIG_HARDIRQS_SW_RESEND + /* + * Do a bh disable/enable pair to trigger any pending + * irq resend logic: + */ + local_bh_disable(); + local_bh_enable(); +#endif } EXPORT_SYMBOL(enable_irq); patches/x86_64-apic-remove-nested-irq-disable.patch0000664000077200007720000000326610655544570021401 0ustar mingomingoSubject: x86_64: remove nested irq disables setup_APIC_timer disables interrupts anyway. So no need to do the same in setup_boot_APIC_clock and setup_secondary_APIC_clock. Disable interrupts explicit in the calibration code. 
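For clarity, the resulting call structure is roughly the following (hand-written sketch for illustration only, not the literal kernel code; per the changelog above, setup_APIC_timer() already brackets its critical part itself):

	static void setup_APIC_timer(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* ... program the local APIC timer ... */
		local_irq_restore(flags);
	}

	void __init setup_boot_APIC_clock(void)
	{
		calibrate_APIC_clock();	/* now disables/enables interrupts itself */
		setup_APIC_timer();	/* no outer local_irq_disable() needed */
	}
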
Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -827,6 +827,9 @@ static void __init calibrate_APIC_clock( unsigned apic, apic_start; unsigned long tsc, tsc_start; int result; + + local_irq_disable(); + /* * Put whatever arbitrary (but long enough) timeout * value into the APIC clock, we just want to get the @@ -856,6 +859,9 @@ static void __init calibrate_APIC_clock( result = (apic_start - apic) * 1000L * tsc_khz / (tsc - tsc_start); } + + local_irq_enable(); + printk(KERN_DEBUG "APIC timer calibration result %d\n", result); printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", @@ -874,22 +880,16 @@ void __init setup_boot_APIC_clock (void) printk(KERN_INFO "Using local APIC timer interrupts.\n"); using_apic_timer = 1; - local_irq_disable(); - calibrate_APIC_clock(); /* * Now set up the timer for real. */ setup_APIC_timer(); - - local_irq_enable(); } void __cpuinit setup_secondary_APIC_clock(void) { - local_irq_disable(); /* FIXME: Do we need this? --RR */ setup_APIC_timer(); - local_irq_enable(); } void disable_APIC_timer(void) patches/x86_64-cleanup-apic-c.patch0000664000077200007720000000240610655544570016274 0ustar mingomingoSubject: x86_64: cleanup apic.c after clock events switch Make variables static. Signed-off-by: Chris Wright Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 6 +++--- include/asm-x86_64/apic.h | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -40,10 +40,10 @@ #include #include -int apic_mapped; int apic_verbosity; -int apic_calibrate_pmtmr __initdata; -int disable_apic_timer __initdata; +static int apic_mapped; +static int apic_calibrate_pmtmr __initdata; +static int disable_apic_timer __initdata; /* Local APIC timer works in C2? 
*/ int local_apic_timer_c2_ok; Index: linux/include/asm-x86_64/apic.h =================================================================== --- linux.orig/include/asm-x86_64/apic.h +++ linux/include/asm-x86_64/apic.h @@ -19,7 +19,6 @@ extern int apic_verbosity; extern int apic_runs_main_timer; extern int ioapic_force; -extern int apic_mapped; /* * Define the default level of output to be very little patches/preempt-irqs-ppc-preempt-schedule-irq-entry-fix.patch0000664000077200007720000001046410655544573023755 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Tue May 22 13:47:39 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=UNPARSEABLE_RELAY autolearn=unavailable version=3.1.7-deb Received: from inet-tsb5.toshiba.co.jp (inet-tsb5.toshiba.co.jp [202.33.96.24]) by mail.tglx.de (Postfix) with ESMTP id 57F7E65C065 for ; Tue, 22 May 2007 13:47:39 +0200 (CEST) Received: from tsb-wall.toshiba.co.jp ([133.199.160.134]) by inet-tsb5.toshiba.co.jp with ESMTP id l4MBlERT003242; Tue, 22 May 2007 20:47:14 +0900 (JST) Received: (from root@localhost) by tsb-wall.toshiba.co.jp id l4MBlEQK014361; Tue, 22 May 2007 20:47:14 +0900 (JST) Received: from ovp1.toshiba.co.jp [133.199.192.124] by tsb-wall.toshiba.co.jp with ESMTP id WAA14360; Tue, 22 May 2007 20:47:14 +0900 Received: from mx2.toshiba.co.jp (localhost [127.0.0.1]) by ovp1.toshiba.co.jp with ESMTP id l4MBlEDs007674; Tue, 22 May 2007 20:47:14 +0900 (JST) Received: from rdcgw.rdc.toshiba.co.jp by toshiba.co.jp id l4MBlDm9015993; Tue, 22 May 2007 20:47:13 +0900 (JST) Received: from island.swc.toshiba.co.jp by rdcgw.rdc.toshiba.co.jp (8.8.8p2+Sun/3.7W) with ESMTP id UAA17003; Tue, 22 May 2007 20:47:13 +0900 (JST) Received: from forest.toshiba.co.jp (forest [133.196.122.2]) by island.swc.toshiba.co.jp (Postfix) with ESMTP id 6A26B40002; Tue, 22 May 2007 20:47:13 +0900 (JST) Date: Tue, 22 May 2007 20:47:13 +0900 Message-ID: From: Tsutomu OWA To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: [PATCH] powerpc 2.6.21-rt6: replace preempt_schedule w/ preempt_schedule_irq User-Agent: Wanderlust/2.8.1 (Something) Emacs/20.7 Mule/4.0 (HANANOEN) Organization: Software Engineering Center, TOSHIBA. MIME-Version: 1.0 (generated by SEMI 1.14.4 - "Hosorogi") Content-Type: text/plain; charset=US-ASCII X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Hi Ingo and Thomas, Please apply. Replace preempt_schedule() w/ preempt_schedule_irq() in irq return path, to avoid irq-entry recursion and stack overflow problems for powerpc64. It hits when doing netperf from another machine to the machine running rt kernel. This patch applies on top of linux-2.6.21 + patch-2.6.21-rt6. Compile, boot and netperf tested on celleb. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ~ $ uname -a Linux Linux 2.6.21-rt6 #1 SMP PREEMPT RT Tue May 22 19:18:00 JST 2007 ppc64 unkn own ~ $ Unable to handle kernel paging request for data at address 0xc0000180004cd9b 0 Faulting instruction address: 0xc00000000003da48 cpu 0x0: Vector: 300 (Data Access) at [c00000000fffba00] pc: c00000000003da48: .resched_task+0x34/0xc4 lr: c0000000000410b4: .try_to_wake_up+0x4cc/0x5a8 sp: c00000000fffbc80 msr: 9000000000001032 dar: c0000180004cd9b0 dsisr: 40000000 current = 0xc00000000244ed20 paca = 0xc0000000004cd980 pid = 425, comm = netserver enter ? 
for help [c00000000fffbd00] c0000000000410b4 .try_to_wake_up+0x4cc/0x5a8 [c00000000fffbde0] c0000000000880c8 .redirect_hardirq+0x68/0x88 [c00000000fffbe60] c00000000008aec8 .handle_level_irq+0x13c/0x220 [c00000000fffbf00] c000000000032538 .spider_irq_cascade+0x98/0xec [c00000000fffbf90] c000000000022280 .call_handle_irq+0x1c/0x2c [c0000000025abea0] c00000000000c33c .do_IRQ+0xc8/0x17c [c0000000025abf30] c00000000000444c hardware_interrupt_entry+0x18/0x4c --- arch/powerpc/kernel/entry_64.S | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/kernel/entry_64.S =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/entry_64.S +++ linux-rt-rebase.q/arch/powerpc/kernel/entry_64.S @@ -560,14 +560,9 @@ do_work: cmpdi r0,0 crandc eq,cr1*4+eq,eq bne restore - /* here we are preempting the current task */ 1: - li r0,1 - stb r0,PACASOFTIRQEN(r13) - stb r0,PACAHARDIRQEN(r13) - ori r10,r10,MSR_EE - mtmsrd r10,1 /* reenable interrupts */ - bl .preempt_schedule + /* preempt_schedule_irq() expects interrupts disabled. */ + bl .preempt_schedule_irq mfmsr r10 clrrdi r9,r1,THREAD_SHIFT rldicl r10,r10,48,1 /* disable interrupts again */ patches/quicklist-release-before-free-page-fix.patch0000664000077200007720000000541710655544577022076 0ustar mingomingo--- include/linux/quicklist.h | 19 ++++++++----------- mm/quicklist.c | 8 ++------ 2 files changed, 10 insertions(+), 17 deletions(-) Index: linux-rt-rebase.q/include/linux/quicklist.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/quicklist.h +++ linux-rt-rebase.q/include/linux/quicklist.h @@ -30,13 +30,10 @@ DECLARE_PER_CPU_LOCKED(struct quicklist, * The fast patch in quicklist_alloc touched only a per cpu cacheline and * the first cacheline of the page itself. There is minmal overhead involved. 
*/ -static inline void *__quicklist_alloc(int cpu, int nr, gfp_t flags, void (*ctor)(void *)) +static inline void *__quicklist_alloc(struct quicklist *q) { - struct quicklist *q; - void **p = NULL; + void **p = q->page; - q = &__get_cpu_var_locked(quicklist, cpu)[nr]; - p = q->page; if (likely(p)) { q->page = p[0]; p[0] = NULL; @@ -48,11 +45,11 @@ static inline void *__quicklist_alloc(in static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) { struct quicklist *q; - void **p = NULL; + void **p; int cpu; - (void)get_cpu_var_locked(quicklist, &cpu)[nr]; - p = __quicklist_alloc(cpu, nr, flags, ctor); + q = &get_cpu_var_locked(quicklist, &cpu)[nr]; + p = __quicklist_alloc(q); put_cpu_var_locked(quicklist, cpu); if (likely(p)) return p; @@ -67,7 +64,7 @@ static inline void __quicklist_free(int struct page *page) { struct quicklist *q; - int nid = page_to_nid(page); + int cpu, nid = page_to_nid(page); if (unlikely(nid != numa_node_id())) { if (dtor) @@ -76,11 +73,11 @@ static inline void __quicklist_free(int return; } - q = &get_cpu_var(quicklist)[nr]; + q = &get_cpu_var_locked(quicklist, &cpu)[nr]; *(void **)p = q->page; q->page = p; q->nr_pages++; - put_cpu_var(quicklist); + put_cpu_var_locked(quicklist, cpu); } static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) Index: linux-rt-rebase.q/mm/quicklist.c =================================================================== --- linux-rt-rebase.q.orig/mm/quicklist.c +++ linux-rt-rebase.q/mm/quicklist.c @@ -58,11 +58,7 @@ void quicklist_trim(int nr, void (*dtor) pages_to_free = min_pages_to_free(q, min_pages, max_free); while (pages_to_free > 0) { - /* - * We pass a gfp_t of 0 to quicklist_alloc here - * because we will never call into the page allocator. - */ - void *p = __quicklist_alloc(cpu, nr, 0, NULL); + void *p = __quicklist_alloc(q); if (dtor) dtor(p); @@ -80,7 +76,7 @@ unsigned long quicklist_total_size(void) struct quicklist *ql, *q; for_each_online_cpu(cpu) { - ql = per_cpu(quicklist, cpu); + ql = per_cpu_var_locked(quicklist, cpu); for (q = ql; q < ql + CONFIG_NR_QUICK; q++) count += q->nr_pages; } patches/ppc-a-2.patch0000664000077200007720000000162710655544572013724 0ustar mingomingo To fix the following compile error by replacing the deleted structure member "is_continuous" with "flags". 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - arch/powerpc/kernel/time.c arch/powerpc/kernel/time.c:938: error: unknown field 'is_continuous' specified in initializer - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Signed-off-by: Tsutomu Owa -- owa --- arch/powerpc/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -940,7 +940,7 @@ struct clocksource clocksource_timebase .mask = (cycle_t)-1, .mult = 0, .shift = 22, - .is_continuous = 1, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; patches/rt-mutex-i386.patch0000664000077200007720000005572110655544573015046 0ustar mingomingo--- arch/i386/Kconfig.cpu | 8 +++- arch/i386/kernel/apm.c | 2 - arch/i386/kernel/entry.S | 4 +- arch/i386/kernel/i386_ksyms.c | 12 +++--- arch/i386/kernel/process.c | 10 ++--- arch/i386/lib/semaphore.S | 24 ++++++------- include/asm-i386/rwsem.h | 41 +++++++++++------------ include/asm-i386/semaphore.h | 67 ++++++++++++++++++++++---------------- include/asm-i386/spinlock.h | 36 ++++++++++---------- include/asm-i386/spinlock_types.h | 4 +- include/asm-i386/thread_info.h | 3 + 11 files changed, 117 insertions(+), 94 deletions(-) Index: linux-rt-rebase.q/arch/i386/Kconfig.cpu =================================================================== --- linux-rt-rebase.q.orig/arch/i386/Kconfig.cpu +++ linux-rt-rebase.q/arch/i386/Kconfig.cpu @@ -247,12 +247,16 @@ config X86_XADD config RWSEM_GENERIC_SPINLOCK bool - depends on !X86_XADD + depends on !X86_XADD || PREEMPT_RT + default y + +config ASM_SEMAPHORES + bool default y config RWSEM_XCHGADD_ALGORITHM bool - depends on X86_XADD + depends on X86_XADD && !RWSEM_GENERIC_SPINLOCK default y config ARCH_HAS_ILOG2_U32 Index: linux-rt-rebase.q/arch/i386/kernel/apm.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/apm.c +++ linux-rt-rebase.q/arch/i386/kernel/apm.c @@ -783,7 +783,7 @@ static int apm_do_idle(void) */ smp_mb(); } - if (!need_resched()) { + if (!need_resched() && !need_resched_delayed()) { idled = 1; ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax); } Index: linux-rt-rebase.q/arch/i386/kernel/entry.S =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/entry.S +++ linux-rt-rebase.q/arch/i386/kernel/entry.S @@ -478,7 +478,7 @@ ENDPROC(system_call) ALIGN RING0_PTREGS_FRAME # can't unwind into user space anyway work_pending: - testb $_TIF_NEED_RESCHED, %cl + testl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED), %ecx jz work_notifysig work_resched: call schedule @@ -490,7 +490,7 @@ work_resched: andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? 
jz restore_all - testb $_TIF_NEED_RESCHED, %cl + testl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED), %ecx jnz work_resched work_notifysig: # deal with pending signals and Index: linux-rt-rebase.q/arch/i386/kernel/i386_ksyms.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/i386_ksyms.c +++ linux-rt-rebase.q/arch/i386/kernel/i386_ksyms.c @@ -2,10 +2,12 @@ #include #include -EXPORT_SYMBOL(__down_failed); -EXPORT_SYMBOL(__down_failed_interruptible); -EXPORT_SYMBOL(__down_failed_trylock); -EXPORT_SYMBOL(__up_wakeup); +#ifdef CONFIG_ASM_SEMAPHORES +EXPORT_SYMBOL(__compat_down_failed); +EXPORT_SYMBOL(__compat_down_failed_interruptible); +EXPORT_SYMBOL(__compat_down_failed_trylock); +EXPORT_SYMBOL(__compat_up_wakeup); +#endif /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_generic); @@ -20,7 +22,7 @@ EXPORT_SYMBOL(__put_user_8); EXPORT_SYMBOL(strstr); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_ASM_SEMAPHORES) extern void FASTCALL( __write_lock_failed(rwlock_t *rw)); extern void FASTCALL( __read_lock_failed(rwlock_t *rw)); EXPORT_SYMBOL(__write_lock_failed); Index: linux-rt-rebase.q/arch/i386/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/process.c +++ linux-rt-rebase.q/arch/i386/kernel/process.c @@ -115,7 +115,7 @@ void default_idle(void) smp_mb(); local_irq_disable(); - if (!need_resched()) + if (!need_resched() && !need_resched_delayed()) safe_halt(); /* enables interrupts racelessly */ else local_irq_enable(); @@ -179,7 +179,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - while (!need_resched()) { + while (!need_resched() && !need_resched_delayed()) { void (*idle)(void); if (__get_cpu_var(cpu_idle_state)) @@ -201,7 +201,7 @@ void cpu_idle(void) } trace_preempt_exit_idle(); tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); schedule(); preempt_disable(); trace_preempt_enter_idle(); @@ -250,10 +250,10 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); */ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) { - if (!need_resched()) { + if (!need_resched() && !need_resched_delayed()) { __monitor((void *)¤t_thread_info()->flags, 0, 0); smp_mb(); - if (!need_resched()) + if (!need_resched() && !need_resched_delayed()) __mwait(eax, ecx); } } Index: linux-rt-rebase.q/arch/i386/lib/semaphore.S =================================================================== --- linux-rt-rebase.q.orig/arch/i386/lib/semaphore.S +++ linux-rt-rebase.q/arch/i386/lib/semaphore.S @@ -30,7 +30,7 @@ * value or just clobbered.. 
*/ .section .sched.text -ENTRY(__down_failed) +ENTRY(__compat_down_failed) CFI_STARTPROC FRAME pushl %edx @@ -39,7 +39,7 @@ ENTRY(__down_failed) pushl %ecx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ecx,0 - call __down + call __compat_down popl %ecx CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE ecx @@ -49,9 +49,9 @@ ENTRY(__down_failed) ENDFRAME ret CFI_ENDPROC - END(__down_failed) + END(__compat_down_failed) -ENTRY(__down_failed_interruptible) +ENTRY(__compat_down_failed_interruptible) CFI_STARTPROC FRAME pushl %edx @@ -60,7 +60,7 @@ ENTRY(__down_failed_interruptible) pushl %ecx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ecx,0 - call __down_interruptible + call __compat_down_interruptible popl %ecx CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE ecx @@ -70,9 +70,9 @@ ENTRY(__down_failed_interruptible) ENDFRAME ret CFI_ENDPROC - END(__down_failed_interruptible) + END(__compat_down_failed_interruptible) -ENTRY(__down_failed_trylock) +ENTRY(__compat_down_failed_trylock) CFI_STARTPROC FRAME pushl %edx @@ -81,7 +81,7 @@ ENTRY(__down_failed_trylock) pushl %ecx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ecx,0 - call __down_trylock + call __compat_down_trylock popl %ecx CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE ecx @@ -91,9 +91,9 @@ ENTRY(__down_failed_trylock) ENDFRAME ret CFI_ENDPROC - END(__down_failed_trylock) + END(__compat_down_failed_trylock) -ENTRY(__up_wakeup) +ENTRY(__compat_up_wakeup) CFI_STARTPROC FRAME pushl %edx @@ -102,7 +102,7 @@ ENTRY(__up_wakeup) pushl %ecx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ecx,0 - call __up + call __compat_up popl %ecx CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE ecx @@ -112,7 +112,7 @@ ENTRY(__up_wakeup) ENDFRAME ret CFI_ENDPROC - END(__up_wakeup) + END(__compat_up_wakeup) /* * rw spinlock fallbacks Index: linux-rt-rebase.q/include/asm-i386/rwsem.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/rwsem.h +++ linux-rt-rebase.q/include/asm-i386/rwsem.h @@ -44,15 +44,15 @@ struct rwsem_waiter; -extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *)); -extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem)); +extern struct compat_rw_semaphore *FASTCALL(rwsem_down_read_failed(struct compat_rw_semaphore *sem)); +extern struct compat_rw_semaphore *FASTCALL(rwsem_down_write_failed(struct compat_rw_semaphore *sem)); +extern struct compat_rw_semaphore *FASTCALL(rwsem_wake(struct compat_rw_semaphore *)); +extern struct compat_rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct compat_rw_semaphore *sem)); /* * the semaphore definition */ -struct rw_semaphore { +struct compat_rw_semaphore { signed long count; #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 @@ -78,23 +78,23 @@ struct rw_semaphore { { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define COMPAT_DECLARE_RWSEM(name) \ + struct compat_rw_semaphore name = __RWSEM_INITIALIZER(name) -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, +extern void __compat_init_rwsem(struct rw_semaphore *sem, const char *name, struct lock_class_key *key); -#define init_rwsem(sem) \ +#define compat_init_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - 
__init_rwsem((sem), #sem, &__key); \ + __compat_init_rwsem((sem), #sem, &__key); \ } while (0) /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct compat_rw_semaphore *sem) { __asm__ __volatile__( "# beginning down_read\n\t" @@ -111,7 +111,7 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* /* * trylock for reading -- returns 1 if successful, 0 if contention */ -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct compat_rw_semaphore *sem) { __s32 result, tmp; __asm__ __volatile__( @@ -134,7 +134,8 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t" /* * lock for writing */ -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +static inline void +__down_write_nested(struct compat_rw_semaphore *sem, int subclass) { int tmp; @@ -160,7 +161,7 @@ static inline void __down_write(struct r /* * trylock for writing -- returns 1 if successful, 0 if contention */ -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct compat_rw_semaphore *sem) { signed long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, @@ -173,7 +174,7 @@ static inline int __down_write_trylock(s /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct compat_rw_semaphore *sem) { __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; __asm__ __volatile__( @@ -191,7 +192,7 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct compat_rw_semaphore *sem) { __asm__ __volatile__( "# beginning __up_write\n\t" @@ -209,7 +210,7 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct compat_rw_semaphore *sem) { __asm__ __volatile__( "# beginning __downgrade_write\n\t" @@ -226,7 +227,7 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct compat_rw_semaphore *sem) { __asm__ __volatile__( LOCK_PREFIX "addl %1,%0" @@ -237,7 +238,7 @@ LOCK_PREFIX "addl %1,%0" /* * implement exchange and add functionality */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct compat_rw_semaphore *sem) { int tmp = delta; @@ -249,7 +250,7 @@ LOCK_PREFIX "xadd %0,%1" return tmp+delta; } -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline int compat_rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); } Index: linux-rt-rebase.q/include/asm-i386/semaphore.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/semaphore.h +++ linux-rt-rebase.q/include/asm-i386/semaphore.h @@ -3,8 +3,6 @@ #include -#ifdef __KERNEL__ - /* * SMP- and interrupt-safe semaphores.. 
* @@ -41,30 +39,40 @@ #include #include -struct semaphore { +/* + * On !PREEMPT_RT all semaphores are compat: + */ +#ifndef CONFIG_PREEMPT_RT +# define compat_semaphore semaphore +#endif + +struct compat_semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; }; -#define __SEMAPHORE_INITIALIZER(name, n) \ +#define __COMPAT_SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .sleepers = 0, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) +#define __COMPAT_MUTEX_INITIALIZER(name) \ + __COMPAT_SEMAPHORE_INITIALIZER(name,1) -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) -#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) +#define __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct compat_semaphore name = __COMPAT_SEMAPHORE_INITIALIZER(name,count) -static inline void sema_init (struct semaphore *sem, int val) +#define COMPAT_DECLARE_MUTEX(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,1) +#define COMPAT_DECLARE_MUTEX_LOCKED(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,0) + +static inline void compat_sema_init (struct compat_semaphore *sem, int val) { /* - * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); + * *sem = (struct compat_semaphore)__SEMAPHORE_INITIALIZER((*sem),val); * * i'd rather use the more flexible initialization above, but sadly * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. @@ -74,27 +82,27 @@ static inline void sema_init (struct sem init_waitqueue_head(&sem->wait); } -static inline void init_MUTEX (struct semaphore *sem) +static inline void compat_init_MUTEX (struct compat_semaphore *sem) { - sema_init(sem, 1); + compat_sema_init(sem, 1); } -static inline void init_MUTEX_LOCKED (struct semaphore *sem) +static inline void compat_init_MUTEX_LOCKED (struct compat_semaphore *sem) { - sema_init(sem, 0); + compat_sema_init(sem, 0); } -fastcall void __down_failed(void /* special register calling convention */); -fastcall int __down_failed_interruptible(void /* params in registers */); -fastcall int __down_failed_trylock(void /* params in registers */); -fastcall void __up_wakeup(void /* special register calling convention */); +fastcall void __compat_down_failed(void /* special register calling convention */); +fastcall int __compat_down_failed_interruptible(void /* params in registers */); +fastcall int __compat_down_failed_trylock(void /* params in registers */); +fastcall void __compat_up_wakeup(void /* special register calling convention */); /* * This is ugly, but we want the default case to fall through. * "__down_failed" is a special asm handler that calls the C * routine that actually waits. See arch/i386/kernel/semaphore.c */ -static inline void down(struct semaphore * sem) +static inline void compat_down(struct compat_semaphore * sem) { might_sleep(); __asm__ __volatile__( @@ -102,7 +110,7 @@ static inline void down(struct semaphore LOCK_PREFIX "decl %0\n\t" /* --sem->count */ "jns 2f\n" "\tlea %0,%%eax\n\t" - "call __down_failed\n" + "call __compat_down_failed\n" "2:" :"+m" (sem->count) : @@ -113,7 +121,7 @@ static inline void down(struct semaphore * Interruptible try to acquire a semaphore. If we obtained * it, return zero. 
If we were interrupted, returns -EINTR */ -static inline int down_interruptible(struct semaphore * sem) +static inline int compat_down_interruptible(struct compat_semaphore * sem) { int result; @@ -124,7 +132,7 @@ static inline int down_interruptible(str LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "jns 2f\n\t" "lea %1,%%eax\n\t" - "call __down_failed_interruptible\n" + "call __compat_down_failed_interruptible\n" "2:" :"=&a" (result), "+m" (sem->count) : @@ -136,7 +144,7 @@ static inline int down_interruptible(str * Non-blockingly attempt to down() a semaphore. * Returns zero if we acquired it */ -static inline int down_trylock(struct semaphore * sem) +static inline int compat_down_trylock(struct compat_semaphore * sem) { int result; @@ -146,7 +154,7 @@ static inline int down_trylock(struct se LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "jns 2f\n\t" "lea %1,%%eax\n\t" - "call __down_failed_trylock\n\t" + "call __compat_down_failed_trylock\n\t" "2:\n" :"=&a" (result), "+m" (sem->count) : @@ -158,19 +166,24 @@ static inline int down_trylock(struct se * Note! This is subtle. We jump to wake people up only if * the semaphore was negative (== somebody was waiting on it). */ -static inline void up(struct semaphore * sem) +static inline void compat_up(struct compat_semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ "jg 1f\n\t" "lea %0,%%eax\n\t" - "call __up_wakeup\n" + "call __compat_up_wakeup\n" "1:" :"+m" (sem->count) : :"memory","ax"); } -#endif +extern int FASTCALL(compat_sem_is_locked(struct compat_semaphore *sem)); + +#define compat_sema_count(sem) atomic_read(&(sem)->count) + +#include + #endif Index: linux-rt-rebase.q/include/asm-i386/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/spinlock.h +++ linux-rt-rebase.q/include/asm-i386/spinlock.h @@ -27,12 +27,12 @@ * (the type definitions are in asm/spinlock_types.h) */ -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int __raw_spin_is_locked(__raw_spinlock_t *x) { return *(volatile signed char *)(&(x)->slock) <= 0; } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(__raw_spinlock_t *lock) { asm volatile("\n1:\t" LOCK_PREFIX " ; decb %0\n\t" @@ -55,7 +55,7 @@ static inline void __raw_spin_lock(raw_s * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant. 
*/ #ifndef CONFIG_PROVE_LOCKING -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +static inline void __raw_spin_lock_flags(__raw_spinlock_t *lock, unsigned long flags) { asm volatile( "\n1:\t" @@ -84,7 +84,7 @@ static inline void __raw_spin_lock_flags } #endif -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(__raw_spinlock_t *lock) { char oldval; asm volatile( @@ -103,14 +103,14 @@ static inline int __raw_spin_trylock(raw #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(__raw_spinlock_t *lock) { asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory"); } #else -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(__raw_spinlock_t *lock) { char oldval = 1; @@ -121,7 +121,7 @@ static inline void __raw_spin_unlock(raw #endif -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(__raw_spinlock_t *lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); @@ -152,7 +152,7 @@ static inline void __raw_spin_unlock_wai * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_read_can_lock(raw_rwlock_t *x) +static inline int __raw_read_can_lock(__raw_rwlock_t *x) { return (int)(x)->lock > 0; } @@ -161,12 +161,12 @@ static inline int __raw_read_can_lock(ra * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_write_can_lock(raw_rwlock_t *x) +static inline int __raw_write_can_lock(__raw_rwlock_t *x) { return (x)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" "jns 1f\n" @@ -175,7 +175,7 @@ static inline void __raw_read_lock(raw_r ::"a" (rw) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" "jz 1f\n" @@ -184,7 +184,7 @@ static inline void __raw_write_lock(raw_ ::"a" (rw) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int __raw_read_trylock(__raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; atomic_dec(count); @@ -194,7 +194,7 @@ static inline int __raw_read_trylock(raw return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int __raw_write_trylock(__raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) @@ -203,19 +203,19 @@ static inline int __raw_write_trylock(ra return 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" : "+m" (rw->lock) : : "memory"); } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define __raw_spin_relax(lock) cpu_relax() +#define __raw_read_relax(lock) cpu_relax() +#define __raw_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ Index: 
linux-rt-rebase.q/include/asm-i386/spinlock_types.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/spinlock_types.h +++ linux-rt-rebase.q/include/asm-i386/spinlock_types.h @@ -7,13 +7,13 @@ typedef struct { unsigned int slock; -} raw_spinlock_t; +} __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } typedef struct { unsigned int lock; -} raw_rwlock_t; +} __raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } Index: linux-rt-rebase.q/include/asm-i386/thread_info.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/thread_info.h +++ linux-rt-rebase.q/include/asm-i386/thread_info.h @@ -132,15 +132,18 @@ static inline struct thread_info *curren #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ #define TIF_SECCOMP 7 /* secure computing */ #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ +#define TIF_NEED_RESCHED_DELAYED 10 /* reschedule on return to userspace */ #define TIF_MEMDIE 16 #define TIF_DEBUG 17 /* uses debug registers */ #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ #define TIF_FREEZE 19 /* is freezing for suspend */ #define TIF_NOTSC 20 /* TSC is not accessible in userland */ + #define _TIF_SYSCALL_TRACE (1< --- include/linux/mutex.h | 16 ++++++---- include/linux/rt_lock.h | 70 ++++++++++++++++++++---------------------------- 2 files changed, 39 insertions(+), 47 deletions(-) Index: linux-rt-rebase.q/include/linux/mutex.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/mutex.h +++ linux-rt-rebase.q/include/linux/mutex.h @@ -18,6 +18,13 @@ #include +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + #ifdef CONFIG_PREEMPT_RT #include @@ -29,9 +36,11 @@ struct mutex { #endif }; + #define __MUTEX_INITIALIZER(mutexname) \ { \ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ } #define DEFINE_MUTEX(mutexname) \ @@ -140,13 +149,6 @@ do { \ # define mutex_destroy(mutex) do { } while (0) #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { .name = #lockname } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - #define __MUTEX_INITIALIZER(lockname) \ { .count = ATOMIC_INIT(1) \ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ Index: linux-rt-rebase.q/include/linux/rt_lock.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/rt_lock.h +++ linux-rt-rebase.q/include/linux/rt_lock.h @@ -27,30 +27,31 @@ typedef struct { } spinlock_t; #ifdef CONFIG_DEBUG_RT_MUTEXES -# define __SPIN_LOCK_UNLOCKED(name) \ - (spinlock_t) { { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) \ - , .save_state = 1, .file = __FILE__, .line = __LINE__ }, SPIN_DEP_MAP_INIT(name) } +# define __RT_SPIN_INITIALIZER(name) \ + { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ + .save_state = 1, \ + .file = __FILE__, \ + .line = __LINE__, } #else -# define __SPIN_LOCK_UNLOCKED(name) \ - (spinlock_t) { { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) }, SPIN_DEP_MAP_INIT(name) } +# define __RT_SPIN_INITIALIZER(name) \ + { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) } #endif -# define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(spin_old_style) + +#define __SPIN_LOCK_UNLOCKED(name) (spinlock_t) \ + { 
.lock = __RT_SPIN_INITIALIZER(name), \ + SPIN_DEP_MAP_INIT(name) } + #else /* !PREEMPT_RT */ - typedef raw_spinlock_t spinlock_t; -# ifdef CONFIG_DEBUG_SPINLOCK -# define _SPIN_LOCK_UNLOCKED \ - { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ - .magic = SPINLOCK_MAGIC, \ - .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1 } -# else -# define _SPIN_LOCK_UNLOCKED \ - { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } -# endif -# define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED -# define __SPIN_LOCK_UNLOCKED(name) _SPIN_LOCK_UNLOCKED + +typedef raw_spinlock_t spinlock_t; + +#define __SPIN_LOCK_UNLOCKED _RAW_SPIN_LOCK_UNLOCKED + #endif +#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(spin_old_style) + + #define __DEFINE_SPINLOCK(name) \ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) @@ -89,32 +90,20 @@ typedef struct { #endif } rwlock_t; -# ifdef CONFIG_DEBUG_RT_MUTEXES -# define __RW_LOCK_UNLOCKED(name) (rwlock_t) \ - { .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name), \ - .save_state = 1, .file = __FILE__, .line = __LINE__ } } -# else -# define __RW_LOCK_UNLOCKED(name) (rwlock_t) \ - { .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) } } -# endif +#define __RW_LOCK_UNLOCKED(name) (rwlock_t) \ + { .lock = __RT_SPIN_INITIALIZER(name), \ + RW_DEP_MAP_INIT(name) } #else /* !PREEMPT_RT */ - typedef raw_rwlock_t rwlock_t; -# ifdef CONFIG_DEBUG_SPINLOCK -# define _RW_LOCK_UNLOCKED \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ - .magic = RWLOCK_MAGIC, \ - .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1 } -# else -# define _RW_LOCK_UNLOCKED \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } -# endif -# define __RW_LOCK_UNLOCKED(name) _RW_LOCK_UNLOCKED +typedef raw_rwlock_t rwlock_t; + +#define __RW_LOCK_UNLOCKED _RAW_RW_LOCK_UNLOCKED + #endif #define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(rw_old_style) + #define DEFINE_RWLOCK(name) \ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) @@ -243,7 +232,8 @@ do { \ */ #define __RWSEM_INITIALIZER(name) \ - { .lock = __RT_MUTEX_INITIALIZER(name.lock) } + { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ + RW_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(lockname) \ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) patches/rt-mutex-spinlock-might-sleep.patch0000664000077200007720000000450210655544573020402 0ustar mingomingoFrom rostedt@goodmis.org Sat Jun 2 00:35:54 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=ham version=3.1.7-deb Received: from ms-smtp-01.nyroc.rr.com (ms-smtp-01.nyroc.rr.com [24.24.2.55]) by mail.tglx.de (Postfix) with ESMTP id C420E65C065 for ; Sat, 2 Jun 2007 00:35:54 +0200 (CEST) Received: from [192.168.23.10] (cpe-24-94-51-176.stny.res.rr.com [24.94.51.176]) by ms-smtp-01.nyroc.rr.com (8.13.6/8.13.6) with ESMTP id l51MZLun018065; Fri, 1 Jun 2007 18:35:24 -0400 (EDT) Subject: [PATCH RT] add might_sleep in rt_spin_lock_fastlock From: Steven Rostedt To: Ingo Molnar Cc: Thomas Gleixner , Arnaldo Carvalho de Melo , LKML Content-Type: multipart/mixed; boundary="=-jgTmng/RcFNHiVaU9w/Z" Date: Fri, 01 Jun 2007 18:35:21 -0400 Message-Id: <1180737321.21781.46.camel@localhost.localdomain> Mime-Version: 1.0 X-Mailer: Evolution 2.6.3 X-Virus-Scanned: Symantec AntiVirus Scan Engine X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ --=-jgTmng/RcFNHiVaU9w/Z Content-Type: text/plain Content-Transfer-Encoding: 8bit Ingo, Every so often we get bit by a bug "scheduling in atomic", and it comes from a 
rtmutex spin_lock. The bug only happens when that lock has contention, so we miss it a lot. This patch adds a might_sleep() to the rt_spin_lock_fastlock to find bugs where we can schedule in atomic. The one place that exists now is from do_page_fault and sending a signal. I wrote a simple crash program that segfaults (attached) and with this patch, I get the warning. -- Steve Signed-off-by: Steven Rostedt --- kernel/rtmutex.c | 2 ++ 1 file changed, 2 insertions(+) Index: linux-rt-rebase.q/kernel/rtmutex.c =================================================================== --- linux-rt-rebase.q.orig/kernel/rtmutex.c +++ linux-rt-rebase.q/kernel/rtmutex.c @@ -631,6 +631,8 @@ static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, void fastcall (*slowfn)(struct rt_mutex *lock)) { + might_sleep(); + if (likely(rt_mutex_cmpxchg(lock, NULL, current))) rt_mutex_deadlock_account_lock(lock, current); else patches/netpoll-8139too-fix.patch0000664000077200007720000000114210655544571016141 0ustar mingomingo--- drivers/net/8139too.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) Index: linux/drivers/net/8139too.c =================================================================== --- linux.orig/drivers/net/8139too.c +++ linux/drivers/net/8139too.c @@ -2211,7 +2211,11 @@ static irqreturn_t rtl8139_interrupt (in */ static void rtl8139_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); + /* + * use _nosync() variant - might be used by netconsole + * from atomic contexts: + */ + disable_irq_nosync(dev->irq); rtl8139_interrupt(dev->irq, dev); enable_irq(dev->irq); } patches/preempt-realtime-printk.patch0000664000077200007720000001055510655544575017351 0ustar mingomingo--- kernel/printk.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 8 deletions(-) Index: linux-rt-rebase.q/kernel/printk.c =================================================================== --- linux-rt-rebase.q.orig/kernel/printk.c +++ linux-rt-rebase.q/kernel/printk.c @@ -82,7 +82,7 @@ static int console_locked, console_suspe * It is also used in interesting ways to provide interlocking in * release_console_sem(). */ -static DEFINE_SPINLOCK(logbuf_lock); +static DEFINE_RAW_SPINLOCK(logbuf_lock); #define LOG_BUF_MASK (log_buf_len-1) #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) @@ -327,7 +327,7 @@ static void __call_console_drivers(unsig touch_critical_timing(); for (con = console_drivers; con; con = con->next) { if ((con->flags & CON_ENABLED) && con->write && - (cpu_online(smp_processor_id()) || + (cpu_online(raw_smp_processor_id()) || (con->flags & CON_ANYTIME))) { /* * Disable tracing of printk details - it just @@ -459,6 +459,7 @@ static void zap_locks(void) spin_lock_init(&logbuf_lock); /* And make sure that we print immediately */ init_MUTEX(&console_sem); + zap_rt_locks(); } #if defined(CONFIG_PRINTK_TIME) @@ -555,6 +556,7 @@ asmlinkage int vprintk(const char *fmt, lockdep_off(); spin_lock(&logbuf_lock); printk_cpu = smp_processor_id(); + preempt_enable(); /* Emit the output into the temporary buffer */ printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args); @@ -624,6 +626,8 @@ asmlinkage int vprintk(const char *fmt, console_locked = 1; printk_cpu = UINT_MAX; spin_unlock(&logbuf_lock); + lockdep_on(); + local_irq_restore(flags); /* * Console drivers may assume that per-cpu resources have @@ -631,7 +635,7 @@ asmlinkage int vprintk(const char *fmt, * being able to cope (CON_ANYTIME) don't call them until * this CPU is officially up. 
*/ - if (cpu_online(smp_processor_id()) || have_callable_console()) { + if (cpu_online(raw_smp_processor_id()) || have_callable_console()) { console_may_schedule = 0; release_console_sem(); } else { @@ -639,8 +643,6 @@ asmlinkage int vprintk(const char *fmt, console_locked = 0; up(&console_sem); } - lockdep_on(); - raw_local_irq_restore(flags); } else { /* * Someone else owns the drivers. We drop the spinlock, which @@ -653,7 +655,6 @@ asmlinkage int vprintk(const char *fmt, raw_local_irq_restore(flags); } - preempt_enable(); return printed_len; } EXPORT_SYMBOL(printk); @@ -865,13 +866,33 @@ void release_console_sem(void) _con_start = con_start; _log_end = log_end; con_start = log_end; /* Flush */ + /* + * on PREEMPT_RT, call console drivers with + * interrupts enabled (if printk was called + * with interrupts disabled): + */ +#ifdef CONFIG_PREEMPT_RT + spin_unlock_irqrestore(&logbuf_lock, flags); +#else spin_unlock(&logbuf_lock); +#endif call_console_drivers(_con_start, _log_end); +#ifndef CONFIG_PREEMPT_RT local_irq_restore(flags); +#endif } console_locked = 0; - up(&console_sem); spin_unlock_irqrestore(&logbuf_lock, flags); + up(&console_sem); + /* + * On PREEMPT_RT kernels __wake_up may sleep, so wake syslogd + * up only if we are in a preemptible section. We normally dont + * printk from non-preemptible sections so this is for the emergency + * case only. + */ +#ifdef CONFIG_PREEMPT_RT + if (!in_atomic() && !irqs_disabled()) +#endif if (wake_klogd) wake_up_klogd(); } @@ -1125,7 +1146,7 @@ void tty_write_message(struct tty_struct */ int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst) { - static DEFINE_SPINLOCK(ratelimit_lock); + static DEFINE_RAW_SPINLOCK(ratelimit_lock); static unsigned long toks = 10 * 5 * HZ; static unsigned long last_msg; static int missed; @@ -1166,6 +1187,23 @@ int printk_ratelimit(void) } EXPORT_SYMBOL(printk_ratelimit); +static DEFINE_RAW_SPINLOCK(warn_lock); + +void __WARN_ON(const char *func, const char *file, const int line) +{ + unsigned long flags; + + spin_lock_irqsave(&warn_lock, flags); + printk("%s/%d[CPU#%d]: BUG in %s at %s:%d\n", + current->comm, current->pid, raw_smp_processor_id(), + func, file, line); + dump_stack(); + spin_unlock_irqrestore(&warn_lock, flags); +} + +EXPORT_SYMBOL(__WARN_ON); + + /** * printk_timed_ratelimit - caller-controlled printk ratelimiting * @caller_jiffies: pointer to caller's state patches/rt-mutex-delayed-resched.patch0000664000077200007720000001130410655544573017364 0ustar mingomingo--- drivers/acpi/processor_idle.c | 6 +++--- include/linux/preempt.h | 16 ++++++++++++++++ include/linux/sched.h | 23 ++++++++++++++++++++++- kernel/sched.c | 10 +++++++--- 4 files changed, 48 insertions(+), 7 deletions(-) Index: linux-rt-rebase.q/drivers/acpi/processor_idle.c =================================================================== --- linux-rt-rebase.q.orig/drivers/acpi/processor_idle.c +++ linux-rt-rebase.q/drivers/acpi/processor_idle.c @@ -885,7 +885,7 @@ static int acpi_idle_enter_c1(struct cpu * NEED_RESCHED: */ smp_mb(); - if (!need_resched()) + if (!need_resched() || !need_resched_delayed()) safe_halt(); current_thread_info()->status |= TS_POLLING; @@ -921,7 +921,7 @@ static int acpi_idle_enter_c2(struct cpu */ smp_mb(); - if (unlikely(need_resched())) { + if (unlikely(need_resched() || need_resched_delayed())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); return 0; @@ -979,7 +979,7 @@ static int acpi_idle_enter_c3(struct cpu */ smp_mb(); - if (unlikely(need_resched())) { + if 
(unlikely(need_resched() || need_resched_delayed())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); return 0; Index: linux-rt-rebase.q/include/linux/preempt.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/preempt.h +++ linux-rt-rebase.q/include/linux/preempt.h @@ -68,6 +68,21 @@ do { \ preempt_schedule(); \ } while (0) + +/* + * If the architecture doens't have TIF_NEED_RESCHED_DELAYED + * help it out and define it back to TIF_NEED_RESCHED + */ +#ifndef TIF_NEED_RESCHED_DELAYED +# define TIF_NEED_RESCHED_DELAYED TIF_NEED_RESCHED +#endif + +#define preempt_check_resched_delayed() \ +do { \ + if (unlikely(test_thread_flag(TIF_NEED_RESCHED_DELAYED))) \ + preempt_schedule(); \ +} while (0) + #define preempt_enable() \ do { \ __preempt_enable_no_resched(); \ @@ -82,6 +97,7 @@ do { \ #define __preempt_enable_no_resched() do { } while (0) #define preempt_enable() do { } while (0) #define preempt_check_resched() do { } while (0) +#define preempt_check_resched_delayed() do { } while (0) #define preempt_schedule_irq() do { } while (0) Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -1880,11 +1880,32 @@ static inline int signal_pending(struct return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); } -static inline int need_resched(void) +static inline int _need_resched(void) { return unlikely(test_thread_flag(TIF_NEED_RESCHED)); } +static inline int need_resched(void) +{ + touch_critical_timing(); + return _need_resched(); +} + +static inline void set_tsk_need_resched_delayed(struct task_struct *tsk) +{ + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_DELAYED); +} + +static inline void clear_tsk_need_resched_delayed(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_DELAYED); +} + +static inline int need_resched_delayed(void) +{ + return unlikely(test_thread_flag(TIF_NEED_RESCHED_DELAYED)); +} + /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. 
The return Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -3414,6 +3414,7 @@ need_resched_nonpreemptible: spin_lock_irq(&rq->lock); clear_tsk_need_resched(prev); + clear_tsk_need_resched_delayed(prev); if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely((prev->state & TASK_INTERRUPTIBLE) && @@ -3451,7 +3452,8 @@ need_resched_nonpreemptible: goto need_resched_nonpreemptible; } __preempt_enable_no_resched(); - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) + if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || + test_thread_flag(TIF_NEED_RESCHED_DELAYED))) goto need_resched; } EXPORT_SYMBOL(schedule); @@ -3495,7 +3497,8 @@ need_resched: /* we could miss a preemption opportunity between schedule and now */ barrier(); - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) + if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || + test_thread_flag(TIF_NEED_RESCHED_DELAYED))) goto need_resched; } EXPORT_SYMBOL(preempt_schedule); @@ -3537,7 +3540,8 @@ need_resched: /* we could miss a preemption opportunity between schedule and now */ barrier(); - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) + if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || + test_thread_flag(TIF_NEED_RESCHED_DELAYED))) goto need_resched; } patches/rt-mutex-ppc-fix-a5.patch0000664000077200007720000000510310655544573016213 0ustar mingomingo To fix the following compile error by changing names from __{read,write}_trylock to ___raw_{read,write}_trylock in asm-powerpc/spinlock.h - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - include/asm-powerpc/spinlock.h include/linux/spinlock_api_smp.h:49: error: conflicting types for '__read_trylock' include/asm/spinlock.h:183: error: previous definition of '__read_trylock' was here include/linux/spinlock_api_smp.h:50: error: conflicting types for '__write_trylock' include/asm/spinlock.h:207: error: previous definition of '__write_trylock' was here - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Signed-off-by: Tsutomu Owa -- owa --- include/asm-powerpc/spinlock.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) Index: linux-rt-rebase.q/include/asm-powerpc/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/spinlock.h +++ linux-rt-rebase.q/include/asm-powerpc/spinlock.h @@ -179,7 +179,7 @@ extern void __raw_spin_unlock_wait(__raw * This returns the old value in the lock + 1, * so we got a read lock if the return value is > 0. */ -static long __inline__ __read_trylock(__raw_rwlock_t *rw) +static long __inline__ ___raw_read_trylock(__raw_rwlock_t *rw) { long tmp; @@ -203,7 +203,7 @@ static long __inline__ __read_trylock(__ * This returns the old value in the lock, * so we got the write lock if the return value is 0. 
*/ -static __inline__ long __write_trylock(__raw_rwlock_t *rw) +static __inline__ long ___raw_write_trylock(__raw_rwlock_t *rw) { long tmp, token; @@ -226,7 +226,7 @@ static __inline__ long __write_trylock(_ static void __inline__ __raw_read_lock(__raw_rwlock_t *rw) { while (1) { - if (likely(__read_trylock(rw) > 0)) + if (likely(___raw_read_trylock(rw) > 0)) break; do { HMT_low(); @@ -240,7 +240,7 @@ static void __inline__ __raw_read_lock(_ static void __inline__ __raw_write_lock(__raw_rwlock_t *rw) { while (1) { - if (likely(__write_trylock(rw) == 0)) + if (likely(___raw_write_trylock(rw) == 0)) break; do { HMT_low(); @@ -253,12 +253,12 @@ static void __inline__ __raw_write_lock( static int __inline__ __raw_read_trylock(__raw_rwlock_t *rw) { - return __read_trylock(rw) > 0; + return ___raw_read_trylock(rw) > 0; } static int __inline__ __raw_write_trylock(__raw_rwlock_t *rw) { - return __write_trylock(rw) == 0; + return ___raw_write_trylock(rw) == 0; } static void __inline__ __raw_read_unlock(__raw_rwlock_t *rw) patches/preempt-realtime-arm-shark.patch0000664000077200007720000000103510655544574017717 0ustar mingomingo--- arch/arm/mach-shark/leds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/arm/mach-shark/leds.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mach-shark/leds.c +++ linux-rt-rebase.q/arch/arm/mach-shark/leds.c @@ -32,7 +32,7 @@ static char led_state; static short hw_led_state; static short saved_state; -static DEFINE_SPINLOCK(leds_lock); +static DEFINE_RAW_SPINLOCK(leds_lock); short sequoia_read(int addr) { outw(addr,0x24); patches/inet-hash-bits-ipv6-fix.patch0000664000077200007720000001655210655544572017055 0ustar mingomingoFrom linux-rt-users-owner@vger.kernel.org Fri Jun 22 23:36:42 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=none autolearn=unavailable version=3.1.7-deb Received: from vger.kernel.org (vger.kernel.org [209.132.176.167]) by mail.tglx.de (Postfix) with ESMTP id E246465C292; Fri, 22 Jun 2007 23:36:42 +0200 (CEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1753653AbXFVVgk (ORCPT + 1 other); Fri, 22 Jun 2007 17:36:40 -0400 Received: (majordomo@vger.kernel.org) by vger.kernel.org id S1753545AbXFVVgk (ORCPT ); Fri, 22 Jun 2007 17:36:40 -0400 Received: from homer.mvista.com ([63.81.120.158]:10787 "EHLO gateway-1237.mvista.com" rhost-flags-OK-FAIL-OK-OK) by vger.kernel.org with ESMTP id S1753234AbXFVVgk (ORCPT ); Fri, 22 Jun 2007 17:36:40 -0400 Received: from [192.168.0.30] (unknown [10.0.10.125]) by hermes.mvista.com (Postfix) with ESMTP id EDA2D1DBDB; Fri, 22 Jun 2007 14:36:38 -0700 (PDT) Message-ID: <467C40D3.206@ncos.nec.co.jp> Date: Fri, 22 Jun 2007 14:36:19 -0700 From: Masayuki Nakagawa User-Agent: Thunderbird 1.5.0.12 (Windows/20070509) MIME-Version: 1.0 To: mingo@elte.hu Cc: linux-rt-users@vger.kernel.org, dwalker@mvista.com Subject: [PATCH 2.6.21.5-rt17] IPV6: estalished connections are not shown with "cat /proc/net/tcp6" Content-Type: text/plain; charset=UTF-8 Sender: linux-rt-users-owner@vger.kernel.org Precedence: bulk X-Mailing-List: linux-rt-users@vger.kernel.org X-Filter-To: .Kernel.rt-users X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit I found an issue regarding networking in the real-time patch (patch-2.6.21.5-rt17). 
The issue happens only with the kernel, which the real-time patch was applied. However, the latest stable main kernel (2.6.21.5) doesn't have the same issue. Therefore, please don't transfer this report to netdev. The detail of issue is below. I ran my test program, which is a very simple IPv6 client-server program. These programs establish a TCP/IPv6 connection between two hosts, and then sleep, like following diagram. And then, the problem appears with "cat /proc/net/tcp6". serverA serverB | SYN | +--------------->+ | SYN/ACK | +<---------------+ | ACK | +--------------->+ | | sleep... sleep... | | When I "cat /proc/net/tcp6" on serverA while establishing connection between serverA and B, the established connections are not shown. If you need my test program, please let me know. I can provide it to you. However, in case of the main-line kernel, the established connections will be shown appropriately with "cat /proc/net/tcp6". It's different because the real-time patch has implemented a new socket lookup mechanism for a high-latency. So, real-time patch has a different mechanism from main-line kernel. The real-time patch, which implemented a new socket lookup mechanism is using bitmap(ebitmask). When establishing TCP connection, it sets a flag bit into the bitmap like followings. [ebitmask in struct inet_hashinfo] Before connecting 0000000000000000000000000000000000000000000000000000000000000000 After connecting 0000001000000000000000000000000000000000000000000000000000000000 ^ And when reading "/proc/net/tcp and tcp6", the kernel searches the currently active TCP connections with reference to the bitmap. However, the kernel can't search the active TCP/IPv6 connection in established state. It is because the kernel doesn't set a flag bit when establishing TCP/IPv6 connection. In case of TCP/IPv4, __inet_hash() sets the flag bit properly with __inet_hash_setbit(). But, in case of TCP/IPv6, the setting the flag bit is missing in __inet6_hash(). [include/net/inet_hashtables.h] static inline void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk, const int listen_possible) { struct hlist_head *list; rwlock_t *lock; unsigned long *bitmask = NULL; unsigned int index = 0; BUG_TRAP(sk_unhashed(sk)); if (listen_possible && sk->sk_state == TCP_LISTEN) { list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; lock = &hashinfo->lhash_lock; inet_listen_wlock(hashinfo); } else { struct inet_ehash_bucket *head; sk->sk_hash = inet_sk_ehashfn(sk); index = inet_ehash_index(hashinfo, sk->sk_hash); head = inet_ehash_bucket(hashinfo, sk->sk_hash); list = &head->chain; lock = &head->lock; bitmask = hashinfo->ebitmask; write_lock(lock); } __sk_add_node(sk, list); __inet_hash_setbit(bitmask, index); sock_prot_inc_use(sk->sk_prot); write_unlock(lock); if (listen_possible && sk->sk_state == TCP_LISTEN) wake_up(&hashinfo->lhash_wait); } [net/ipv6/inet6_hashtables.c] void __inet6_hash(struct inet_hashinfo *hashinfo, struct sock *sk) { struct hlist_head *list; rwlock_t *lock; printk("__inet6_hash hit\n"); BUG_TRAP(sk_unhashed(sk)); if (sk->sk_state == TCP_LISTEN) { list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; lock = &hashinfo->lhash_lock; inet_listen_wlock(hashinfo); } else { unsigned int hash; sk->sk_hash = hash = inet6_sk_ehashfn(sk); hash &= (hashinfo->ehash_size - 1); list = &hashinfo->ehash[hash].chain; lock = &hashinfo->ehash[hash].lock; write_lock(lock); } __sk_add_node(sk, list); sock_prot_inc_use(sk->sk_prot); write_unlock(lock); } So, I suggest a following change. 
The change is to set the flag bit appropriately in __inet6_hash(). Signed-off-by: Masayuki Nakagawa --- net/ipv6/inet6_hashtables.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) Index: linux/net/ipv6/inet6_hashtables.c =================================================================== --- linux.orig/net/ipv6/inet6_hashtables.c +++ linux/net/ipv6/inet6_hashtables.c @@ -27,6 +27,8 @@ void __inet6_hash(struct inet_hashinfo * { struct hlist_head *list; rwlock_t *lock; + unsigned long *bitmask = NULL; + unsigned int index = 0; BUG_TRAP(sk_unhashed(sk)); @@ -35,15 +37,16 @@ void __inet6_hash(struct inet_hashinfo * lock = &hashinfo->lhash_lock; inet_listen_wlock(hashinfo); } else { - unsigned int hash; - sk->sk_hash = hash = inet6_sk_ehashfn(sk); - hash &= (hashinfo->ehash_size - 1); - list = &hashinfo->ehash[hash].chain; - lock = &hashinfo->ehash[hash].lock; + sk->sk_hash = inet6_sk_ehashfn(sk); + index = inet_ehash_index(hashinfo, sk->sk_hash); + list = &hashinfo->ehash[index].chain; + lock = &hashinfo->ehash[index].lock; + bitmask = hashinfo->ebitmask; write_lock(lock); } __sk_add_node(sk, list); + __inet_hash_setbit(bitmask, index); sock_prot_inc_use(sk->sk_prot); write_unlock(lock); } patches/kvm-rt.patch0000664000077200007720000001014710655544576014006 0ustar mingomingoSubject: [patch] kvm: make vcpu_load/put preemptible From: Ingo Molnar make vcpu_load/put preemptible. Signed-off-by: Ingo Molnar --- drivers/kvm/kvm_main.c | 8 ++++---- drivers/kvm/svm.c | 13 ++++++++++--- drivers/kvm/vmx.c | 15 ++++++++++++--- 3 files changed, 26 insertions(+), 10 deletions(-) Index: linux-rt-rebase.q/drivers/kvm/kvm_main.c =================================================================== --- linux-rt-rebase.q.orig/drivers/kvm/kvm_main.c +++ linux-rt-rebase.q/drivers/kvm/kvm_main.c @@ -2902,7 +2902,7 @@ static int kvm_reboot(struct notifier_bl * in vmx root mode. */ printk(KERN_INFO "kvm: exiting hardware virtualization\n"); - on_each_cpu(hardware_disable, NULL, 0, 1); + schedule_on_each_cpu(hardware_disable, NULL, 0, 1); } return NOTIFY_OK; } @@ -3125,7 +3125,7 @@ int kvm_init_arch(struct kvm_arch_ops *o if (r < 0) goto out; - on_each_cpu(hardware_enable, NULL, 0, 1); + schedule_on_each_cpu(hardware_enable, NULL, 0, 1); r = register_cpu_notifier(&kvm_cpu_notifier); if (r) goto out_free_1; @@ -3157,7 +3157,7 @@ out_free_2: unregister_reboot_notifier(&kvm_reboot_notifier); unregister_cpu_notifier(&kvm_cpu_notifier); out_free_1: - on_each_cpu(hardware_disable, NULL, 0, 1); + schedule_on_each_cpu(hardware_disable, NULL, 0, 1); kvm_arch_ops->hardware_unsetup(); out: kvm_arch_ops = NULL; @@ -3171,7 +3171,7 @@ void kvm_exit_arch(void) sysdev_class_unregister(&kvm_sysdev_class); unregister_reboot_notifier(&kvm_reboot_notifier); unregister_cpu_notifier(&kvm_cpu_notifier); - on_each_cpu(hardware_disable, NULL, 0, 1); + schedule_on_each_cpu(hardware_disable, NULL, 0, 1); kvm_arch_ops->hardware_unsetup(); kvm_arch_ops = NULL; } Index: linux-rt-rebase.q/drivers/kvm/svm.c =================================================================== --- linux-rt-rebase.q.orig/drivers/kvm/svm.c +++ linux-rt-rebase.q/drivers/kvm/svm.c @@ -613,9 +613,17 @@ static void svm_free_vcpu(struct kvm_vcp static void svm_vcpu_load(struct kvm_vcpu *vcpu) { - int cpu, i; + int cpu = raw_smp_processor_id(), i; + cpumask_t this_mask = cpumask_of_cpu(cpu); + + /* + * Keep the context preemptible, but do not migrate + * away to another CPU. TODO: make sure this persists. + * Save/restore original mask. 
+ */ + if (unlikely(!cpus_equal(current->cpus_allowed, this_mask))) + set_cpus_allowed(current, cpumask_of_cpu(cpu)); - cpu = get_cpu(); if (unlikely(cpu != vcpu->cpu)) { u64 tsc_this, delta; @@ -641,7 +649,6 @@ static void svm_vcpu_put(struct kvm_vcpu wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]); rdtscll(vcpu->host_tsc); - put_cpu(); } static void svm_vcpu_decache(struct kvm_vcpu *vcpu) Index: linux-rt-rebase.q/drivers/kvm/vmx.c =================================================================== --- linux-rt-rebase.q.orig/drivers/kvm/vmx.c +++ linux-rt-rebase.q/drivers/kvm/vmx.c @@ -366,6 +366,8 @@ static void vmx_load_host_state(struct k reload_tss(); } + preempt_enable(); + save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs); load_msrs(vcpu->host_msrs, vcpu->save_nmsrs); if (msr_efer_need_save_restore(vcpu)) @@ -379,10 +381,17 @@ static void vmx_load_host_state(struct k static void vmx_vcpu_load(struct kvm_vcpu *vcpu) { u64 phys_addr = __pa(vcpu->vmcs); - int cpu; u64 tsc_this, delta; + int cpu = raw_smp_processor_id(); + cpumask_t this_mask = cpumask_of_cpu(cpu); - cpu = get_cpu(); + /* + * Keep the context preemptible, but do not migrate + * away to another CPU. TODO: make sure this persists. + * Save/restore original mask. + */ + if (unlikely(!cpus_equal(current->cpus_allowed, this_mask))) + set_cpus_allowed(current, cpumask_of_cpu(cpu)); if (vcpu->cpu != cpu) vcpu_clear(vcpu); @@ -428,7 +437,6 @@ static void vmx_vcpu_put(struct kvm_vcpu { vmx_load_host_state(vcpu); kvm_put_guest_fpu(vcpu); - put_cpu(); } static void vmx_fpu_activate(struct kvm_vcpu *vcpu) @@ -2022,6 +2030,7 @@ again: if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests)) vmx_flush_tlb(vcpu); + preempt_disable(); asm ( /* Store host registers */ #ifdef CONFIG_X86_64 patches/git-acpi.patch0000664000077200007720000033363710655544570014271 0ustar mingomingoGIT 00a62456d1b3d6ee7cde8f7b499731a6386be4a0 git+ssh://master.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git#test commit 10b3dcae0f275e2546e55303d64ddbb58cec7599 Author: Shaohua Li Date: Fri Jul 20 10:03:25 2007 +0800 ACPI: ignore _PSx method for hotplugable PCI devices If the ACPI device has _EJ0, ignore the device. _PSx will set power for the slot, and the hotplug driver will take care of _PSx. Signed-off-by: Shaohua Li Signed-off-by: Len Brown commit ab826ca4cf2fe8ebcfd21189ca8bfeb47ca88359 Author: Shaohua Li Date: Fri Jul 20 10:03:22 2007 +0800 ACPI: Use ACPI methods to select PCI device suspend state applied after Rafel's 'PM: Update global suspend and hibernation operations framework' patch set Signed-off-by: Shaohua Li Signed-off-by: Len Brown commit fc30e68e88baf463683bde43347756889ba2ffae Author: Shaohua Li Date: Fri Jul 20 10:03:20 2007 +0800 ACPI, PNP: hook ACPI D-state to PNP suspend/resume applied after Rafel's 'PM: Update global suspend and hibernation operations framework' patch set Signed-off-by: Shaohua Li Signed-off-by: Len Brown commit fd4aff1a28eecbd729b409bf7d3eff5948f20414 Author: Shaohua Li Date: Tue Jul 17 22:40:25 2007 +0200 ACPI: Add acpi_pm_device_sleep_state helper routine Based on the David Brownell's patch at http://marc.info/?l=linux-acpi&m=117873972806360&w=2 updated by: Rafael J. Wysocki Add a helper routine returning the lowest power (highest number) ACPI device power state that given device can be in while the system is in the sleep state indicated by acpi_target_sleep_state . Signed-off-by: Rafael J. 
Wysocki Acked-by: Pavel Machek Signed-off-by: Len Brown commit e9b3aba887f47f9cd64de20fec9c333a932b70dc Author: Rafael J. Wysocki Date: Tue Jul 17 22:40:06 2007 +0200 ACPI: Implement the set_target() callback from pm_ops In the future some drivers may need to use ACPI to determine the low power states in which to place their devices, but to provide the drivers with this information the ACPI core needs to know what sleep state the system is going to enter. Namely, the device's state should not be too high power for given system sleep state and, if the device is supposed to be able to wake up the system, its state should not be too low power for the wake up to be possible). For this purpose, the ACPI core needs to implement the set_target() method in 'struct pm_ops' and store the target system sleep state passed by the PM core in a variable. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Acked-by: David Brownell Signed-off-by: Len Brown commit 6eec70ce9b0cbc33bdda4bac10e8a9c9f9a478c8 Author: Adrian Bunk Date: Mon Jul 9 11:33:13 2007 -0700 cpuidle: static make cpuidle_replace_governor() static Signed-off-by: Adrian Bunk Cc: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 366d6877e5fe0e59b900cffdad078950fef851dc Author: Adrian Bunk Date: Tue Jul 3 00:54:40 2007 -0400 cpuidle: static This patch makes the needlessly global struct menu_governor static. Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 551340add4c11c81eb62f4892dceeaa86f0dba05 Author: Andrew Morton Date: Tue Jul 3 00:49:14 2007 -0400 export symbol tick_nohz_get_sleep_length ERROR: "tick_nohz_get_sleep_length" [drivers/cpuidle/governors/menu.ko] undefined! ERROR: "tick_nohz_get_idle_jiffies" [drivers/cpuidle/governors/menu.ko] undefined! And please be sure to get your changes to core kernel suitably reviewed. Cc: Adam Belay Cc: Venki Pallipadi Cc: Ingo Molnar Cc: Thomas Gleixner Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 6cb17f09ef27a4be59c910028461e3c5be222cf9 Author: Andrew Morton Date: Tue Jul 3 00:43:04 2007 -0400 tick.h needs hrtimer.h It uses hrtimers. Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 1c539d0e67698d15b1a5ffba526bf2761d305b93 Author: Venki Pallipadi Date: Tue Jul 3 00:40:34 2007 -0400 cpuidle: first round of documentation updates Documentation changes based on Pavel's feedback. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 899e617dd8a30d6eb6fad12ad4fc0ec23f47b42f Author: Venki Pallipadi Date: Tue Jul 3 00:39:25 2007 -0400 cpuidle: add rating to the governors and pick the one with highest rating by default Introduce a governor rating scheme to pick the right governor by default. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit d29daa0882be3a2cee417ed06cad4aea1acac917 Author: Venki Pallipadi Date: Tue Jul 3 00:38:08 2007 -0400 cpuidle: make cpuidle sysfs driver governor switch off by default Make default cpuidle sysfs to show current_governor and current_driver in read-only mode. More elaborate available_governors and available_drivers with writeable current_governor and current_driver interface only appear with "cpuidle_sysfs_switch" boot parameter. 
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit d07e801a752249b9a1cb5832f0e503cf566ea79d Author: Venki Pallipadi Date: Tue Jul 3 00:37:00 2007 -0400 cpuidle: menu governor: change the early break condition Change the C-state early break out algorithm in menu governor. We only look at early breakouts that result in wakeups shorter than idle state's target_residency. If such a breakout is frequent enough, eliminate the particular idle state upto a timeout period. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 6a16ce047e1c2bce21b985133b0064fc7fab3bc8 Author: Venki Pallipadi Date: Tue Jul 3 00:35:38 2007 -0400 cpuidle: fix uninitialized variable in sysfs routine Fix the uninitialized usage of ret. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 1b5debd79296497d49961d1adaf0e5063f19207f Author: Venki Pallipadi Date: Tue Jul 3 00:34:16 2007 -0400 cpuidle: reenable /proc/acpi//power interface for the time being Keep /proc/acpi/processor/CPU*/power around for a while as powertop depends on it. It will be marked deprecated and removed in future. powertop can use cpuidle interfaces instead. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 743fe7f75ab4897ba69b33dbd3c12d98143fcd60 Author: Venki Pallipadi Date: Tue Jul 3 00:32:37 2007 -0400 cpuidle: menu governor and hrtimer compile fix Compile fix for menu governor. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 0157896199e2b0e65b5be1d423aded91634f2861 Author: Len Brown Date: Thu May 31 22:51:43 2007 -0400 cpuidle: build fix - cpuidle vs ipw2100 module ERROR: "acpi_set_cstate_limit" [drivers/net/wireless/ipw2100.ko] undefined! Signed-off-by: Len Brown commit 40bf36fed3f990cad3c54175528a975f265659ce Author: Adam Belay Date: Sat Mar 24 03:47:07 2007 -0400 cpuidle: add the 'menu' governor Here is my first take at implementing an idle PM governor that takes full advantage of NO_HZ. I call it the 'menu' governor because it considers the full list of idle states before each entry. I've kept the implementation fairly simple. It attempts to guess the next residency time and then chooses a state that would meet at least the break-even point between power savings and entry cost. To this end, it selects the deepest idle state that satisfies the following constraints: 1. If the idle time elapsed since bus master activity was detected is below a threshold (currently 20 ms), then limit the selection to C2-type or above. 2. Do not choose a state with a break-even residency that exceeds the expected time remaining until the next timer interrupt. 3. Do not choose a state with a break-even residency that exceeds the elapsed time between the last pair of break events, excluding timer interrupts. This governor has an advantage over "ladder" governor because it proactively checks how much time remains until the next timer interrupt using the tick infrastructure. Also, it handles device interrupt activity more intelligently by not including timer interrupts in break event calculations. Finally, it doesn't make policy decisions using the number of state entries, which can have variable residency times (NO_HZ makes these potentially very large), and instead only considers sleep time deltas. 
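The selection rules above boil down to picking the deepest state whose break-even residency is still covered by both the predicted sleep length and the observed interval between non-timer wakeups. The following stand-alone sketch illustrates that shape only; the structure layout, names and the 20 ms constant are invented for the example and are not the menu governor's actual code:

	/*
	 * Illustrative sketch of the three selection rules described above.
	 * states[] is assumed ordered from shallowest to deepest, with
	 * state 0 always usable.  All names here are made up.
	 */
	struct sketch_state {
		unsigned int	break_even_us;	/* residency needed to pay back entry cost */
		int		bm_sensitive;	/* unusable while bus-master/DMA activity is recent */
	};

	static int sketch_select(const struct sketch_state *states, int nr_states,
				 unsigned int us_to_next_timer,
				 unsigned int us_since_last_break,
				 unsigned int us_since_bm_activity)
	{
		int i, best = 0;

		for (i = 1; i < nr_states; i++) {
			if (states[i].bm_sensitive && us_since_bm_activity < 20000)
				continue;	/* rule 1: recent bus-master activity */
			if (states[i].break_even_us > us_to_next_timer)
				break;		/* rule 2: next timer interrupt too close */
			if (states[i].break_even_us > us_since_last_break)
				break;		/* rule 3: non-timer wakeups too frequent */
			best = i;		/* deepest state so far that qualifies */
		}
		return best;
	}
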
The menu governor can be selected during runtime using the cpuidle sysfs interface like so: "echo "menu" > /sys/devices/system/cpu/cpuidle/current_governor" Signed-off-by: Adam Belay Signed-off-by: Len Brown commit 85b2afe395e09ffb6a0bdb88d8e0fe80f039ef30 Author: Adam Belay Date: Sat Mar 24 03:47:03 2007 -0400 cpuidle: export time until next timer interrupt using NO_HZ Expose information about the time remaining until the next timer interrupt expires by utilizing the dynticks infrastructure. Also modify the main idle loop to allow dynticks to handle non-interrupt break events (e.g. DMA). Finally, expose sleep ticks information to external code. Thomas Gleixner is responsible for much of the code in this patch. However, I've made some additional changes, so I'm probably responsible if there are any bugs or oversights :) Signed-off-by: Adam Belay Signed-off-by: Len Brown commit ddb5ec5c0a7ef163c1efe2468bcc005a57187dfa Author: Adam Belay Date: Sat Mar 24 03:46:58 2007 -0400 cpuidle: governor API changes This patch prepares cpuidle for the menu governor. It adds an optional stage after idle state entry to give the governor an opportunity to check why the state was exited. Also it makes sure the idle loop returns after each state entry, allowing the appropriate dynticks code to run. Signed-off-by: Adam Belay Signed-off-by: Len Brown commit e1d4b76cbe138f7c93a051010362d0f9add57566 Author: Venki Pallipadi Date: Thu Apr 26 00:03:59 2007 -0700 cpuidle: hang fix Prevent hang on x86-64, when ACPI processor driver is added as a module on a system that does not support C-states. x86-64 expects all idle handlers to enable interrupts before returning from idle handler. This is due to enter_idle(), exit_idle() races. Make cpuidle_idle_call() confirm to this when there is no pm_idle_old. Also, cpuidle look at the return values of attch_driver() and set current_driver to NULL if attach fails on all CPUs. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit ff0e02b9a13936484eea6a713e99df0204d99732 Author: Shaohua Li Date: Thu Apr 26 10:40:09 2007 +0800 cpuidle: add support for max_cstate limit With CPUIDLE framework, the max_cstate (to limit max cpu c-state) parameter is ingored. Some systems require it to ignore C2/C3 and some drivers like ipw require it too. Signed-off-by: Shaohua Li Signed-off-by: Len Brown commit 4d91bed5b98905d0399466e6500385c7c9aa4675 Author: Shaohua Li Date: Thu Apr 26 10:40:13 2007 +0800 cpuidle: add cpuidle_fore_redetect_devices API add cpuidle_force_redetect_devices API, which forces all CPU redetect idle states. Next patch will use it. Signed-off-by: Shaohua Li Signed-off-by: Len Brown commit 55705c005660bf8c5e83359f3d1ef009702af0da Author: Shaohua Li Date: Thu Apr 26 10:40:01 2007 +0800 cpuidle: fix sysfs related issue Fix the cpuidle sysfs issue. a. make kobject dynamicaly allocated b. fixed sysfs init issue to avoid suspend/resume issue Signed-off-by: Shaohua Li Signed-off-by: Len Brown commit 4d8f36e7d95a98fe7099a76e1fb766a77d26efff Author: Randy Dunlap Date: Wed Mar 28 22:52:53 2007 -0400 cpuidle: 1-bit field must be unsigned A 1-bit bitfield has no room for a sign bit. 
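As a minimal illustration (the field name is invented, not the actual ladder.c member):

	int		is_promoted:1;	/* plain int: may be signed, so the only values are 0 and -1 */
	unsigned int	is_promoted:1;	/* unsigned: holds 0 and 1 as intended */

With the signed form, a test such as "if (s->is_promoted == 1)" can never be true. The diagnostic being fixed: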
drivers/cpuidle/governors/ladder.c:54:16: error: dubious bitfield without explicit `signed' or `unsigned' Signed-off-by: Randy Dunlap Cc: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 6e122e5e891d5e66fe3df37474e00b6ab241b80c Author: Venkatesh Pallipadi Date: Wed Mar 28 22:52:41 2007 -0400 cpuidle: fix boot hang Patch for cpuidle boot hang reported by Larry Finger here. http://www.ussg.iu.edu/hypermail/linux/kernel/0703.2/2025.html Signed-off-by: Venkatesh Pallipadi Cc: Larry Finger Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 8eecab24ab74a9b565888f62ab172b5993b85784 Author: Len Brown Date: Wed Mar 7 04:37:53 2007 -0500 cpuidle: ladder does not depend on ACPI build fix for CONFIG_ACPI=n In file included from drivers/cpuidle/governors/ladder.c:21: include/acpi/processor.h:88: error: expected specifier-qualifier-list before ‘acpi_integer’ include/acpi/processor.h:106: error: expected specifier-qualifier-list before ‘acpi_integer’ include/acpi/processor.h:168: error: expected specifier-qualifier-list before ‘acpi_handle’ Signed-off-by: Len Brown commit ed6a8fc4d67a601706e72442a0568852e20aa7e7 Author: Adrian Bunk Date: Tue Mar 6 02:29:40 2007 -0800 cpuidle: make code static This patch makes the following needlessly global code static: - driver.c: __cpuidle_find_driver() - governor.c: __cpuidle_find_governor() - ladder.c: struct ladder_governor Signed-off-by: Adrian Bunk Cc: Venkatesh Pallipadi Cc: Adam Belay Cc: Shaohua Li Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 0b7b292b9e893f21f275ab40457e0a6a9fb3302a Author: Venkatesh Pallipadi Date: Wed Mar 7 02:38:22 2007 -0500 cpu_idle: fix build break This patch fixes a build breakage with !CONFIG_HOTPLUG_CPU and CONFIG_CPU_IDLE. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit 5520c1cdb3154acb2c1250c395bd7d6f3c6f01b5 Author: Venkatesh Pallipadi Date: Tue Mar 6 02:29:39 2007 -0800 cpuidle: build fix for !CPU_IDLE Fix the compile issues when CPU_IDLE is not configured. Signed-off-by: Venkatesh Pallipadi Cc: Adam Belay Cc: Shaohua Li Signed-off-by: Andrew Morton Signed-off-by: Len Brown commit bd5951fdfdef243135ea275c27e6ff5ba20a3d7d Author: Venkatesh Pallipadi Date: Thu Feb 22 13:54:57 2007 -0800 cpuidle take2: Basic documentation for cpuidle Documentation for cpuidle infrastructure Signed-off-by: Venkatesh Pallipadi Signed-off-by: Adam Belay Signed-off-by: Shaohua Li Signed-off-by: Len Brown commit 33741f25522cf4e660e263106df951b9cbe02df1 Author: Venkatesh Pallipadi Date: Thu Feb 22 13:54:03 2007 -0800 cpuidle take2: Hookup ACPI C-states driver with cpuidle Hookup ACPI C-states onto generic cpuidle infrastructure. drivers/acpi/procesor_idle.c is now a ACPI C-states driver that registers as a driver in cpuidle infrastructure and the policy part is removed from drivers/acpi/processor_idle.c. We use governor in cpuidle instead. Signed-off-by: Shaohua Li Signed-off-by: Venkatesh Pallipadi Signed-off-by: Adam Belay Signed-off-by: Len Brown commit b89790e9968a77c6cdc9fa08c5260d73face5487 Author: Venkatesh Pallipadi Date: Thu Feb 22 13:52:57 2007 -0800 cpuidle take2: Core cpuidle infrastructure Announcing 'cpuidle', a new CPU power management infrastructure to manage idle CPUs in a clean and efficient manner. cpuidle separates out the drivers that can provide support for multiple types of idle states and policy governors that decide on what idle state to use at run time. 
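That split can be pictured with a minimal, purely illustrative sketch; the real structures live in the include/linux/cpuidle.h added by this patch and are registered through cpuidle_register_driver() and cpuidle_register_governor(), so the types and fields below are invented stand-ins, not the actual API:

	/* conceptual model only -- invented minimal types */
	struct sketch_idle_state {
		const char	*name;
		unsigned int	exit_latency_us;	/* wakeup latency */
		unsigned int	power_mw;		/* power while in this state */
		void		(*enter)(void);		/* platform-specific entry routine */
	};

	struct sketch_driver {				/* mechanism, e.g. an ACPI C-state driver */
		struct sketch_idle_state states[8];
		int			 state_count;
	};

	struct sketch_governor {			/* policy, e.g. "ladder" or "menu" */
		int (*select)(struct sketch_driver *drv);
	};

	/* what the core idle path does with the two halves */
	static void sketch_idle(struct sketch_driver *drv, struct sketch_governor *gov)
	{
		int next = gov->select(drv);		/* policy decides */
		drv->states[next].enter();		/* mechanism enters the state */
	}
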
A cpuidle driver can support multiple idle states based on parameters like varying power consumption, wakeup latency, etc (ACPI C-states for example). A cpuidle governor can be usage model specific (laptop, server, laptop on battery etc). Main advantage of the infrastructure being, it allows independent development of drivers and governors and allows for better CPU power management. A huge thanks to Adam Belay and Shaohua Li who were part of this mini-project since its beginning and are greatly responsible for this patchset. This patch: Core cpuidle infrastructure. Introduces a new abstraction layer for cpuidle: * which manages drivers that can support multiple idles states. Drivers can be generic or particular to specific hardware/platform * allows pluging in multiple policy governors that can take idle state policy decision * The core also has a set of sysfs interfaces with which administrato can know about supported drivers and governors and switch them at run time. Signed-off-by: Adam Belay Signed-off-by: Shaohua Li Signed-off-by: Venkatesh Pallipadi Signed-off-by: Len Brown Documentation/cpuidle/core.txt | 17 Documentation/cpuidle/driver.txt | 29 + Documentation/cpuidle/governor.txt | 28 + Documentation/cpuidle/sysfs.txt | 35 + arch/i386/Kconfig | 2 arch/i386/kernel/process.c | 2 arch/x86_64/Kconfig | 2 drivers/Makefile | 1 drivers/acpi/osl.c | 11 drivers/acpi/processor_core.c | 5 drivers/acpi/processor_idle.c | 913 ++++++++++++++----------------------- drivers/cpuidle/Kconfig | 39 + drivers/cpuidle/Makefile | 5 drivers/cpuidle/cpuidle.c | 306 ++++++++++++ drivers/cpuidle/cpuidle.h | 50 ++ drivers/cpuidle/driver.c | 276 +++++++++++ drivers/cpuidle/governor.c | 187 +++++++ drivers/cpuidle/governors/Makefile | 6 drivers/cpuidle/governors/ladder.c | 228 +++++++++ drivers/cpuidle/governors/menu.c | 181 +++++++ drivers/cpuidle/sysfs.c | 393 +++++++++++++++ include/acpi/acpi_bus.h | 2 include/acpi/processor.h | 3 include/linux/acpi.h | 7 include/linux/cpuidle.h | 190 +++++++ include/linux/tick.h | 11 kernel/softirq.c | 5 kernel/time/tick-sched.c | 27 + 28 files changed, 2402 insertions(+), 559 deletions(-) Index: linux/Documentation/cpuidle/core.txt =================================================================== --- /dev/null +++ linux/Documentation/cpuidle/core.txt @@ -0,0 +1,17 @@ + + Supporting multiple CPU idle levels in kernel + + cpuidle + +General Information: + +Various CPUs today support multiple idle levels that are differentiated +by varying exit latencies and power consumption during idle. +cpuidle is a generic in-kernel infrastructure that separates +idle policy (governor) from idle mechanism (driver) and provides a +standardized infrastructure to support independent development of +governors and drivers. + +cpuidle resides under drivers/cpuidle. + + Index: linux/Documentation/cpuidle/driver.txt =================================================================== --- /dev/null +++ linux/Documentation/cpuidle/driver.txt @@ -0,0 +1,29 @@ + + + Supporting multiple CPU idle levels in kernel + + cpuidle drivers + + + + +cpuidle driver hooks into the cpuidle infrastructure and does the +architecture/platform dependent part of CPU idle states. Driver +provides the platform idle state detection capability and also +has mechanisms in place to support actusl entry-exit into a CPU idle state. + +cpuidle driver supports capability detection for a platform using the +init and exit routines. 
They will be called for each online CPU, with a +percpu cpuidle_driver object, and the driver should fill in cpuidle_states +inside cpuidle_driver depending on the CPU capability. + +The driver can handle dynamic state changes (like battery<->AC) by calling +the force_redetect interface. + +It is possible to have more than one driver registered at the same time and +the user can switch between drivers using the /sysfs interface (when enabled). + +Interfaces: +int cpuidle_register_driver(struct cpuidle_driver *drv); +void cpuidle_unregister_driver(struct cpuidle_driver *drv); +int cpuidle_force_redetect(struct cpuidle_device *dev); Index: linux/Documentation/cpuidle/governor.txt =================================================================== --- /dev/null +++ linux/Documentation/cpuidle/governor.txt @@ -0,0 +1,28 @@ + + + + Supporting multiple CPU idle levels in kernel + + cpuidle governors + + + + +A cpuidle governor is the policy routine that decides what idle state to enter at +any given time. The cpuidle core uses different callbacks to the governor while +handling idle entry. +* select_state() callback where the governor determines the next idle state to enter +* prepare_idle() callback is called before entering an idle state +* scan() callback is called after a driver forces redetection of the states + +More than one governor can be registered at the same time and +the user can switch between governors using the /sysfs interface (when supported). + +Support for more than one governor is provided so that developers can easily experiment +with different policies. By default, the most suitable governor for your +kernel configuration and platform will be selected by cpuidle. + +Interfaces: +int cpuidle_register_governor(struct cpuidle_governor *gov); +void cpuidle_unregister_governor(struct cpuidle_governor *gov); + Index: linux/Documentation/cpuidle/sysfs.txt =================================================================== --- /dev/null +++ linux/Documentation/cpuidle/sysfs.txt @@ -0,0 +1,35 @@ + + + Supporting multiple CPU idle levels in kernel + + cpuidle sysfs + +System global cpuidle related information and tunables are under +/sys/devices/system/cpu/cpuidle + +The current interfaces in this directory have self-explanatory names: +* current_driver_ro +* current_governor_ro + +With the cpuidle_sysfs_switch boot option (meant for developer testing), the +following objects are visible instead. +* available_drivers +* available_governors +* current_driver +* current_governor +In this case the user can switch the driver and governor at run time by writing +to current_driver and current_governor.
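[Editorial aside, not part of sysfs.txt or of the patch: pulling the driver-side interfaces listed above together, a minimal out-of-tree driver would look roughly like the sketch below. The "demo_*" names and the numbers are invented; the cpuidle_state fields mirror the ones the acpi_idle driver later in this patch fills in (exit_latency, target_residency, power_usage, flags, enter). Once a second driver or governor is registered, the current_driver/current_governor files just described are what let the administrator switch between them at run time.]

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpuidle.h>

/* enter() runs the idle state and returns the residency in microseconds */
static int demo_enter_one(struct cpuidle_device *dev,
			  struct cpuidle_state *state)
{
	/*
	 * A real driver would execute the platform idle instruction here
	 * (e.g. safe_halt() on x86) and measure how long it slept.
	 */
	return 0;	/* no residency reported; CPUIDLE_FLAG_TIME_VALID not set */
}

/* init() is called for each online CPU with its per-cpu cpuidle_device */
static int demo_idle_init(struct cpuidle_device *dev)
{
	struct cpuidle_state *state = &dev->states[0];

	state->exit_latency	= 1;	/* us */
	state->target_residency	= 6;	/* us */
	state->power_usage	= 1000;	/* mW */
	state->flags		= CPUIDLE_FLAG_SHALLOW;
	state->enter		= demo_enter_one;

	dev->state_count = 1;
	return 0;
}

static struct cpuidle_driver demo_idle_driver = {
	.name	= "demo_idle",
	.init	= demo_idle_init,
	/* no .redetect or .exit: this driver's states never change at run time */
	.owner	= THIS_MODULE,
};

static int __init demo_idle_setup(void)
{
	return cpuidle_register_driver(&demo_idle_driver);
}
module_init(demo_idle_setup);
MODULE_LICENSE("GPL");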
+ + +Per logical CPU specific cpuidle information are under +/sys/devices/system/cpu/cpuX/cpuidle +for each online cpu X + +Under this percpu directory, there is a directory for each idle state supported +by the driver, which in turn has +* latency : Latency to exit out of this idle state (in microseconds) +* power : Power consumed while in this idle state (in milliwatts) +* time : Total time spent in this idle state (in microseconds) +* usage : Number of times this state was entered (count) + + Index: linux/arch/i386/Kconfig =================================================================== --- linux.orig/arch/i386/Kconfig +++ linux/arch/i386/Kconfig @@ -1065,6 +1065,8 @@ endif # APM source "arch/i386/kernel/cpu/cpufreq/Kconfig" +source "drivers/cpuidle/Kconfig" + endmenu menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" Index: linux/arch/i386/kernel/process.c =================================================================== --- linux.orig/arch/i386/kernel/process.c +++ linux/arch/i386/kernel/process.c @@ -179,7 +179,6 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_stop_sched_tick(); while (!need_resched()) { void (*idle)(void); @@ -187,6 +186,7 @@ void cpu_idle(void) __get_cpu_var(cpu_idle_state) = 0; check_pgt_cache(); + tick_nohz_stop_sched_tick(); rmb(); idle = pm_idle; Index: linux/arch/x86_64/Kconfig =================================================================== --- linux.orig/arch/x86_64/Kconfig +++ linux/arch/x86_64/Kconfig @@ -714,6 +714,8 @@ source "drivers/acpi/Kconfig" source "arch/x86_64/kernel/cpufreq/Kconfig" +source "drivers/cpuidle/Kconfig" + endmenu menu "Bus options (PCI etc.)" Index: linux/drivers/Makefile =================================================================== --- linux.orig/drivers/Makefile +++ linux/drivers/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_MCA) += mca/ obj-$(CONFIG_EISA) += eisa/ obj-$(CONFIG_LGUEST_GUEST) += lguest/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ +obj-$(CONFIG_CPU_IDLE) += cpuidle/ obj-$(CONFIG_MMC) += mmc/ obj-$(CONFIG_NEW_LEDS) += leds/ obj-$(CONFIG_INFINIBAND) += infiniband/ Index: linux/drivers/acpi/osl.c =================================================================== --- linux.orig/drivers/acpi/osl.c +++ linux/drivers/acpi/osl.c @@ -1050,6 +1050,17 @@ unsigned int max_cstate = ACPI_PROCESSOR EXPORT_SYMBOL(max_cstate); +void (*acpi_do_set_cstate_limit)(void); +EXPORT_SYMBOL(acpi_do_set_cstate_limit); + +void acpi_set_cstate_limit(unsigned int new_limit) +{ + max_cstate = new_limit; + if (acpi_do_set_cstate_limit) + acpi_do_set_cstate_limit(); +} +EXPORT_SYMBOL(acpi_set_cstate_limit); + /* * Acquire a spinlock. 
* Index: linux/drivers/acpi/processor_core.c =================================================================== --- linux.orig/drivers/acpi/processor_core.c +++ linux/drivers/acpi/processor_core.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -1031,11 +1032,15 @@ static int __init acpi_processor_init(vo acpi_processor_ppc_init(); + cpuidle_register_driver(&acpi_idle_driver); + acpi_do_set_cstate_limit = acpi_max_cstate_changed; return 0; } static void __exit acpi_processor_exit(void) { + acpi_do_set_cstate_limit = NULL; + cpuidle_unregister_driver(&acpi_idle_driver); acpi_processor_ppc_exit(); Index: linux/drivers/acpi/processor_idle.c =================================================================== --- linux.orig/drivers/acpi/processor_idle.c +++ linux/drivers/acpi/processor_idle.c @@ -40,6 +40,7 @@ #include /* need_resched() */ #include #include +#include /* * Include the apic definitions for x86 to have the APIC timer related defines @@ -62,25 +63,34 @@ #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_idle"); #define ACPI_PROCESSOR_FILE_POWER "power" -#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) -#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ -#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ -static void (*pm_idle_save) (void) __read_mostly; -module_param(max_cstate, uint, 0644); +#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) +#define C2_OVERHEAD 1 /* 1us */ +#define C3_OVERHEAD 1 /* 1us */ + +void acpi_max_cstate_changed(void) +{ + /* Driver will reset devices' max cstate limit */ + cpuidle_force_redetect_devices(&acpi_idle_driver); +} + +static int change_max_cstate(const char *val, struct kernel_param *kp) +{ + int max; + + max = simple_strtol(val, NULL, 0); + if (!max) + return -EINVAL; + max_cstate = max; + if (acpi_do_set_cstate_limit) + acpi_do_set_cstate_limit(); + return 0; +} + +module_param_call(max_cstate, change_max_cstate, param_get_uint, &max_cstate, 0644); static unsigned int nocst __read_mostly; module_param(nocst, uint, 0000); -/* - * bm_history -- bit-mask with a bit per jiffy of bus-master activity - * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms - * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms - * 100 HZ: 0x0000000F: 4 jiffies = 40ms - * reduce history for more aggressive entry into C3 - */ -static unsigned int bm_history __read_mostly = - (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); -module_param(bm_history, uint, 0644); /* -------------------------------------------------------------------------- Power Management -------------------------------------------------------------------------- */ @@ -166,88 +176,6 @@ static struct dmi_system_id __cpuinitdat {}, }; -static inline u32 ticks_elapsed(u32 t1, u32 t2) -{ - if (t2 >= t1) - return (t2 - t1); - else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) - return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); - else - return ((0xFFFFFFFF - t1) + t2); -} - -static void -acpi_processor_power_activate(struct acpi_processor *pr, - struct acpi_processor_cx *new) -{ - struct acpi_processor_cx *old; - - if (!pr || !new) - return; - - old = pr->power.state; - - if (old) - old->promotion.count = 0; - new->demotion.count = 0; - - /* Cleanup from old state. */ - if (old) { - switch (old->type) { - case ACPI_STATE_C3: - /* Disable bus master reload */ - if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); - break; - } - } - - /* Prepare to use new state. 
*/ - switch (new->type) { - case ACPI_STATE_C3: - /* Enable bus master reload */ - if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); - break; - } - - pr->power.state = new; - - return; -} - -static void acpi_safe_halt(void) -{ - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we - * test NEED_RESCHED: - */ - smp_mb(); - if (!need_resched()) - safe_halt(); - current_thread_info()->status |= TS_POLLING; -} - -static atomic_t c3_cpu_count; - -/* Common C-state entry for C2, C3, .. */ -static void acpi_cstate_enter(struct acpi_processor_cx *cstate) -{ - if (cstate->space_id == ACPI_CSTATE_FFH) { - /* Call into architectural FFH based C-state */ - acpi_processor_ffh_cstate_enter(cstate); - } else { - int unused; - /* IO port based C-state */ - inb(cstate->address); - /* Dummy wait op - must do something useless after P_LVL2 read - because chipsets cannot guarantee that STPCLK# signal - gets asserted in time to freeze execution properly. */ - unused = inl(acpi_gbl_FADT.xpm_timer_block.address); - } -} - #ifdef ARCH_APICTIMER_STOPS_ON_C3 /* @@ -324,387 +252,6 @@ static void acpi_state_timer_broadcast(s #endif -static void acpi_processor_idle(void) -{ - struct acpi_processor *pr = NULL; - struct acpi_processor_cx *cx = NULL; - struct acpi_processor_cx *next_state = NULL; - int sleep_ticks = 0; - u32 t1, t2 = 0; - - /* - * Interrupts must be disabled during bus mastering calculations and - * for C2/C3 transitions. - */ - local_irq_disable(); - - pr = processors[smp_processor_id()]; - if (!pr) { - local_irq_enable(); - return; - } - - /* - * Check whether we truly need to go idle, or should - * reschedule: - */ - if (unlikely(need_resched())) { - local_irq_enable(); - return; - } - - cx = pr->power.state; - if (!cx) { - if (pm_idle_save) - pm_idle_save(); - else - acpi_safe_halt(); - return; - } - - /* - * Check BM Activity - * ----------------- - * Check for bus mastering activity (if required), record, and check - * for demotion. - */ - if (pr->flags.bm_check) { - u32 bm_status = 0; - unsigned long diff = jiffies - pr->power.bm_check_timestamp; - - if (diff > 31) - diff = 31; - - pr->power.bm_activity <<= diff; - - acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); - if (bm_status) { - pr->power.bm_activity |= 0x1; - acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); - } - /* - * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect - * the true state of bus mastering activity; forcing us to - * manually check the BMIDEA bit of each IDE channel. - */ - else if (errata.piix4.bmisx) { - if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) - || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) - pr->power.bm_activity |= 0x1; - } - - pr->power.bm_check_timestamp = jiffies; - - /* - * If bus mastering is or was active this jiffy, demote - * to avoid a faulty transition. Note that the processor - * won't enter a low-power state during this call (to this - * function) but should upon the next. - * - * TBD: A better policy might be to fallback to the demotion - * state (use it for this quantum only) istead of - * demoting -- and rely on duration as our sole demotion - * qualification. This may, however, introduce DMA - * issues (e.g. floppy DMA transfer overrun/underrun). 
- */ - if ((pr->power.bm_activity & 0x1) && - cx->demotion.threshold.bm) { - local_irq_enable(); - next_state = cx->demotion.state; - goto end; - } - } - -#ifdef CONFIG_HOTPLUG_CPU - /* - * Check for P_LVL2_UP flag before entering C2 and above on - * an SMP system. We do it here instead of doing it at _CST/P_LVL - * detection phase, to work cleanly with logical CPU hotplug. - */ - if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) - cx = &pr->power.states[ACPI_STATE_C1]; -#endif - - /* - * Sleep: - * ------ - * Invoke the current Cx state to put the processor to sleep. - */ - if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we - * test NEED_RESCHED: - */ - smp_mb(); - if (need_resched()) { - current_thread_info()->status |= TS_POLLING; - local_irq_enable(); - return; - } - } - - switch (cx->type) { - - case ACPI_STATE_C1: - /* - * Invoke C1. - * Use the appropriate idle routine, the one that would - * be used without acpi C-states. - */ - if (pm_idle_save) - pm_idle_save(); - else - acpi_safe_halt(); - - /* - * TBD: Can't get time duration while in C1, as resumes - * go to an ISR rather than here. Need to instrument - * base interrupt handler. - */ - sleep_ticks = 0xFFFFFFFF; - break; - - case ACPI_STATE_C2: - /* Get start time (ticks) */ - t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); - /* Invoke C2 */ - acpi_state_timer_broadcast(pr, cx, 1); - acpi_cstate_enter(cx); - /* Get end time (ticks) */ - t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); - -#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) - /* TSC halts in C2, so notify users */ - mark_tsc_unstable("possible TSC halt in C2"); -#endif - /* Re-enable interrupts */ - local_irq_enable(); - current_thread_info()->status |= TS_POLLING; - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = - ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; - acpi_state_timer_broadcast(pr, cx, 0); - break; - - case ACPI_STATE_C3: - - /* - * disable bus master - * bm_check implies we need ARB_DIS - * !bm_check implies we need cache flush - * bm_control implies whether we can do ARB_DIS - * - * That leaves a case where bm_check is set and bm_control is - * not set. In that case we cannot do much, we enter C3 - * without doing anything. - */ - if (pr->flags.bm_check && pr->flags.bm_control) { - if (atomic_inc_return(&c3_cpu_count) == - num_online_cpus()) { - /* - * All CPUs are trying to go to C3 - * Disable bus master arbitration - */ - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); - } - } else if (!pr->flags.bm_check) { - /* SMP with no shared cache... 
Invalidate cache */ - ACPI_FLUSH_CPU_CACHE(); - } - - /* Get start time (ticks) */ - t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); - /* Invoke C3 */ - acpi_state_timer_broadcast(pr, cx, 1); - acpi_cstate_enter(cx); - /* Get end time (ticks) */ - t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); - if (pr->flags.bm_check && pr->flags.bm_control) { - /* Enable bus master arbitration */ - atomic_dec(&c3_cpu_count); - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); - } - -#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) - /* TSC halts in C3, so notify users */ - mark_tsc_unstable("TSC halts in C3"); -#endif - /* Re-enable interrupts */ - local_irq_enable(); - current_thread_info()->status |= TS_POLLING; - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = - ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; - acpi_state_timer_broadcast(pr, cx, 0); - break; - - default: - local_irq_enable(); - return; - } - cx->usage++; - if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0)) - cx->time += sleep_ticks; - - next_state = pr->power.state; - -#ifdef CONFIG_HOTPLUG_CPU - /* Don't do promotion/demotion */ - if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) { - next_state = cx; - goto end; - } -#endif - - /* - * Promotion? - * ---------- - * Track the number of longs (time asleep is greater than threshold) - * and promote when the count threshold is reached. Note that bus - * mastering activity may prevent promotions. - * Do not promote above max_cstate. - */ - if (cx->promotion.state && - ((cx->promotion.state - pr->power.states) <= max_cstate)) { - if (sleep_ticks > cx->promotion.threshold.ticks && - cx->promotion.state->latency <= system_latency_constraint()) { - cx->promotion.count++; - cx->demotion.count = 0; - if (cx->promotion.count >= - cx->promotion.threshold.count) { - if (pr->flags.bm_check) { - if (! - (pr->power.bm_activity & cx-> - promotion.threshold.bm)) { - next_state = - cx->promotion.state; - goto end; - } - } else { - next_state = cx->promotion.state; - goto end; - } - } - } - } - - /* - * Demotion? - * --------- - * Track the number of shorts (time asleep is less than time threshold) - * and demote when the usage threshold is reached. - */ - if (cx->demotion.state) { - if (sleep_ticks < cx->demotion.threshold.ticks) { - cx->demotion.count++; - cx->promotion.count = 0; - if (cx->demotion.count >= cx->demotion.threshold.count) { - next_state = cx->demotion.state; - goto end; - } - } - } - - end: - /* - * Demote if current state exceeds max_cstate - * or if the latency of the current state is unacceptable - */ - if ((pr->power.state - pr->power.states) > max_cstate || - pr->power.state->latency > system_latency_constraint()) { - if (cx->demotion.state) - next_state = cx->demotion.state; - } - - /* - * New Cx State? - * ------------- - * If we're going to start using a new Cx state we must clean up - * from the previous and prepare to use the new. - */ - if (next_state != pr->power.state) - acpi_processor_power_activate(pr, next_state); -} - -static int acpi_processor_set_power_policy(struct acpi_processor *pr) -{ - unsigned int i; - unsigned int state_is_set = 0; - struct acpi_processor_cx *lower = NULL; - struct acpi_processor_cx *higher = NULL; - struct acpi_processor_cx *cx; - - - if (!pr) - return -EINVAL; - - /* - * This function sets the default Cx state policy (OS idle handler). 
- * Our scheme is to promote quickly to C2 but more conservatively - * to C3. We're favoring C2 for its characteristics of low latency - * (quick response), good power savings, and ability to allow bus - * mastering activity. Note that the Cx state policy is completely - * customizable and can be altered dynamically. - */ - - /* startup state */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (!state_is_set) - pr->power.state = cx; - state_is_set++; - break; - } - - if (!state_is_set) - return -ENODEV; - - /* demotion */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (lower) { - cx->demotion.state = lower; - cx->demotion.threshold.ticks = cx->latency_ticks; - cx->demotion.threshold.count = 1; - if (cx->type == ACPI_STATE_C3) - cx->demotion.threshold.bm = bm_history; - } - - lower = cx; - } - - /* promotion */ - for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (higher) { - cx->promotion.state = higher; - cx->promotion.threshold.ticks = cx->latency_ticks; - if (cx->type >= ACPI_STATE_C2) - cx->promotion.threshold.count = 4; - else - cx->promotion.threshold.count = 10; - if (higher->type == ACPI_STATE_C3) - cx->promotion.threshold.bm = bm_history; - } - - higher = cx; - } - - return 0; -} - static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) { @@ -922,7 +469,7 @@ static void acpi_processor_power_verify_ * Normalize the C2 latency to expidite policy */ cx->valid = 1; - cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); + cx->latency_ticks = cx->latency; return; } @@ -996,7 +543,7 @@ static void acpi_processor_power_verify_ * use this in our C3 policy */ cx->valid = 1; - cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); + cx->latency_ticks = cx->latency; return; } @@ -1062,18 +609,6 @@ static int acpi_processor_get_power_info pr->power.count = acpi_processor_power_verify(pr); /* - * Set Default Policy - * ------------------ - * Now that we know which states are supported, set the default - * policy. Note that this policy can be changed dynamically - * (e.g. encourage deeper sleeps to conserve battery life when - * not on AC). - */ - result = acpi_processor_set_power_policy(pr); - if (result) - return result; - - /* * if one state of type C2 or C3 is available, mark this * CPU as being "idle manageable" */ @@ -1090,9 +625,6 @@ static int acpi_processor_get_power_info int acpi_processor_cst_has_changed(struct acpi_processor *pr) { - int result = 0; - - if (!pr) return -EINVAL; @@ -1103,16 +635,9 @@ int acpi_processor_cst_has_changed(struc if (!pr->flags.power_setup_done) return -ENODEV; - /* Fall back to the default idle loop */ - pm_idle = pm_idle_save; - synchronize_sched(); /* Relies on interrupts forcing exit from idle. */ - - pr->flags.power = 0; - result = acpi_processor_get_power_info(pr); - if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) - pm_idle = acpi_processor_idle; - - return result; + acpi_processor_get_power_info(pr); + return cpuidle_force_redetect(per_cpu(cpuidle_devices, pr->id), + &acpi_idle_driver); } /* proc interface */ @@ -1198,30 +723,6 @@ static const struct file_operations acpi .release = single_release, }; -#ifdef CONFIG_SMP -static void smp_callback(void *v) -{ - /* we already woke the CPU up, nothing more to do */ -} - -/* - * This function gets called when a part of the kernel has a new latency - * requirement. 
This means we need to get all processors out of their C-state, - * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that - * wakes them all right up. - */ -static int acpi_processor_latency_notify(struct notifier_block *b, - unsigned long l, void *v) -{ - smp_call_function(smp_callback, NULL, 0, 1); - return NOTIFY_OK; -} - -static struct notifier_block acpi_processor_latency_notifier = { - .notifier_call = acpi_processor_latency_notify, -}; -#endif - int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device) { @@ -1238,9 +739,6 @@ int __cpuinit acpi_processor_power_init( "ACPI: processor limited to max C-state %d\n", max_cstate); first_run++; -#ifdef CONFIG_SMP - register_latency_notifier(&acpi_processor_latency_notifier); -#endif } if (!pr) @@ -1257,6 +755,7 @@ int __cpuinit acpi_processor_power_init( acpi_processor_get_power_info(pr); + /* * Install the idle handler if processor power management is supported. * Note that we use previously set idle handler will be used on @@ -1269,11 +768,6 @@ int __cpuinit acpi_processor_power_init( printk(" C%d[C%d]", i, pr->power.states[i].type); printk(")\n"); - - if (pr->id == 0) { - pm_idle_save = pm_idle; - pm_idle = acpi_processor_idle; - } } /* 'power' [R] */ @@ -1301,21 +795,344 @@ int acpi_processor_power_exit(struct acp if (acpi_device_dir(device)) remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device)); + return 0; +} - /* Unregister the idle handler when processor #0 is removed. */ - if (pr->id == 0) { - pm_idle = pm_idle_save; +/** + * ticks_elapsed - a helper function that determines how many ticks (in US) + * have elapsed between two PM Timer timestamps + * @t1: the start time + * @t2: the end time + */ +static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2) +{ + if (t2 >= t1) + return PM_TIMER_TICKS_TO_US(t2 - t1); + else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) + return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); + else + return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); +} - /* - * We are about to unload the current idle thread pm callback - * (pm_idle), Wait for all processors to update cached/local - * copies of pm_idle before proceeding. 
- */ - cpu_idle_wait(); -#ifdef CONFIG_SMP - unregister_latency_notifier(&acpi_processor_latency_notifier); +static inline u32 ticks_elapsed(u32 t1, u32 t2) +{ + if (t2 >= t1) + return (t2 - t1); + else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) + return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); + else + return ((0xFFFFFFFF - t1) + t2); +} + +/** + * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state + * @pr: the processor + * @target: the new target state + */ +static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr, + struct acpi_processor_cx *target) +{ + if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) { + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); + pr->flags.bm_rld_set = 0; + } + + if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) { + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); + pr->flags.bm_rld_set = 1; + } +} + +/** + * acpi_idle_do_entry - a helper function that does C2 and C3 type entry + * @cx: cstate data + */ +static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) +{ + if (cx->space_id == ACPI_CSTATE_FFH) { + /* Call into architectural FFH based C-state */ + acpi_processor_ffh_cstate_enter(cx); + } else { + int unused; + /* IO port based C-state */ + inb(cx->address); + /* Dummy wait op - must do something useless after P_LVL2 read + because chipsets cannot guarantee that STPCLK# signal + gets asserted in time to freeze execution properly. */ + unused = inl(acpi_gbl_FADT.xpm_timer_block.address); + } +} + +/** + * acpi_idle_enter_c1 - enters an ACPI C1 state-type + * @dev: the target CPU + * @state: the state data + * + * This is equivalent to the HALT instruction. + */ +static int acpi_idle_enter_c1(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + struct acpi_processor *pr; + struct acpi_processor_cx *cx = cpuidle_get_statedata(state); + pr = processors[smp_processor_id()]; + + if (unlikely(!pr)) + return 0; + + if (pr->flags.bm_check) + acpi_idle_update_bm_rld(pr, cx); + + current_thread_info()->status &= ~TS_POLLING; + /* + * TS_POLLING-cleared state must be visible before we test + * NEED_RESCHED: + */ + smp_mb(); + if (!need_resched()) + safe_halt(); + current_thread_info()->status |= TS_POLLING; + + cx->usage++; + + return 0; +} + +/** + * acpi_idle_enter_c2 - enters an ACPI C2 state-type + * @dev: the target CPU + * @state: the state data + */ +static int acpi_idle_enter_c2(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + struct acpi_processor *pr; + struct acpi_processor_cx *cx = cpuidle_get_statedata(state); + u32 t1, t2; + pr = processors[smp_processor_id()]; + + if (unlikely(!pr)) + return 0; + + if (pr->flags.bm_check) + acpi_idle_update_bm_rld(pr, cx); + + local_irq_disable(); + current_thread_info()->status &= ~TS_POLLING; + /* + * TS_POLLING-cleared state must be visible before we test + * NEED_RESCHED: + */ + smp_mb(); + + if (unlikely(need_resched())) { + current_thread_info()->status |= TS_POLLING; + local_irq_enable(); + return 0; + } + + t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); + acpi_state_timer_broadcast(pr, cx, 1); + acpi_idle_do_entry(cx); + t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); + +#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86) + /* TSC halts in C2, so notify users */ + mark_tsc_unstable("possible TSC halt in C2"); #endif + + local_irq_enable(); + current_thread_info()->status |= TS_POLLING; + + cx->usage++; + + acpi_state_timer_broadcast(pr, cx, 0); + cx->time += ticks_elapsed(t1, t2); + return 
ticks_elapsed_in_us(t1, t2); +} + +static int c3_cpu_count; +static DEFINE_SPINLOCK(c3_lock); + +/** + * acpi_idle_enter_c3 - enters an ACPI C3 state-type + * @dev: the target CPU + * @state: the state data + * + * Similar to C2 entry, except special bus master handling is needed. + */ +static int acpi_idle_enter_c3(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + struct acpi_processor *pr; + struct acpi_processor_cx *cx = cpuidle_get_statedata(state); + u32 t1, t2; + pr = processors[smp_processor_id()]; + + if (unlikely(!pr)) + return 0; + + if (pr->flags.bm_check) + acpi_idle_update_bm_rld(pr, cx); + + local_irq_disable(); + current_thread_info()->status &= ~TS_POLLING; + /* + * TS_POLLING-cleared state must be visible before we test + * NEED_RESCHED: + */ + smp_mb(); + + if (unlikely(need_resched())) { + current_thread_info()->status |= TS_POLLING; + local_irq_enable(); + return 0; + } + + /* disable bus master */ + if (pr->flags.bm_check) { + spin_lock(&c3_lock); + c3_cpu_count++; + if (c3_cpu_count == num_online_cpus()) { + /* + * All CPUs are trying to go to C3 + * Disable bus master arbitration + */ + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); + } + spin_unlock(&c3_lock); + } else { + /* SMP with no shared cache... Invalidate cache */ + ACPI_FLUSH_CPU_CACHE(); } + /* Get start time (ticks) */ + t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); + acpi_state_timer_broadcast(pr, cx, 1); + acpi_idle_do_entry(cx); + t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); + + if (pr->flags.bm_check) { + spin_lock(&c3_lock); + /* Enable bus master arbitration */ + if (c3_cpu_count == num_online_cpus()) + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); + c3_cpu_count--; + spin_unlock(&c3_lock); + } + +#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86) + /* TSC halts in C3, so notify users */ + mark_tsc_unstable("TSC halts in C3"); +#endif + + local_irq_enable(); + current_thread_info()->status |= TS_POLLING; + + cx->usage++; + + acpi_state_timer_broadcast(pr, cx, 0); + cx->time += ticks_elapsed(t1, t2); + return ticks_elapsed_in_us(t1, t2); +} + +/** + * acpi_idle_bm_check - checks if bus master activity was detected + */ +static int acpi_idle_bm_check(void) +{ + u32 bm_status = 0; + + acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); + if (bm_status) + acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); + /* + * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect + * the true state of bus mastering activity; forcing us to + * manually check the BMIDEA bit of each IDE channel. 
+ */ + else if (errata.piix4.bmisx) { + if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) + || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) + bm_status = 1; + } + return bm_status; +} + +/** + * acpi_idle_init - attaches the driver to a CPU + * @dev: the CPU + */ +static int acpi_idle_init(struct cpuidle_device *dev) +{ + int cpu = dev->cpu; + int i, count = 0; + struct acpi_processor_cx *cx; + struct cpuidle_state *state; + + struct acpi_processor *pr = processors[cpu]; + + if (!pr->flags.power_setup_done) + return -EINVAL; + + if (pr->flags.power == 0) { + return -EINVAL; + } + + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { + cx = &pr->power.states[i]; + state = &dev->states[count]; + + if (!cx->valid) + continue; + +#ifdef CONFIG_HOTPLUG_CPU + if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && + !pr->flags.has_cst && + !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) + continue; +#endif + cpuidle_set_statedata(state, cx); + + state->exit_latency = cx->latency; + state->target_residency = cx->latency * 6; + state->power_usage = cx->power; + + state->flags = 0; + switch (cx->type) { + case ACPI_STATE_C1: + state->flags |= CPUIDLE_FLAG_SHALLOW; + state->enter = acpi_idle_enter_c1; + break; + + case ACPI_STATE_C2: + state->flags |= CPUIDLE_FLAG_BALANCED; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = acpi_idle_enter_c2; + break; + + case ACPI_STATE_C3: + state->flags |= CPUIDLE_FLAG_DEEP; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->flags |= CPUIDLE_FLAG_CHECK_BM; + state->enter = acpi_idle_enter_c3; + break; + } + + count++; + } + + if (!count) + return -EINVAL; + + dev->state_count = count; return 0; } + +struct cpuidle_driver acpi_idle_driver = { + .name = "acpi_idle", + .init = acpi_idle_init, + .redetect = acpi_idle_init, + .bm_check = acpi_idle_bm_check, + .owner = THIS_MODULE, +}; Index: linux/drivers/cpuidle/Kconfig =================================================================== --- /dev/null +++ linux/drivers/cpuidle/Kconfig @@ -0,0 +1,39 @@ +menu "CPU idle PM support" + +config CPU_IDLE + bool "CPU idle PM support" + help + CPU idle is a generic framework for supporting software-controlled + idle processor power management. It includes modular cross-platform + governors that can be swapped during runtime. + + If you're using a mobile platform that supports CPU idle PM (e.g. + an ACPI-capable notebook), you should say Y here. + +if CPU_IDLE + +comment "Governors" + +config CPU_IDLE_GOV_LADDER + tristate "'ladder' governor" + depends on CPU_IDLE + default y + help + This cpuidle governor promotes and demotes through the supported idle + states using residency time and bus master activity as metrics. This + algorithm was originally introduced in the old ACPI processor driver. + +config CPU_IDLE_GOV_MENU + tristate "'menu' governor" + depends on CPU_IDLE && NO_HZ + default y + help + This cpuidle governor evaluates all available states and chooses the + deepest state that meets all of the following constraints: BM activity, + expected time until next timer interrupt, and last break event time + delta. It is designed to minimize power consumption. Currently + dynticks is required. + +endif # CPU_IDLE + +endmenu Index: linux/drivers/cpuidle/Makefile =================================================================== --- /dev/null +++ linux/drivers/cpuidle/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for cpuidle. 
+# + +obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ Index: linux/drivers/cpuidle/cpuidle.c =================================================================== --- /dev/null +++ linux/drivers/cpuidle/cpuidle.c @@ -0,0 +1,306 @@ +/* + * cpuidle.c - core cpuidle infrastructure + * + * (C) 2006-2007 Venkatesh Pallipadi + * Shaohua Li + * Adam Belay + * + * This code is licenced under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cpuidle.h" + +DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices); +EXPORT_PER_CPU_SYMBOL_GPL(cpuidle_devices); + +DEFINE_MUTEX(cpuidle_lock); +LIST_HEAD(cpuidle_detected_devices); +static void (*pm_idle_old)(void); + +/** + * cpuidle_idle_call - the main idle loop + * + * NOTE: no locks or semaphores should be used here + */ +static void cpuidle_idle_call(void) +{ + struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices); + struct cpuidle_state *target_state; + int next_state; + + /* check if the device is ready */ + if (!dev || dev->status != CPUIDLE_STATUS_DOIDLE) { + if (pm_idle_old) + pm_idle_old(); + else + local_irq_enable(); + return; + } + + /* ask the governor for the next state */ + next_state = cpuidle_curr_governor->select(dev); + if (need_resched()) + return; + target_state = &dev->states[next_state]; + + /* enter the state and update stats */ + dev->last_residency = target_state->enter(dev, target_state); + dev->last_state = target_state; + target_state->time += dev->last_residency; + target_state->usage++; + + /* give the governor an opportunity to reflect on the outcome */ + if (cpuidle_curr_governor->reflect) + cpuidle_curr_governor->reflect(dev); +} + +/** + * cpuidle_install_idle_handler - installs the cpuidle idle loop handler + */ +void cpuidle_install_idle_handler(void) +{ + if (pm_idle != cpuidle_idle_call) { + /* Make sure all changes finished before we switch to new idle */ + smp_wmb(); + pm_idle = cpuidle_idle_call; + } +} + +/** + * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler + */ +void cpuidle_uninstall_idle_handler(void) +{ + if (pm_idle != pm_idle_old) { + pm_idle = pm_idle_old; + cpu_idle_wait(); + } +} + +/** + * cpuidle_rescan_device - prepares for a new state configuration + * @dev: the target device + * + * Must be called with cpuidle_lock aquired. 
+ */ +void cpuidle_rescan_device(struct cpuidle_device *dev) +{ + int i; + + if (cpuidle_curr_governor->scan) + cpuidle_curr_governor->scan(dev); + + for (i = 0; i < dev->state_count; i++) { + dev->states[i].usage = 0; + dev->states[i].time = 0; + } +} + +/** + * cpuidle_add_device - attaches the driver to a CPU instance + * @sys_dev: the system device (driver model CPU representation) + */ +static int cpuidle_add_device(struct sys_device *sys_dev) +{ + int cpu = sys_dev->id; + struct cpuidle_device *dev; + + dev = per_cpu(cpuidle_devices, cpu); + + mutex_lock(&cpuidle_lock); + if (cpu_is_offline(cpu)) { + mutex_unlock(&cpuidle_lock); + return 0; + } + + if (!dev) { + dev = kzalloc(sizeof(struct cpuidle_device), GFP_KERNEL); + if (!dev) { + mutex_unlock(&cpuidle_lock); + return -ENOMEM; + } + init_completion(&dev->kobj_unregister); + per_cpu(cpuidle_devices, cpu) = dev; + } + dev->cpu = cpu; + + if (dev->status & CPUIDLE_STATUS_DETECTED) { + mutex_unlock(&cpuidle_lock); + return 0; + } + + cpuidle_add_sysfs(sys_dev); + + if (cpuidle_curr_driver) { + if (cpuidle_attach_driver(dev)) + goto err_ret; + } + + if (cpuidle_curr_governor) { + if (cpuidle_attach_governor(dev)) { + cpuidle_detach_driver(dev); + goto err_ret; + } + } + + if (cpuidle_device_can_idle(dev)) + cpuidle_install_idle_handler(); + + list_add(&dev->device_list, &cpuidle_detected_devices); + dev->status |= CPUIDLE_STATUS_DETECTED; + +err_ret: + mutex_unlock(&cpuidle_lock); + + return 0; +} + +/** + * __cpuidle_remove_device - detaches the driver from a CPU instance + * @sys_dev: the system device (driver model CPU representation) + * + * Must be called with cpuidle_lock aquired. + */ +static int __cpuidle_remove_device(struct sys_device *sys_dev) +{ + struct cpuidle_device *dev; + + dev = per_cpu(cpuidle_devices, sys_dev->id); + + if (!(dev->status & CPUIDLE_STATUS_DETECTED)) { + return 0; + } + dev->status &= ~CPUIDLE_STATUS_DETECTED; + /* NOTE: we don't wait because the cpu is already offline */ + if (cpuidle_curr_governor) + cpuidle_detach_governor(dev); + if (cpuidle_curr_driver) + cpuidle_detach_driver(dev); + cpuidle_remove_sysfs(sys_dev); + list_del(&dev->device_list); + wait_for_completion(&dev->kobj_unregister); + per_cpu(cpuidle_devices, sys_dev->id) = NULL; + kfree(dev); + + return 0; +} + +/** + * cpuidle_remove_device - detaches the driver from a CPU instance + * @sys_dev: the system device (driver model CPU representation) + */ +static int cpuidle_remove_device(struct sys_device *sys_dev) +{ + int ret; + mutex_lock(&cpuidle_lock); + ret = __cpuidle_remove_device(sys_dev); + mutex_unlock(&cpuidle_lock); + + return ret; +} + +static struct sysdev_driver cpuidle_sysdev_driver = { + .add = cpuidle_add_device, + .remove = cpuidle_remove_device, +}; + +static int cpuidle_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + struct sys_device *sys_dev; + + sys_dev = get_cpu_sysdev((unsigned long)hcpu); + + switch (action) { + case CPU_ONLINE: + cpuidle_add_device(sys_dev); + break; + case CPU_DOWN_PREPARE: + mutex_lock(&cpuidle_lock); + break; + case CPU_DEAD: + __cpuidle_remove_device(sys_dev); + mutex_unlock(&cpuidle_lock); + break; + case CPU_DOWN_FAILED: + mutex_unlock(&cpuidle_lock); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata cpuidle_cpu_notifier = +{ + .notifier_call = cpuidle_cpu_callback, +}; + +#ifdef CONFIG_SMP + +static void smp_callback(void *v) +{ + /* we already woke the CPU up, nothing more to do */ +} + +/* + * This function gets called 
when a part of the kernel has a new latency + * requirement. This means we need to get all processors out of their C-state, + * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that + * wakes them all right up. + */ +static int cpuidle_latency_notify(struct notifier_block *b, + unsigned long l, void *v) +{ + smp_call_function(smp_callback, NULL, 0, 1); + return NOTIFY_OK; +} + +static struct notifier_block cpuidle_latency_notifier = { + .notifier_call = cpuidle_latency_notify, +}; + +#define latency_notifier_init(x) do { register_latency_notifier(x); } while (0) + +#else /* CONFIG_SMP */ + +#define latency_notifier_init(x) do { } while (0) + +#endif /* CONFIG_SMP */ + +/** + * cpuidle_init - core initializer + */ +static int __init cpuidle_init(void) +{ + int ret; + + pm_idle_old = pm_idle; + + ret = cpuidle_add_class_sysfs(&cpu_sysdev_class); + if (ret) + return ret; + + register_hotcpu_notifier(&cpuidle_cpu_notifier); + + ret = sysdev_driver_register(&cpu_sysdev_class, &cpuidle_sysdev_driver); + + if (ret) { + cpuidle_remove_class_sysfs(&cpu_sysdev_class); + printk(KERN_ERR "cpuidle: failed to initialize\n"); + return ret; + } + + latency_notifier_init(&cpuidle_latency_notifier); + + return 0; +} + +core_initcall(cpuidle_init); Index: linux/drivers/cpuidle/cpuidle.h =================================================================== --- /dev/null +++ linux/drivers/cpuidle/cpuidle.h @@ -0,0 +1,50 @@ +/* + * cpuidle.h - The internal header file + */ + +#ifndef __DRIVER_CPUIDLE_H +#define __DRIVER_CPUIDLE_H + +#include + +/* For internal use only */ +extern struct cpuidle_governor *cpuidle_curr_governor; +extern struct cpuidle_driver *cpuidle_curr_driver; +extern struct list_head cpuidle_drivers; +extern struct list_head cpuidle_governors; +extern struct list_head cpuidle_detected_devices; +extern struct mutex cpuidle_lock; + +/* idle loop */ +extern void cpuidle_install_idle_handler(void); +extern void cpuidle_uninstall_idle_handler(void); +extern void cpuidle_rescan_device(struct cpuidle_device *dev); + +/* drivers */ +extern int cpuidle_attach_driver(struct cpuidle_device *dev); +extern void cpuidle_detach_driver(struct cpuidle_device *dev); +extern int cpuidle_switch_driver(struct cpuidle_driver *drv); + +/* governors */ +extern int cpuidle_attach_governor(struct cpuidle_device *dev); +extern void cpuidle_detach_governor(struct cpuidle_device *dev); +extern int cpuidle_switch_governor(struct cpuidle_governor *gov); + +/* sysfs */ +extern int cpuidle_add_class_sysfs(struct sysdev_class *cls); +extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls); +extern int cpuidle_add_driver_sysfs(struct cpuidle_device *device); +extern void cpuidle_remove_driver_sysfs(struct cpuidle_device *device); +extern int cpuidle_add_sysfs(struct sys_device *sysdev); +extern void cpuidle_remove_sysfs(struct sys_device *sysdev); + +/** + * cpuidle_device_can_idle - determines if a CPU can utilize the idle loop + * @dev: the target CPU + */ +static inline int cpuidle_device_can_idle(struct cpuidle_device *dev) +{ + return (dev->status == CPUIDLE_STATUS_DOIDLE); +} + +#endif /* __DRIVER_CPUIDLE_H */ Index: linux/drivers/cpuidle/driver.c =================================================================== --- /dev/null +++ linux/drivers/cpuidle/driver.c @@ -0,0 +1,276 @@ +/* + * driver.c - driver support + * + * (C) 2006-2007 Venkatesh Pallipadi + * Shaohua Li + * Adam Belay + * + * This code is licenced under the GPL. 
+ */ + +#include +#include +#include + +#include "cpuidle.h" + +LIST_HEAD(cpuidle_drivers); +struct cpuidle_driver *cpuidle_curr_driver; + + +/** + * cpuidle_attach_driver - attaches a driver to a CPU + * @dev: the target CPU + * + * Must be called with cpuidle_lock aquired. + */ +int cpuidle_attach_driver(struct cpuidle_device *dev) +{ + int ret; + + if (dev->status & CPUIDLE_STATUS_DRIVER_ATTACHED) + return -EIO; + + if (!try_module_get(cpuidle_curr_driver->owner)) + return -EINVAL; + + ret = cpuidle_curr_driver->init(dev); + if (ret) { + module_put(cpuidle_curr_driver->owner); + printk(KERN_INFO "cpuidle: driver %s failed to attach to " + "cpu %d\n", cpuidle_curr_driver->name, dev->cpu); + } else { + if (dev->status & CPUIDLE_STATUS_GOVERNOR_ATTACHED) + cpuidle_rescan_device(dev); + smp_wmb(); + dev->status |= CPUIDLE_STATUS_DRIVER_ATTACHED; + cpuidle_add_driver_sysfs(dev); + } + + return ret; +} + +/** + * cpuidle_detach_govenor - detaches a driver from a CPU + * @dev: the target CPU + * + * Must be called with cpuidle_lock aquired. + */ +void cpuidle_detach_driver(struct cpuidle_device *dev) +{ + if (dev->status & CPUIDLE_STATUS_DRIVER_ATTACHED) { + cpuidle_remove_driver_sysfs(dev); + dev->status &= ~CPUIDLE_STATUS_DRIVER_ATTACHED; + if (cpuidle_curr_driver->exit) + cpuidle_curr_driver->exit(dev); + module_put(cpuidle_curr_driver->owner); + } +} + +/** + * __cpuidle_find_driver - finds a driver of the specified name + * @str: the name + * + * Must be called with cpuidle_lock aquired. + */ +static struct cpuidle_driver * __cpuidle_find_driver(const char *str) +{ + struct cpuidle_driver *drv; + + list_for_each_entry(drv, &cpuidle_drivers, driver_list) + if (!strnicmp(str, drv->name, CPUIDLE_NAME_LEN)) + return drv; + + return NULL; +} + +/** + * cpuidle_switch_driver - changes the driver + * @drv: the new target driver + * + * NOTE: "drv" can be NULL to specify disabled + * Must be called with cpuidle_lock aquired. 
+ */ +int cpuidle_switch_driver(struct cpuidle_driver *drv) +{ + struct cpuidle_device *dev; + + if (drv == cpuidle_curr_driver) + return -EINVAL; + + cpuidle_uninstall_idle_handler(); + + if (cpuidle_curr_driver) + list_for_each_entry(dev, &cpuidle_detected_devices, device_list) + cpuidle_detach_driver(dev); + + cpuidle_curr_driver = drv; + + if (drv) { + int ret = 1; + list_for_each_entry(dev, &cpuidle_detected_devices, device_list) + if (cpuidle_attach_driver(dev) == 0) + ret = 0; + + /* If attach on all devices fail, switch to NULL driver */ + if (ret) + cpuidle_curr_driver = NULL; + + if (cpuidle_curr_driver && cpuidle_curr_governor) { + printk(KERN_INFO "cpuidle: using driver %s\n", + drv->name); + cpuidle_install_idle_handler(); + } + } + + return 0; +} + +/** + * cpuidle_register_driver - registers a driver + * @drv: the driver + */ +int cpuidle_register_driver(struct cpuidle_driver *drv) +{ + int ret = -EEXIST; + + if (!drv || !drv->init) + return -EINVAL; + + mutex_lock(&cpuidle_lock); + if (__cpuidle_find_driver(drv->name) == NULL) { + ret = 0; + list_add_tail(&drv->driver_list, &cpuidle_drivers); + if (!cpuidle_curr_driver) + cpuidle_switch_driver(drv); + } + mutex_unlock(&cpuidle_lock); + + return ret; +} + +EXPORT_SYMBOL_GPL(cpuidle_register_driver); + +/** + * cpuidle_unregister_driver - unregisters a driver + * @drv: the driver + */ +void cpuidle_unregister_driver(struct cpuidle_driver *drv) +{ + if (!drv) + return; + + mutex_lock(&cpuidle_lock); + if (drv == cpuidle_curr_driver) + cpuidle_switch_driver(NULL); + list_del(&drv->driver_list); + mutex_unlock(&cpuidle_lock); +} + +EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); + +static void __cpuidle_force_redetect(struct cpuidle_device *dev) +{ + cpuidle_remove_driver_sysfs(dev); + cpuidle_curr_driver->redetect(dev); + cpuidle_add_driver_sysfs(dev); +} + +/** + * cpuidle_force_redetect - redetects the idle states of a CPU + * + * @dev: the CPU to redetect + * @drv: the target driver + * + * Generally, the driver will call this when the supported states set has + * changed. (e.g. as the result of an ACPI transition to battery power) + */ +int cpuidle_force_redetect(struct cpuidle_device *dev, + struct cpuidle_driver *drv) +{ + int uninstalled = 0; + + mutex_lock(&cpuidle_lock); + + if (drv != cpuidle_curr_driver) { + mutex_unlock(&cpuidle_lock); + return 0; + } + + if (!(dev->status & CPUIDLE_STATUS_DRIVER_ATTACHED) || + !cpuidle_curr_driver->redetect) { + mutex_unlock(&cpuidle_lock); + return -EIO; + } + + if (cpuidle_device_can_idle(dev)) { + uninstalled = 1; + cpuidle_uninstall_idle_handler(); + } + + __cpuidle_force_redetect(dev); + + if (cpuidle_device_can_idle(dev)) { + cpuidle_rescan_device(dev); + cpuidle_install_idle_handler(); + } + + /* other devices are still ok */ + if (uninstalled) + cpuidle_install_idle_handler(); + + mutex_unlock(&cpuidle_lock); + + return 0; +} + +EXPORT_SYMBOL_GPL(cpuidle_force_redetect); + +/** + * cpuidle_force_redetect_devices - redetects the idle states of all CPUs + * + * @drv: the target driver + * + * Generally, the driver will call this when the supported states set has + * changed. (e.g. 
as the result of an ACPI transition to battery power) + */ +int cpuidle_force_redetect_devices(struct cpuidle_driver *drv) +{ + struct cpuidle_device *dev; + int ret = 0; + + mutex_lock(&cpuidle_lock); + + if (drv != cpuidle_curr_driver) + goto out; + + if (!cpuidle_curr_driver->redetect) { + ret = -EIO; + goto out; + } + + cpuidle_uninstall_idle_handler(); + + list_for_each_entry(dev, &cpuidle_detected_devices, device_list) + __cpuidle_force_redetect(dev); + + cpuidle_install_idle_handler(); +out: + mutex_unlock(&cpuidle_lock); + return ret; +} + +EXPORT_SYMBOL_GPL(cpuidle_force_redetect_devices); + +/** + * cpuidle_get_bm_activity - determines if BM activity has occured + */ +int cpuidle_get_bm_activity(void) +{ + if (cpuidle_curr_driver->bm_check) + return cpuidle_curr_driver->bm_check(); + else + return 0; +} +EXPORT_SYMBOL_GPL(cpuidle_get_bm_activity); + Index: linux/drivers/cpuidle/governor.c =================================================================== --- /dev/null +++ linux/drivers/cpuidle/governor.c @@ -0,0 +1,187 @@ +/* + * governor.c - governor support + * + * (C) 2006-2007 Venkatesh Pallipadi + * Shaohua Li + * Adam Belay + * + * This code is licenced under the GPL. + */ + +#include +#include +#include + +#include "cpuidle.h" + +LIST_HEAD(cpuidle_governors); +struct cpuidle_governor *cpuidle_curr_governor; + + +/** + * cpuidle_attach_governor - attaches a governor to a CPU + * @dev: the target CPU + * + * Must be called with cpuidle_lock aquired. + */ +int cpuidle_attach_governor(struct cpuidle_device *dev) +{ + int ret = 0; + + if(dev->status & CPUIDLE_STATUS_GOVERNOR_ATTACHED) + return -EIO; + + if (!try_module_get(cpuidle_curr_governor->owner)) + return -EINVAL; + + if (cpuidle_curr_governor->init) + ret = cpuidle_curr_governor->init(dev); + if (ret) { + module_put(cpuidle_curr_governor->owner); + printk(KERN_ERR "cpuidle: governor %s failed to attach to cpu %d\n", + cpuidle_curr_governor->name, dev->cpu); + } else { + if (dev->status & CPUIDLE_STATUS_DRIVER_ATTACHED) + cpuidle_rescan_device(dev); + smp_wmb(); + dev->status |= CPUIDLE_STATUS_GOVERNOR_ATTACHED; + } + + return ret; +} + +/** + * cpuidle_detach_govenor - detaches a governor from a CPU + * @dev: the target CPU + * + * Must be called with cpuidle_lock aquired. + */ +void cpuidle_detach_governor(struct cpuidle_device *dev) +{ + if (dev->status & CPUIDLE_STATUS_GOVERNOR_ATTACHED) { + dev->status &= ~CPUIDLE_STATUS_GOVERNOR_ATTACHED; + if (cpuidle_curr_governor->exit) + cpuidle_curr_governor->exit(dev); + module_put(cpuidle_curr_governor->owner); + } +} + +/** + * __cpuidle_find_governor - finds a governor of the specified name + * @str: the name + * + * Must be called with cpuidle_lock aquired. + */ +static struct cpuidle_governor * __cpuidle_find_governor(const char *str) +{ + struct cpuidle_governor *gov; + + list_for_each_entry(gov, &cpuidle_governors, governor_list) + if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN)) + return gov; + + return NULL; +} + +/** + * cpuidle_switch_governor - changes the governor + * @gov: the new target governor + * + * NOTE: "gov" can be NULL to specify disabled + * Must be called with cpuidle_lock aquired. 
+ */ +int cpuidle_switch_governor(struct cpuidle_governor *gov) +{ + struct cpuidle_device *dev; + + if (gov == cpuidle_curr_governor) + return -EINVAL; + + cpuidle_uninstall_idle_handler(); + + if (cpuidle_curr_governor) + list_for_each_entry(dev, &cpuidle_detected_devices, device_list) + cpuidle_detach_governor(dev); + + cpuidle_curr_governor = gov; + + if (gov) { + list_for_each_entry(dev, &cpuidle_detected_devices, device_list) + cpuidle_attach_governor(dev); + if (cpuidle_curr_driver) + cpuidle_install_idle_handler(); + printk(KERN_INFO "cpuidle: using governor %s\n", gov->name); + } + + return 0; +} + +/** + * cpuidle_register_governor - registers a governor + * @gov: the governor + */ +int cpuidle_register_governor(struct cpuidle_governor *gov) +{ + int ret = -EEXIST; + + if (!gov || !gov->select) + return -EINVAL; + + mutex_lock(&cpuidle_lock); + if (__cpuidle_find_governor(gov->name) == NULL) { + ret = 0; + list_add_tail(&gov->governor_list, &cpuidle_governors); + if (!cpuidle_curr_governor || + cpuidle_curr_governor->rating < gov->rating) + cpuidle_switch_governor(gov); + } + mutex_unlock(&cpuidle_lock); + + return ret; +} + +EXPORT_SYMBOL_GPL(cpuidle_register_governor); + +/** + * cpuidle_replace_governor - find a replacement governor + * @exclude_rating: the rating that will be skipped while looking for + * new governor. + */ +static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating) +{ + struct cpuidle_governor *gov; + struct cpuidle_governor *ret_gov = NULL; + unsigned int max_rating = 0; + + list_for_each_entry(gov, &cpuidle_governors, governor_list) { + if (gov->rating == exclude_rating) + continue; + if (gov->rating > max_rating) { + max_rating = gov->rating; + ret_gov = gov; + } + } + + return ret_gov; +} + +/** + * cpuidle_unregister_governor - unregisters a governor + * @gov: the governor + */ +void cpuidle_unregister_governor(struct cpuidle_governor *gov) +{ + if (!gov) + return; + + mutex_lock(&cpuidle_lock); + if (gov == cpuidle_curr_governor) { + struct cpuidle_governor *new_gov; + new_gov = cpuidle_replace_governor(gov->rating); + cpuidle_switch_governor(new_gov); + } + list_del(&gov->governor_list); + mutex_unlock(&cpuidle_lock); +} + +EXPORT_SYMBOL_GPL(cpuidle_unregister_governor); Index: linux/drivers/cpuidle/governors/Makefile =================================================================== --- /dev/null +++ linux/drivers/cpuidle/governors/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for cpuidle governors. +# + +obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o +obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o Index: linux/drivers/cpuidle/governors/ladder.c =================================================================== --- /dev/null +++ linux/drivers/cpuidle/governors/ladder.c @@ -0,0 +1,228 @@ +/* + * ladder.c - the residency ladder algorithm + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2004, 2005 Dominik Brodowski + * + * (C) 2006-2007 Venkatesh Pallipadi + * Shaohua Li + * Adam Belay + * + * This code is licenced under the GPL. + */ + +#include +#include +#include +#include +#include + +#include +#include + +#define PROMOTION_COUNT 4 +#define DEMOTION_COUNT 1 + +/* + * bm_history -- bit-mask with a bit per jiffy of bus-master activity + * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms + * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms + * 100 HZ: 0x0000000F: 4 jiffies = 40ms + * reduce history for more aggressive entry into C3 + */ +static unsigned int bm_history __read_mostly = + (HZ >= 800 ? 
0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); +module_param(bm_history, uint, 0644); + +struct ladder_device_state { + struct { + u32 promotion_count; + u32 demotion_count; + u32 promotion_time; + u32 demotion_time; + u32 bm; + } threshold; + struct { + int promotion_count; + int demotion_count; + } stats; +}; + +struct ladder_device { + struct ladder_device_state states[CPUIDLE_STATE_MAX]; + unsigned int bm_check:1; + unsigned long bm_check_timestamp; + unsigned long bm_activity; /* FIXME: bm activity should be global */ + int last_state_idx; +}; + +/** + * ladder_do_selection - prepares private data for a state change + * @ldev: the ladder device + * @old_idx: the current state index + * @new_idx: the new target state index + */ +static inline void ladder_do_selection(struct ladder_device *ldev, + int old_idx, int new_idx) +{ + ldev->states[old_idx].stats.promotion_count = 0; + ldev->states[old_idx].stats.demotion_count = 0; + ldev->last_state_idx = new_idx; +} + +/** + * ladder_select_state - selects the next state to enter + * @dev: the CPU + */ +static int ladder_select_state(struct cpuidle_device *dev) +{ + struct ladder_device *ldev = dev->governor_data; + struct ladder_device_state *last_state; + int last_residency, last_idx = ldev->last_state_idx; + + if (unlikely(!ldev)) + return 0; + + last_state = &ldev->states[last_idx]; + + /* demote if within BM threshold */ + if (ldev->bm_check) { + unsigned long diff; + + diff = jiffies - ldev->bm_check_timestamp; + if (diff > 31) + diff = 31; + + ldev->bm_activity <<= diff; + if (cpuidle_get_bm_activity()) + ldev->bm_activity |= ((1 << diff) - 1); + + ldev->bm_check_timestamp = jiffies; + if ((last_idx > 0) && + (last_state->threshold.bm & ldev->bm_activity)) { + ladder_do_selection(ldev, last_idx, last_idx - 1); + return last_idx - 1; + } + } + + if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) + last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; + else + last_residency = last_state->threshold.promotion_time + 1; + + /* consider promotion */ + if (last_idx < dev->state_count - 1 && + last_residency > last_state->threshold.promotion_time && + dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) { + last_state->stats.promotion_count++; + last_state->stats.demotion_count = 0; + if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { + ladder_do_selection(ldev, last_idx, last_idx + 1); + return last_idx + 1; + } + } + + /* consider demotion */ + if (last_idx > 0 && + last_residency < last_state->threshold.demotion_time) { + last_state->stats.demotion_count++; + last_state->stats.promotion_count = 0; + if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) { + ladder_do_selection(ldev, last_idx, last_idx - 1); + return last_idx - 1; + } + } + + /* otherwise remain at the current state */ + return last_idx; +} + +/** + * ladder_scan_device - scans a CPU's states and does setup + * @dev: the CPU + */ +static void ladder_scan_device(struct cpuidle_device *dev) +{ + int i, bm_check = 0; + struct ladder_device *ldev = dev->governor_data; + struct ladder_device_state *lstate; + struct cpuidle_state *state; + + ldev->last_state_idx = 0; + ldev->bm_check_timestamp = 0; + ldev->bm_activity = 0; + + for (i = 0; i < dev->state_count; i++) { + state = &dev->states[i]; + lstate = &ldev->states[i]; + + lstate->stats.promotion_count = 0; + lstate->stats.demotion_count = 0; + + lstate->threshold.promotion_count = PROMOTION_COUNT; + 
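		/*
		 * (Editorial note, not in the original patch: PROMOTION_COUNT
		 * above and DEMOTION_COUNT below act as hysteresis -- the
		 * ladder only steps one state deeper after that many
		 * consecutive sufficiently long sleeps, and only steps back
		 * after that many consecutive sleeps that were too short,
		 * mirroring the promotion/demotion policy this series removes
		 * from acpi_processor_idle().)
		 */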
lstate->threshold.demotion_count = DEMOTION_COUNT; + + if (i < dev->state_count - 1) + lstate->threshold.promotion_time = state->exit_latency; + if (i > 0) + lstate->threshold.demotion_time = state->exit_latency; + if (state->flags & CPUIDLE_FLAG_CHECK_BM) { + lstate->threshold.bm = bm_history; + bm_check = 1; + } else + lstate->threshold.bm = 0; + } + + ldev->bm_check = bm_check; +} + +/** + * ladder_init_device - initializes a CPU-instance + * @dev: the CPU + */ +static int ladder_init_device(struct cpuidle_device *dev) +{ + dev->governor_data = kmalloc(sizeof(struct ladder_device), GFP_KERNEL); + + return !dev->governor_data; +} + +/** + * ladder_exit_device - exits a CPU-instance + * @dev: the CPU + */ +static void ladder_exit_device(struct cpuidle_device *dev) +{ + kfree(dev->governor_data); +} + +static struct cpuidle_governor ladder_governor = { + .name = "ladder", + .rating = 10, + .init = ladder_init_device, + .exit = ladder_exit_device, + .scan = ladder_scan_device, + .select = ladder_select_state, + .owner = THIS_MODULE, +}; + +/** + * init_ladder - initializes the governor + */ +static int __init init_ladder(void) +{ + return cpuidle_register_governor(&ladder_governor); +} + +/** + * exit_ladder - exits the governor + */ +static void __exit exit_ladder(void) +{ + cpuidle_unregister_governor(&ladder_governor); +} + +MODULE_LICENSE("GPL"); +module_init(init_ladder); +module_exit(exit_ladder); Index: linux/drivers/cpuidle/governors/menu.c =================================================================== --- /dev/null +++ linux/drivers/cpuidle/governors/menu.c @@ -0,0 +1,181 @@ +/* + * menu.c - the menu idle governor + * + * Copyright (C) 2006-2007 Adam Belay + * + * This code is licenced under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define BM_HOLDOFF 20000 /* 20 ms */ +#define DEMOTION_THRESHOLD 5 +#define DEMOTION_TIMEOUT_MULTIPLIER 1000 + +struct menu_device { + int last_state_idx; + + int deepest_break_state; + struct timespec break_expire_time_ts; + int break_last_cnt; + + int deepest_bm_state; + int bm_elapsed_us; + int bm_holdoff_us; +}; + +static DEFINE_PER_CPU(struct menu_device, menu_devices); + +/** + * menu_select - selects the next idle state to enter + * @dev: the CPU + */ +static int menu_select(struct cpuidle_device *dev) +{ + struct menu_device *data = &__get_cpu_var(menu_devices); + int i, expected_us, max_state = dev->state_count; + + /* discard BM history because it is sticky */ + cpuidle_get_bm_activity(); + + /* determine the expected residency time */ + expected_us = (s32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; + + /* determine the maximum state compatible with current BM status */ + if (cpuidle_get_bm_activity()) + data->bm_elapsed_us = 0; + if (data->bm_elapsed_us <= data->bm_holdoff_us) + max_state = data->deepest_bm_state + 1; + + /* determine the maximum state compatible with recent idle breaks */ + if (data->deepest_break_state >= 0) { + struct timespec now; + ktime_get_ts(&now); + if (timespec_compare(&data->break_expire_time_ts, &now) > 0) { + max_state = min(max_state, + data->deepest_break_state + 1); + } else { + data->deepest_break_state = -1; + } + } + + /* find the deepest idle state that satisfies our constraints */ + for (i = 1; i < max_state; i++) { + struct cpuidle_state *s = &dev->states[i]; + + if (s->target_residency > expected_us) + break; + + if (s->exit_latency > system_latency_constraint()) + break; + } + + if (data->last_state_idx != i - 1) + data->break_last_cnt = 0; + + 
data->last_state_idx = i - 1; + return i - 1; +} + +/** + * menu_reflect - attempts to guess what happened after entry + * @dev: the CPU + * + * NOTE: it's important to be fast here because this operation will add to + * the overall exit latency. + */ +static void menu_reflect(struct cpuidle_device *dev) +{ + struct menu_device *data = &__get_cpu_var(menu_devices); + int last_idx = data->last_state_idx; + int measured_us = cpuidle_get_last_residency(dev); + struct cpuidle_state *target = &dev->states[last_idx]; + + /* + * Ugh, this idle state doesn't support residency measurements, so we + * are basically lost in the dark. As a compromise, assume we slept + * for one full standard timer tick. However, be aware that this + * could potentially result in a suboptimal state transition. + */ + if (!(target->flags & CPUIDLE_FLAG_TIME_VALID)) + measured_us = USEC_PER_SEC / HZ; + + data->bm_elapsed_us += measured_us; + + if (data->last_state_idx == 0) + return; + + /* + * Did something other than the timer interrupt + * cause an early break event? + */ + if (unlikely(measured_us < target->target_residency)) { + if (data->break_last_cnt > DEMOTION_THRESHOLD) { + data->deepest_break_state = data->last_state_idx - 1; + ktime_get_ts(&data->break_expire_time_ts); + timespec_add_ns(&data->break_expire_time_ts, + target->target_residency * + DEMOTION_TIMEOUT_MULTIPLIER); + } else { + data->break_last_cnt++; + } + } else { + if (data->break_last_cnt > 0) + data->break_last_cnt--; + } +} + +/** + * menu_scan_device - scans a CPU's states and does setup + * @dev: the CPU + */ +static void menu_scan_device(struct cpuidle_device *dev) +{ + struct menu_device *data = &per_cpu(menu_devices, dev->cpu); + int i; + + data->last_state_idx = 0; + data->bm_elapsed_us = 0; + data->bm_holdoff_us = BM_HOLDOFF; + data->deepest_break_state = -1; + + for (i = 1; i < dev->state_count; i++) + if (dev->states[i].flags & CPUIDLE_FLAG_CHECK_BM) + break; + data->deepest_bm_state = i - 1; +} + +static struct cpuidle_governor menu_governor = { + .name = "menu", + .rating = 20, + .scan = menu_scan_device, + .select = menu_select, + .reflect = menu_reflect, + .owner = THIS_MODULE, +}; + +/** + * init_menu - initializes the governor + */ +static int __init init_menu(void) +{ + return cpuidle_register_governor(&menu_governor); +} + +/** + * exit_menu - exits the governor + */ +static void __exit exit_menu(void) +{ + cpuidle_unregister_governor(&menu_governor); +} + +MODULE_LICENSE("GPL"); +module_init(init_menu); +module_exit(exit_menu); Index: linux/drivers/cpuidle/sysfs.c =================================================================== --- /dev/null +++ linux/drivers/cpuidle/sysfs.c @@ -0,0 +1,393 @@ +/* + * sysfs.c - sysfs support + * + * (C) 2006-2007 Shaohua Li + * + * This code is licenced under the GPL. 
+ */ + +#include +#include +#include +#include + +#include "cpuidle.h" + +static unsigned int sysfs_switch; +static int __init cpuidle_sysfs_setup(char *unused) +{ + sysfs_switch = 1; + return 1; +} +__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); + +static ssize_t show_available_drivers(struct sys_device *dev, char *buf) +{ + ssize_t i = 0; + struct cpuidle_driver *tmp; + + mutex_lock(&cpuidle_lock); + list_for_each_entry(tmp, &cpuidle_drivers, driver_list) { + if (i >= (ssize_t)((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2)) + goto out; + i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name); + } +out: + i+= sprintf(&buf[i], "\n"); + mutex_unlock(&cpuidle_lock); + return i; +} + +static ssize_t show_available_governors(struct sys_device *dev, char *buf) +{ + ssize_t i = 0; + struct cpuidle_governor *tmp; + + mutex_lock(&cpuidle_lock); + list_for_each_entry(tmp, &cpuidle_governors, governor_list) { + if (i >= (ssize_t)((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2)) + goto out; + i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name); + } + if (list_empty(&cpuidle_governors)) + i+= sprintf(&buf[i], "no governors"); +out: + i+= sprintf(&buf[i], "\n"); + mutex_unlock(&cpuidle_lock); + return i; +} + +static ssize_t show_current_driver(struct sys_device *dev, char *buf) +{ + ssize_t ret; + + mutex_lock(&cpuidle_lock); + ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name); + mutex_unlock(&cpuidle_lock); + return ret; +} + +static ssize_t store_current_driver(struct sys_device *dev, + const char *buf, size_t count) +{ + char str[CPUIDLE_NAME_LEN]; + int len = count; + struct cpuidle_driver *tmp, *found = NULL; + + if (len > CPUIDLE_NAME_LEN) + len = CPUIDLE_NAME_LEN; + + if (sscanf(buf, "%s", str) != 1) + return -EINVAL; + + mutex_lock(&cpuidle_lock); + list_for_each_entry(tmp, &cpuidle_drivers, driver_list) { + if (strncmp(tmp->name, str, CPUIDLE_NAME_LEN) == 0) { + found = tmp; + break; + } + } + if (found) + cpuidle_switch_driver(found); + mutex_unlock(&cpuidle_lock); + + return count; +} + +static ssize_t show_current_governor(struct sys_device *dev, char *buf) +{ + ssize_t i; + + mutex_lock(&cpuidle_lock); + if (cpuidle_curr_governor) + i = sprintf(buf, "%s\n", cpuidle_curr_governor->name); + else + i = sprintf(buf, "no governor\n"); + mutex_unlock(&cpuidle_lock); + + return i; +} + +static ssize_t store_current_governor(struct sys_device *dev, + const char *buf, size_t count) +{ + char str[CPUIDLE_NAME_LEN]; + int len = count; + struct cpuidle_governor *tmp, *found = NULL; + + if (len > CPUIDLE_NAME_LEN) + len = CPUIDLE_NAME_LEN; + + if (sscanf(buf, "%s", str) != 1) + return -EINVAL; + + mutex_lock(&cpuidle_lock); + list_for_each_entry(tmp, &cpuidle_governors, governor_list) { + if (strncmp(tmp->name, str, CPUIDLE_NAME_LEN) == 0) { + found = tmp; + break; + } + } + if (found) + cpuidle_switch_governor(found); + mutex_unlock(&cpuidle_lock); + + return count; +} + +static SYSDEV_ATTR(current_driver_ro, 0444, show_current_driver, NULL); +static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL); + +static struct attribute *cpuclass_default_attrs[] = { + &attr_current_driver_ro.attr, + &attr_current_governor_ro.attr, + NULL +}; + +static SYSDEV_ATTR(available_drivers, 0444, show_available_drivers, NULL); +static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL); +static SYSDEV_ATTR(current_driver, 0644, show_current_driver, + store_current_driver); +static SYSDEV_ATTR(current_governor, 0644, show_current_governor, + 
store_current_governor); + +static struct attribute *cpuclass_switch_attrs[] = { + &attr_available_drivers.attr, + &attr_available_governors.attr, + &attr_current_driver.attr, + &attr_current_governor.attr, + NULL +}; + +static struct attribute_group cpuclass_attr_group = { + .attrs = cpuclass_default_attrs, + .name = "cpuidle", +}; + +/** + * cpuidle_add_class_sysfs - add CPU global sysfs attributes + */ +int cpuidle_add_class_sysfs(struct sysdev_class *cls) +{ + if (sysfs_switch) + cpuclass_attr_group.attrs = cpuclass_switch_attrs; + + return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group); +} + +/** + * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes + */ +void cpuidle_remove_class_sysfs(struct sysdev_class *cls) +{ + sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group); +} + +struct cpuidle_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_device *, char *); + ssize_t (*store)(struct cpuidle_device *, const char *, size_t count); +}; + +#define define_one_ro(_name, show) \ + static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL) +#define define_one_rw(_name, show, store) \ + static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store) + +#define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj) +#define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr) +static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char * buf) +{ + int ret = -EIO; + struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); + struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr); + + if (cattr->show) { + mutex_lock(&cpuidle_lock); + ret = cattr->show(dev, buf); + mutex_unlock(&cpuidle_lock); + } + return ret; +} + +static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr, + const char * buf, size_t count) +{ + int ret = -EIO; + struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); + struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr); + + if (cattr->store) { + mutex_lock(&cpuidle_lock); + ret = cattr->store(dev, buf, count); + mutex_unlock(&cpuidle_lock); + } + return ret; +} + +static struct sysfs_ops cpuidle_sysfs_ops = { + .show = cpuidle_show, + .store = cpuidle_store, +}; + +static void cpuidle_sysfs_release(struct kobject *kobj) +{ + struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); + + complete(&dev->kobj_unregister); +} + +static struct kobj_type ktype_cpuidle = { + .sysfs_ops = &cpuidle_sysfs_ops, + .release = cpuidle_sysfs_release, +}; + +struct cpuidle_state_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_state *, char *); + ssize_t (*store)(struct cpuidle_state *, const char *, size_t); +}; + +#define define_one_state_ro(_name, show) \ +static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) + +#define define_show_state_function(_name) \ +static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ +{ \ + return sprintf(buf, "%d\n", state->_name);\ +} + +define_show_state_function(exit_latency) +define_show_state_function(power_usage) +define_show_state_function(usage) +define_show_state_function(time) +define_one_state_ro(latency, show_state_exit_latency); +define_one_state_ro(power, show_state_power_usage); +define_one_state_ro(usage, show_state_usage); +define_one_state_ro(time, show_state_time); + +static struct attribute *cpuidle_state_default_attrs[] = { + &attr_latency.attr, + &attr_power.attr, + &attr_usage.attr, + &attr_time.attr, + NULL +}; + +#define kobj_to_state_obj(k) 
container_of(k, struct cpuidle_state_kobj, kobj) +#define kobj_to_state(k) (kobj_to_state_obj(k)->state) +#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) +static ssize_t cpuidle_state_show(struct kobject * kobj, + struct attribute * attr ,char * buf) +{ + int ret = -EIO; + struct cpuidle_state *state = kobj_to_state(kobj); + struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); + + if (cattr->show) + ret = cattr->show(state, buf); + + return ret; +} + +static struct sysfs_ops cpuidle_state_sysfs_ops = { + .show = cpuidle_state_show, +}; + +static void cpuidle_state_sysfs_release(struct kobject *kobj) +{ + struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj); + + complete(&state_obj->kobj_unregister); +} + +static struct kobj_type ktype_state_cpuidle = { + .sysfs_ops = &cpuidle_state_sysfs_ops, + .default_attrs = cpuidle_state_default_attrs, + .release = cpuidle_state_sysfs_release, +}; + +static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i) +{ + kobject_unregister(&device->kobjs[i]->kobj); + wait_for_completion(&device->kobjs[i]->kobj_unregister); + kfree(device->kobjs[i]); + device->kobjs[i] = NULL; +} + +/** + * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes + * @device: the target device + */ +int cpuidle_add_driver_sysfs(struct cpuidle_device *device) +{ + int i, ret = -ENOMEM; + struct cpuidle_state_kobj *kobj; + + /* state statistics */ + for (i = 0; i < device->state_count; i++) { + kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); + if (!kobj) + goto error_state; + kobj->state = &device->states[i]; + init_completion(&kobj->kobj_unregister); + + kobj->kobj.parent = &device->kobj; + kobj->kobj.ktype = &ktype_state_cpuidle; + kobject_set_name(&kobj->kobj, "state%d", i); + ret = kobject_register(&kobj->kobj); + if (ret) { + kfree(kobj); + goto error_state; + } + device->kobjs[i] = kobj; + } + + return 0; + +error_state: + for (i = i - 1; i >= 0; i--) + cpuidle_free_state_kobj(device, i); + return ret; +} + +/** + * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes + * @device: the target device + */ +void cpuidle_remove_driver_sysfs(struct cpuidle_device *device) +{ + int i; + + for (i = 0; i < device->state_count; i++) + cpuidle_free_state_kobj(device, i); +} + +/** + * cpuidle_add_sysfs - creates a sysfs instance for the target device + * @sysdev: the target device + */ +int cpuidle_add_sysfs(struct sys_device *sysdev) +{ + int cpu = sysdev->id; + struct cpuidle_device *dev; + + dev = per_cpu(cpuidle_devices, cpu); + dev->kobj.parent = &sysdev->kobj; + dev->kobj.ktype = &ktype_cpuidle; + kobject_set_name(&dev->kobj, "%s", "cpuidle"); + return kobject_register(&dev->kobj); +} + +/** + * cpuidle_remove_sysfs - deletes a sysfs instance on the target device + * @sysdev: the target device + */ +void cpuidle_remove_sysfs(struct sys_device *sysdev) +{ + int cpu = sysdev->id; + struct cpuidle_device *dev; + + dev = per_cpu(cpuidle_devices, cpu); + kobject_unregister(&dev->kobj); +} Index: linux/include/acpi/acpi_bus.h =================================================================== --- linux.orig/include/acpi/acpi_bus.h +++ linux/include/acpi/acpi_bus.h @@ -377,6 +377,8 @@ static inline int acpi_pm_device_sleep_s } #endif /* !CONFIG_PM_SLEEP */ +int acpi_pm_device_sleep_state(struct device *, int, int *); + #endif /* CONFIG_ACPI */ #endif /*__ACPI_BUS_H__*/ Index: linux/include/acpi/processor.h =================================================================== 
--- linux.orig/include/acpi/processor.h +++ linux/include/acpi/processor.h @@ -199,6 +199,7 @@ struct acpi_processor_flags { u8 bm_check:1; u8 has_cst:1; u8 power_setup_done:1; + u8 bm_rld_set:1; }; struct acpi_processor { @@ -320,6 +321,8 @@ int acpi_processor_power_init(struct acp int acpi_processor_cst_has_changed(struct acpi_processor *pr); int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device); +extern struct cpuidle_driver acpi_idle_driver; +void acpi_max_cstate_changed(void); /* in processor_thermal.c */ int acpi_processor_get_limit_info(struct acpi_processor *pr); Index: linux/include/linux/acpi.h =================================================================== --- linux.orig/include/linux/acpi.h +++ linux/include/linux/acpi.h @@ -205,11 +205,8 @@ static inline unsigned int acpi_get_csta { return max_cstate; } -static inline void acpi_set_cstate_limit(unsigned int new_limit) -{ - max_cstate = new_limit; - return; -} +extern void (*acpi_do_set_cstate_limit)(void); +extern void acpi_set_cstate_limit(unsigned int new_limit); #else static inline unsigned int acpi_get_cstate_limit(void) { return 0; } static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; } Index: linux/include/linux/cpuidle.h =================================================================== --- /dev/null +++ linux/include/linux/cpuidle.h @@ -0,0 +1,190 @@ +/* + * cpuidle.h - a generic framework for CPU idle power management + * + * (C) 2007 Venkatesh Pallipadi + * Shaohua Li + * Adam Belay + * + * This code is licenced under the GPL. + */ + +#ifndef _LINUX_CPUIDLE_H +#define _LINUX_CPUIDLE_H + +#include +#include +#include +#include +#include + +#define CPUIDLE_STATE_MAX 8 +#define CPUIDLE_NAME_LEN 16 + +struct cpuidle_device; + + +/**************************** + * CPUIDLE DEVICE INTERFACE * + ****************************/ + +struct cpuidle_state { + char name[CPUIDLE_NAME_LEN]; + void *driver_data; + + unsigned int flags; + unsigned int exit_latency; /* in US */ + unsigned int power_usage; /* in mW */ + unsigned int target_residency; /* in US */ + + unsigned int usage; + unsigned int time; /* in US */ + + int (*enter) (struct cpuidle_device *dev, + struct cpuidle_state *state); +}; + +/* Idle State Flags */ +#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? 
*/ +#define CPUIDLE_FLAG_CHECK_BM (0x02) /* BM activity will exit state */ +#define CPUIDLE_FLAG_SHALLOW (0x10) /* low latency, minimal savings */ +#define CPUIDLE_FLAG_BALANCED (0x20) /* medium latency, moderate savings */ +#define CPUIDLE_FLAG_DEEP (0x40) /* high latency, large savings */ + +#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) + +/** + * cpuidle_get_statedata - retrieves private driver state data + * @state: the state + */ +static inline void * cpuidle_get_statedata(struct cpuidle_state *state) +{ + return state->driver_data; +} + +/** + * cpuidle_set_statedata - stores private driver state data + * @state: the state + * @data: the private data + */ +static inline void +cpuidle_set_statedata(struct cpuidle_state *state, void *data) +{ + state->driver_data = data; +} + +struct cpuidle_state_kobj { + struct cpuidle_state *state; + struct completion kobj_unregister; + struct kobject kobj; +}; + +struct cpuidle_device { + unsigned int status; + int cpu; + + int last_residency; + int state_count; + struct cpuidle_state states[CPUIDLE_STATE_MAX]; + struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; + struct cpuidle_state *last_state; + + struct list_head device_list; + struct kobject kobj; + struct completion kobj_unregister; + void *governor_data; +}; + +DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); + +/* Device Status Flags */ +#define CPUIDLE_STATUS_DETECTED (0x1) +#define CPUIDLE_STATUS_DRIVER_ATTACHED (0x2) +#define CPUIDLE_STATUS_GOVERNOR_ATTACHED (0x4) +#define CPUIDLE_STATUS_DOIDLE (CPUIDLE_STATUS_DETECTED | \ + CPUIDLE_STATUS_DRIVER_ATTACHED | \ + CPUIDLE_STATUS_GOVERNOR_ATTACHED) + +/** + * cpuidle_get_last_residency - retrieves the last state's residency time + * @dev: the target CPU + * + * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set + */ +static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) +{ + return dev->last_residency; +} + + +/**************************** + * CPUIDLE DRIVER INTERFACE * + ****************************/ + +struct cpuidle_driver { + char name[CPUIDLE_NAME_LEN]; + struct list_head driver_list; + + int (*init) (struct cpuidle_device *dev); + void (*exit) (struct cpuidle_device *dev); + int (*redetect) (struct cpuidle_device *dev); + + int (*bm_check) (void); + + struct module *owner; +}; + +#ifdef CONFIG_CPU_IDLE + +extern int cpuidle_register_driver(struct cpuidle_driver *drv); +extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); +extern int cpuidle_force_redetect(struct cpuidle_device *dev, struct cpuidle_driver *drv); +extern int cpuidle_force_redetect_devices(struct cpuidle_driver *drv); + +#else + +static inline int cpuidle_register_driver(struct cpuidle_driver *drv) +{return 0;} +static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } +static inline int cpuidle_force_redetect(struct cpuidle_device *dev, struct cpuidle_driver *drv) +{return 0;} +static inline int cpuidle_force_redetect_devices(struct cpuidle_driver *drv) +{return 0;} + +#endif + +/****************************** + * CPUIDLE GOVERNOR INTERFACE * + ******************************/ + +struct cpuidle_governor { + char name[CPUIDLE_NAME_LEN]; + struct list_head governor_list; + unsigned int rating; + + int (*init) (struct cpuidle_device *dev); + void (*exit) (struct cpuidle_device *dev); + void (*scan) (struct cpuidle_device *dev); + + int (*select) (struct cpuidle_device *dev); + void (*reflect) (struct cpuidle_device *dev); + + struct module *owner; +}; + +#ifdef CONFIG_CPU_IDLE + +extern int 
cpuidle_register_governor(struct cpuidle_governor *gov); +extern void cpuidle_unregister_governor(struct cpuidle_governor *gov); +extern int cpuidle_get_bm_activity(void); + +#else + +static inline int cpuidle_register_governor(struct cpuidle_governor *gov) +{return 0;} +static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { } +static inline int cpuidle_get_bm_activity(void) +{return 0;} + +#endif + +#endif /* _LINUX_CPUIDLE_H */ Index: linux/include/linux/tick.h =================================================================== --- linux.orig/include/linux/tick.h +++ linux/include/linux/tick.h @@ -7,6 +7,7 @@ #define _LINUX_TICK_H #include +#include #ifdef CONFIG_GENERIC_CLOCKEVENTS @@ -40,6 +41,7 @@ enum tick_nohz_mode { * @idle_sleeps: Number of idle calls, where the sched tick was stopped * @idle_entrytime: Time when the idle call was entered * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped + * @sleep_length: Duration of the current idle sleep */ struct tick_sched { struct hrtimer sched_timer; @@ -52,6 +54,7 @@ struct tick_sched { unsigned long idle_sleeps; ktime_t idle_entrytime; ktime_t idle_sleeptime; + ktime_t sleep_length; unsigned long last_jiffies; unsigned long next_jiffies; ktime_t idle_expires; @@ -100,10 +103,18 @@ static inline int tick_check_oneshot_cha extern void tick_nohz_stop_sched_tick(void); extern void tick_nohz_restart_sched_tick(void); extern void tick_nohz_update_jiffies(void); +extern ktime_t tick_nohz_get_sleep_length(void); +extern unsigned long tick_nohz_get_idle_jiffies(void); # else static inline void tick_nohz_stop_sched_tick(void) { } static inline void tick_nohz_restart_sched_tick(void) { } static inline void tick_nohz_update_jiffies(void) { } +static inline ktime_t tick_nohz_get_sleep_length(void) +{ + ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; + + return len; +} # endif /* !NO_HZ */ #endif Index: linux/kernel/softirq.c =================================================================== --- linux.orig/kernel/softirq.c +++ linux/kernel/softirq.c @@ -304,11 +304,6 @@ void irq_exit(void) if (!in_interrupt() && local_softirq_pending()) invoke_softirq(); -#ifdef CONFIG_NO_HZ - /* Make sure that timer wheel updates are propagated */ - if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) - tick_nohz_stop_sched_tick(); -#endif preempt_enable_no_resched(); } Index: linux/kernel/time/tick-sched.c =================================================================== --- linux.orig/kernel/time/tick-sched.c +++ linux/kernel/time/tick-sched.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -153,6 +154,7 @@ void tick_nohz_stop_sched_tick(void) unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; struct tick_sched *ts; ktime_t last_update, expires, now, delta; + struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; int cpu; local_irq_save(flags); @@ -290,11 +292,36 @@ void tick_nohz_stop_sched_tick(void) out: ts->next_jiffies = next_jiffies; ts->last_jiffies = last_jiffies; + ts->sleep_length = ktime_sub(dev->next_event, now); end: local_irq_restore(flags); } /** + * tick_nohz_get_sleep_length - return the length of the current sleep + * + * Called from power state control code with interrupts disabled + */ +ktime_t tick_nohz_get_sleep_length(void) +{ + struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); + + return ts->sleep_length; +} +EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length); + +/** + * tick_nohz_get_idle_jiffies - returns the 
current idle jiffie count + */ +unsigned long tick_nohz_get_idle_jiffies(void) +{ + struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); + + return ts->idle_jiffies; +} +EXPORT_SYMBOL_GPL(tick_nohz_get_idle_jiffies); + +/** * nohz_restart_sched_tick - restart the idle tick from the idle task * * Restart the idle tick when the CPU is woken up from idle patches/preempt-realtime-mmdrop-delayed.patch0000664000077200007720000001566310655544575020752 0ustar mingomingo--- include/linux/sched.h | 11 +++ kernel/fork.c | 139 ++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/sched.c | 6 +- 3 files changed, 155 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/include/linux/sched.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/sched.h +++ linux-rt-rebase.q/include/linux/sched.h @@ -571,6 +571,9 @@ struct mm_struct { /* Architecture-specific MM context */ mm_context_t context; + /* realtime bits */ + struct list_head delayed_drop; + /* Swap token stuff */ /* * Last value of global fault stamp as seen by this process. @@ -1782,12 +1785,20 @@ extern struct mm_struct * mm_alloc(void) /* mmdrop drops the mm and the page tables */ extern void FASTCALL(__mmdrop(struct mm_struct *)); +extern void FASTCALL(__mmdrop_delayed(struct mm_struct *)); + static inline void mmdrop(struct mm_struct * mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } +static inline void mmdrop_delayed(struct mm_struct * mm) +{ + if (atomic_dec_and_test(&mm->mm_count)) + __mmdrop_delayed(mm); +} + /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ Index: linux-rt-rebase.q/kernel/fork.c =================================================================== --- linux-rt-rebase.q.orig/kernel/fork.c +++ linux-rt-rebase.q/kernel/fork.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,8 @@ #include #include #include +#include +#include #include #include #include @@ -70,6 +73,15 @@ DEFINE_PER_CPU(unsigned long, process_co __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ +/* + * Delayed mmdrop. In the PREEMPT_RT case we + * dont want to do this from the scheduling + * context. 
+ */ +static DEFINE_PER_CPU(struct task_struct *, desched_task); + +static DEFINE_PER_CPU(struct list_head, delayed_drop_list); + int nr_processes(void) { int cpu; @@ -130,6 +142,8 @@ void __put_task_struct(struct task_struc void __init fork_init(unsigned long mempages) { + int i; + #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR #ifndef ARCH_MIN_TASKALIGN #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES @@ -157,6 +171,9 @@ void __init fork_init(unsigned long memp init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC]; + + for (i = 0; i < NR_CPUS; i++) + INIT_LIST_HEAD(&per_cpu(delayed_drop_list, i)); } static struct task_struct *dup_task_struct(struct task_struct *orig) @@ -343,6 +360,7 @@ static struct mm_struct * mm_init(struct spin_lock_init(&mm->page_table_lock); rwlock_init(&mm->ioctx_list_lock); mm->ioctx_list = NULL; + INIT_LIST_HEAD(&mm->delayed_drop); mm->free_area_cache = TASK_UNMAPPED_BASE; mm->cached_hole_size = ~0UL; @@ -1269,7 +1287,9 @@ static struct task_struct *copy_process( attach_pid(p, PIDTYPE_SID, task_session(current)); list_add_tail_rcu(&p->tasks, &init_task.tasks); + preempt_disable(); __get_cpu_var(process_counts)++; + preempt_enable(); } attach_pid(p, PIDTYPE_PID, pid); nr_threads++; @@ -1691,3 +1711,122 @@ bad_unshare_cleanup_thread: bad_unshare_out: return err; } + +static int mmdrop_complete(void) +{ + struct list_head *head; + int ret = 0; + + head = &get_cpu_var(delayed_drop_list); + while (!list_empty(head)) { + struct mm_struct *mm = list_entry(head->next, + struct mm_struct, delayed_drop); + list_del(&mm->delayed_drop); + put_cpu_var(delayed_drop_list); + + __mmdrop(mm); + ret = 1; + + head = &get_cpu_var(delayed_drop_list); + } + put_cpu_var(delayed_drop_list); + + return ret; +} + +/* + * We dont want to do complex work from the scheduler, thus + * we delay the work to a per-CPU worker thread: + */ +void fastcall __mmdrop_delayed(struct mm_struct *mm) +{ + struct task_struct *desched_task; + struct list_head *head; + + head = &get_cpu_var(delayed_drop_list); + list_add_tail(&mm->delayed_drop, head); + desched_task = __get_cpu_var(desched_task); + if (desched_task) + wake_up_process(desched_task); + put_cpu_var(delayed_drop_list); +} + +static int desched_thread(void * __bind_cpu) +{ + set_user_nice(current, -10); + current->flags |= PF_NOFREEZE | PF_SOFTIRQ; + + set_current_state(TASK_INTERRUPTIBLE); + + while (!kthread_should_stop()) { + + if (mmdrop_complete()) + continue; + schedule(); + + /* This must be called from time to time on ia64, and is a no-op on other archs. + * Used to be in cpu_idle(), but with the new -rt semantics it can't stay there. + */ + check_pgt_cache(); + + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + return 0; +} + +static int __devinit cpu_callback(struct notifier_block *nfb, + unsigned long action, + void *hcpu) +{ + int hotcpu = (unsigned long)hcpu; + struct task_struct *p; + + switch (action) { + case CPU_UP_PREPARE: + + BUG_ON(per_cpu(desched_task, hotcpu)); + INIT_LIST_HEAD(&per_cpu(delayed_drop_list, hotcpu)); + p = kthread_create(desched_thread, hcpu, "desched/%d", hotcpu); + if (IS_ERR(p)) { + printk("desched_thread for %i failed\n", hotcpu); + return NOTIFY_BAD; + } + per_cpu(desched_task, hotcpu) = p; + kthread_bind(p, hotcpu); + break; + case CPU_ONLINE: + + wake_up_process(per_cpu(desched_task, hotcpu)); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + + /* Unbind so it can run. Fall thru. 
*/ + kthread_bind(per_cpu(desched_task, hotcpu), smp_processor_id()); + case CPU_DEAD: + + p = per_cpu(desched_task, hotcpu); + per_cpu(desched_task, hotcpu) = NULL; + kthread_stop(p); + takeover_tasklets(hotcpu); + break; +#endif /* CONFIG_HOTPLUG_CPU */ + } + return NOTIFY_OK; +} + +static struct notifier_block __devinitdata cpu_nfb = { + .notifier_call = cpu_callback +}; + +__init int spawn_desched_task(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + + cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); + cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); + register_cpu_notifier(&cpu_nfb); + return 0; +} + Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -2178,8 +2178,12 @@ static inline void finish_task_switch(st finish_lock_switch(rq, prev); fire_sched_in_preempt_notifiers(current); trace_stop_sched_switched(current); + /* + * Delay the final freeing of the mm or task, so that we dont have + * to do complex work from within the scheduler: + */ if (mm) - mmdrop(mm); + mmdrop_delayed(mm); if (unlikely(prev_state == TASK_DEAD)) { /* patches/preempt-realtime-ia64.patch0000664000077200007720000014052510655544574016605 0ustar mingomingo Hi, This is a first version of my port of Ingo's -rt kernel to the IA64 arch. So far the kernel boots with PREEMPT_RT enabled (on a 4-cpu tiger), and that's about it. I've not done extensive tests (only scripts/rt-tester), nor any measurements of any kind. There's very probably many bugs I'm not aware of. But there is already one thing I know should be fixed : I've changed the declaration of (struct zone).lock (in include/linux/mmzone.h) from spinlock_t to raw_spinlock_t. I did this because on IA64, cpu_idle(), which is not allowed to call schedule(), calls check_pgt_cache(). I guess this could be fixed by moving this call to another kernel thread... ideas are welcome. Simon. 
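The mmdrop-delayed patch above already shows the shape such a fix would take: the context that must not sleep only queues the object on a list and wakes a dedicated thread, and that thread does the expensive teardown where scheduling is allowed. Below is a minimal userspace sketch of that hand-off, using plain pthreads and invented names (deferred_obj, drop_delayed, desched_worker); it illustrates the pattern only and is not kernel code, nor part of either patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred_obj {
        struct deferred_obj *next;
        char payload[64];               /* stands in for the real object, e.g. an mm */
};

static struct deferred_obj *drop_list;  /* the kernel patch keeps one list per CPU */
static pthread_mutex_t drop_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drop_wake = PTHREAD_COND_INITIALIZER;
static int drop_done;

/* hot path: queue the object and wake the worker, never free here */
static void drop_delayed(struct deferred_obj *obj)
{
        pthread_mutex_lock(&drop_lock);
        obj->next = drop_list;
        drop_list = obj;
        pthread_cond_signal(&drop_wake);        /* wake_up_process() analogue */
        pthread_mutex_unlock(&drop_lock);
}

/* worker thread: does the expensive teardown outside the critical path */
static void *desched_worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&drop_lock);
        while (!drop_done || drop_list) {
                while (drop_list) {
                        struct deferred_obj *obj = drop_list;
                        drop_list = obj->next;
                        pthread_mutex_unlock(&drop_lock);
                        free(obj);              /* __mmdrop() analogue */
                        pthread_mutex_lock(&drop_lock);
                }
                if (!drop_done)
                        pthread_cond_wait(&drop_wake, &drop_lock);
        }
        pthread_mutex_unlock(&drop_lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;
        int i;

        pthread_create(&worker, NULL, desched_worker, NULL);
        for (i = 0; i < 8; i++)
                drop_delayed(calloc(1, sizeof(struct deferred_obj)));

        pthread_mutex_lock(&drop_lock);
        drop_done = 1;
        pthread_cond_signal(&drop_wake);
        pthread_mutex_unlock(&drop_lock);
        pthread_join(worker, NULL);
        printf("all deferred objects freed by the worker thread\n");
        return 0;
}

Built with cc -pthread, the producer side stays cheap and never performs the free itself; pushing the heavy work into the worker is what __mmdrop_delayed()/desched_thread do per CPU in the patch above, and is the same move a deferred check_pgt_cache() would make.
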
Signed-off-by: Simon.Derr@bull.net arch/ia64/Kconfig | 82 ++++++++++++++++++++++++++++---- arch/ia64/kernel/asm-offsets.c | 2 arch/ia64/kernel/entry.S | 25 +++++----- arch/ia64/kernel/fsys.S | 21 ++++++++ arch/ia64/kernel/iosapic.c | 34 ++++++++++++- arch/ia64/kernel/mca.c | 2 arch/ia64/kernel/perfmon.c | 6 +- arch/ia64/kernel/process.c | 14 +++-- arch/ia64/kernel/sal.c | 2 arch/ia64/kernel/salinfo.c | 6 +- arch/ia64/kernel/semaphore.c | 8 +-- arch/ia64/kernel/signal.c | 8 +++ arch/ia64/kernel/smp.c | 16 ++++++ arch/ia64/kernel/smpboot.c | 3 + arch/ia64/kernel/time.c | 74 +++++++++++++++++++---------- arch/ia64/kernel/traps.c | 10 ++-- arch/ia64/kernel/unwind.c | 4 - arch/ia64/kernel/unwind_i.h | 2 arch/ia64/mm/init.c | 2 arch/ia64/mm/tlb.c | 2 include/asm-ia64/irqflags.h | 95 ++++++++++++++++++++++++++++++++++++++ include/asm-ia64/mmu_context.h | 2 include/asm-ia64/percpu.h | 21 +++++++- include/asm-ia64/processor.h | 6 +- include/asm-ia64/rtc.h | 7 ++ include/asm-ia64/rwsem.h | 32 ++++++------ include/asm-ia64/sal.h | 2 include/asm-ia64/semaphore.h | 53 +++++++++++++-------- include/asm-ia64/spinlock.h | 26 ++++------ include/asm-ia64/spinlock_types.h | 4 - include/asm-ia64/system.h | 67 -------------------------- include/asm-ia64/thread_info.h | 1 include/asm-ia64/tlb.h | 10 ++-- 33 files changed, 446 insertions(+), 203 deletions(-) Index: linux-rt-rebase.q/arch/ia64/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/Kconfig +++ linux-rt-rebase.q/arch/ia64/Kconfig @@ -44,6 +44,7 @@ config SWIOTLB config RWSEM_XCHGADD_ALGORITHM bool + depends on !PREEMPT_RT default y config ARCH_HAS_ILOG2_U32 @@ -275,6 +276,69 @@ config SMP If you don't know what to do here, say N. + +config GENERIC_TIME + bool + default y + +config HIGH_RES_TIMERS + bool "High-Resolution Timers" + help + + POSIX timers are available by default. This option enables + high-resolution POSIX timers. With this option the resolution + is at least 1 microsecond. High resolution is not free. If + enabled this option will add a small overhead each time a + timer expires that is not on a 1/HZ tick boundary. If no such + timers are used the overhead is nil. + + This option enables two additional POSIX CLOCKS, + CLOCK_REALTIME_HR and CLOCK_MONOTONIC_HR. Note that this + option does not change the resolution of CLOCK_REALTIME or + CLOCK_MONOTONIC which remain at 1/HZ resolution. + +config HIGH_RES_RESOLUTION + int "High-Resolution-Timer resolution (nanoseconds)" + depends on HIGH_RES_TIMERS + default 1000 + help + + This sets the resolution of timers accessed with + CLOCK_REALTIME_HR and CLOCK_MONOTONIC_HR. Too + fine a resolution (small a number) will usually not + be observable due to normal system latencies. For an + 800 MHZ processor about 10,000 is the recommended maximum + (smallest number). If you don't need that sort of resolution, + higher numbers may generate less overhead. + +choice + prompt "Clock source" + depends on HIGH_RES_TIMERS + default HIGH_RES_TIMER_ITC + help + This option allows you to choose the hardware source in charge + of generating high precision interruptions on your system. + On IA-64 these are: + + + ITC Interval Time Counter 1/CPU clock + HPET High Precision Event Timer ~ (XXX:have to check the spec) + + The ITC timer is available on all the ia64 computers because + it is integrated directly into the processor. However it may not + give correct results on MP machines with processors running + at different clock rates. 
In this case you may want to use + the HPET if available on your machine. + + +config HIGH_RES_TIMER_ITC + bool "Interval Time Counter/ITC" + +config HIGH_RES_TIMER_HPET + bool "High Precision Event Timer/HPET" + +endchoice + config NR_CPUS int "Maximum number of CPUs (2-1024)" range 2 1024 @@ -327,17 +391,15 @@ config FORCE_CPEI_RETARGET This option it useful to enable this feature on older BIOS's as well. You can also enable this by using boot command line option force_cpei=1. -config PREEMPT - bool "Preemptible Kernel" - help - This option reduces the latency of the kernel when reacting to - real-time or interactive events by allowing a low priority process to - be preempted even if it is in kernel mode executing a system call. - This allows applications to run more reliably even when the system is - under load. +source "kernel/Kconfig.preempt" - Say Y here if you are building a kernel for a desktop, embedded - or real-time system. Say N if you are unsure. +config RWSEM_GENERIC_SPINLOCK + bool + depends on PREEMPT_RT + default y + +config PREEMPT + def_bool y if (PREEMPT_RT || PREEMPT_SOFTIRQS || PREEMPT_HARDIRQS || PREEMPT_VOLUNTARY || PREEMPT_DESKTOP) source "mm/Kconfig" Index: linux-rt-rebase.q/arch/ia64/kernel/asm-offsets.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/asm-offsets.c +++ linux-rt-rebase.q/arch/ia64/kernel/asm-offsets.c @@ -257,6 +257,7 @@ void foo(void) offsetof (struct pal_min_state_area_s, pmsa_xip)); BLANK(); +#ifdef CONFIG_TIME_INTERPOLATION /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ DEFINE(IA64_GTOD_LOCK_OFFSET, offsetof (struct fsyscall_gtod_data_t, lock)); @@ -278,4 +279,5 @@ void foo(void) offsetof (struct itc_jitter_data_t, itc_jitter)); DEFINE(IA64_ITC_LASTCYCLE_OFFSET, offsetof (struct itc_jitter_data_t, itc_lastcycle)); +#endif } Index: linux-rt-rebase.q/arch/ia64/kernel/entry.S =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/entry.S +++ linux-rt-rebase.q/arch/ia64/kernel/entry.S @@ -1098,23 +1098,24 @@ skip_rbs_switch: st8 [r2]=r8 st8 [r3]=r10 .work_pending: - tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0? + tbit.nz p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0? +(p6) br.cond.sptk.few .needresched + tbit.z p6,p0=r31,TIF_NEED_RESCHED_DELAYED // current_thread_info()->need_resched_delayed==0? 
(p6) br.cond.sptk.few .notify -#ifdef CONFIG_PREEMPT -(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1 + +.needresched: + +(pKStk) br.cond.sptk.many .fromkernel ;; -(pKStk) st4 [r20]=r21 ssm psr.i // enable interrupts -#endif br.call.spnt.many rp=schedule -.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 - rsm psr.i // disable interrupts - ;; -#ifdef CONFIG_PREEMPT -(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 +.ret9a: rsm psr.i // disable interrupts ;; -(pKStk) st4 [r20]=r0 // preempt_count() <- 0 -#endif + br.cond.sptk.many .endpreemptdep +.fromkernel: + br.call.spnt.many rp=preempt_schedule_irq +.ret9b: rsm psr.i // disable interrupts +.endpreemptdep: (pLvSys)br.cond.sptk.few .work_pending_syscall_end br.cond.sptk.many .work_processed_kernel // re-check Index: linux-rt-rebase.q/arch/ia64/kernel/fsys.S =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/fsys.S +++ linux-rt-rebase.q/arch/ia64/kernel/fsys.S @@ -26,6 +26,7 @@ #include "entry.h" +#ifdef CONFIG_TIME_INTERPOLATION /* * See Documentation/ia64/fsys.txt for details on fsyscalls. * @@ -349,6 +350,26 @@ ENTRY(fsys_clock_gettime) br.many .gettime END(fsys_clock_gettime) + +#else // !CONFIG_TIME_INTERPOLATION + +# define fsys_gettimeofday 0 +# define fsys_clock_gettime 0 + +.fail_einval: + mov r8 = EINVAL + mov r10 = -1 + FSYS_RETURN + +.fail_efault: + mov r8 = EFAULT + mov r10 = -1 + FSYS_RETURN + +#endif + + + /* * long fsys_rt_sigprocmask (int how, sigset_t *set, sigset_t *oset, size_t sigsetsize). */ Index: linux-rt-rebase.q/arch/ia64/kernel/iosapic.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/iosapic.c +++ linux-rt-rebase.q/arch/ia64/kernel/iosapic.c @@ -111,7 +111,7 @@ (PAGE_SIZE / sizeof(struct iosapic_rte_info)) #define RTE_PREALLOCATED (1) -static DEFINE_SPINLOCK(iosapic_lock); +static DEFINE_RAW_SPINLOCK(iosapic_lock); /* * These tables map IA-64 vectors to the IOSAPIC pin that generates this @@ -403,6 +403,34 @@ iosapic_startup_level_irq (unsigned int return 0; } +/* + * In the preemptible case mask the IRQ first then handle it and ack it. 
+ */ +#ifdef CONFIG_PREEMPT_HARDIRQS + +static void +iosapic_ack_level_irq (unsigned int irq) +{ + ia64_vector vec = irq_to_vector(irq); + struct iosapic_rte_info *rte; + + move_irq(irq); + mask_irq(irq); + list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) + iosapic_eoi(rte->addr, vec); +} + +static void +iosapic_end_level_irq (unsigned int irq) +{ + if (!(irq_desc[irq].status & IRQ_INPROGRESS)) + unmask_irq(irq); +} + +#else /* !CONFIG_PREEMPT_HARDIRQS */ + +#define iosapic_ack_level_irq nop + static void iosapic_end_level_irq (unsigned int irq) { @@ -424,10 +452,12 @@ iosapic_end_level_irq (unsigned int irq) } } + +#endif + #define iosapic_shutdown_level_irq mask_irq #define iosapic_enable_level_irq unmask_irq #define iosapic_disable_level_irq mask_irq -#define iosapic_ack_level_irq nop struct irq_chip irq_type_iosapic_level = { .name = "IO-SAPIC-level", Index: linux-rt-rebase.q/arch/ia64/kernel/mca.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/mca.c +++ linux-rt-rebase.q/arch/ia64/kernel/mca.c @@ -322,7 +322,7 @@ ia64_mca_spin(const char *func) typedef struct ia64_state_log_s { - spinlock_t isl_lock; + raw_spinlock_t isl_lock; int isl_index; unsigned long isl_count; ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */ Index: linux-rt-rebase.q/arch/ia64/kernel/perfmon.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/perfmon.c +++ linux-rt-rebase.q/arch/ia64/kernel/perfmon.c @@ -280,7 +280,7 @@ typedef struct { */ typedef struct pfm_context { - spinlock_t ctx_lock; /* context protection */ + raw_spinlock_t ctx_lock; /* context protection */ pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */ unsigned int ctx_state; /* state: active/inactive (no bitfield) */ @@ -369,7 +369,7 @@ typedef struct pfm_context { * mostly used to synchronize between system wide and per-process */ typedef struct { - spinlock_t pfs_lock; /* lock the structure */ + raw_spinlock_t pfs_lock; /* lock the structure */ unsigned int pfs_task_sessions; /* number of per task sessions */ unsigned int pfs_sys_sessions; /* number of per system wide sessions */ @@ -510,7 +510,7 @@ static pfm_intr_handler_desc_t *pfm_alt static struct proc_dir_entry *perfmon_dir; static pfm_uuid_t pfm_null_uuid = {0,}; -static spinlock_t pfm_buffer_fmt_lock; +static raw_spinlock_t pfm_buffer_fmt_lock; static LIST_HEAD(pfm_buffer_fmt_list); static pmu_config_t *pmu_conf; Index: linux-rt-rebase.q/arch/ia64/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/process.c +++ linux-rt-rebase.q/arch/ia64/kernel/process.c @@ -94,6 +94,9 @@ show_stack (struct task_struct *task, un void dump_stack (void) { + if (irqs_disabled()) { + printk("Uh oh.. 
entering dump_stack() with irqs disabled.\n"); + } show_stack(NULL, NULL); } @@ -197,7 +200,7 @@ void default_idle (void) { local_irq_enable(); - while (!need_resched()) { + while (!need_resched() && !need_resched_delayed()) { if (can_do_pal_halt) safe_halt(); else @@ -281,7 +284,7 @@ cpu_idle (void) current_thread_info()->status |= TS_POLLING; } - if (!need_resched()) { + if (!need_resched() && !need_resched_delayed()) { void (*idle)(void); #ifdef CONFIG_SMP min_xtp(); @@ -303,10 +306,11 @@ cpu_idle (void) normal_xtp(); #endif } - preempt_enable_no_resched(); - schedule(); + __preempt_enable_no_resched(); + __schedule(); + preempt_disable(); - check_pgt_cache(); + if (cpu_is_offline(cpu)) play_dead(); } Index: linux-rt-rebase.q/arch/ia64/kernel/sal.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/sal.c +++ linux-rt-rebase.q/arch/ia64/kernel/sal.c @@ -18,7 +18,7 @@ #include #include - __cacheline_aligned DEFINE_SPINLOCK(sal_lock); + __cacheline_aligned DEFINE_RAW_SPINLOCK(sal_lock); unsigned long sal_platform_features; unsigned short sal_revision; Index: linux-rt-rebase.q/arch/ia64/kernel/salinfo.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/salinfo.c +++ linux-rt-rebase.q/arch/ia64/kernel/salinfo.c @@ -140,7 +140,7 @@ enum salinfo_state { struct salinfo_data { cpumask_t cpu_event; /* which cpus have outstanding events */ - struct semaphore mutex; + struct compat_semaphore mutex; u8 *log_buffer; u64 log_size; u8 *oemdata; /* decoded oem data */ @@ -156,8 +156,8 @@ struct salinfo_data { static struct salinfo_data salinfo_data[ARRAY_SIZE(salinfo_log_name)]; -static DEFINE_SPINLOCK(data_lock); -static DEFINE_SPINLOCK(data_saved_lock); +static DEFINE_RAW_SPINLOCK(data_lock); +static DEFINE_RAW_SPINLOCK(data_saved_lock); /** salinfo_platform_oemdata - optional callback to decode oemdata from an error * record. Index: linux-rt-rebase.q/arch/ia64/kernel/semaphore.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/semaphore.c +++ linux-rt-rebase.q/arch/ia64/kernel/semaphore.c @@ -40,12 +40,12 @@ */ void -__up (struct semaphore *sem) +__up (struct compat_semaphore *sem) { wake_up(&sem->wait); } -void __sched __down (struct semaphore *sem) +void __sched __down (struct compat_semaphore *sem) { struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); @@ -82,7 +82,7 @@ void __sched __down (struct semaphore *s tsk->state = TASK_RUNNING; } -int __sched __down_interruptible (struct semaphore * sem) +int __sched __down_interruptible (struct compat_semaphore * sem) { int retval = 0; struct task_struct *tsk = current; @@ -142,7 +142,7 @@ int __sched __down_interruptible (struct * count. */ int -__down_trylock (struct semaphore *sem) +__down_trylock (struct compat_semaphore *sem) { unsigned long flags; int sleepers; Index: linux-rt-rebase.q/arch/ia64/kernel/signal.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/signal.c +++ linux-rt-rebase.q/arch/ia64/kernel/signal.c @@ -446,6 +446,14 @@ ia64_do_signal (struct sigscratch *scr, long errno = scr->pt.r8; # define ERR_CODE(c) (IS_IA32_PROCESS(&scr->pt) ? 
-(c) : (c)) +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + local_irq_enable(); + preempt_check_resched(); +#endif + /* * In the ia64_leave_kernel code path, we want the common case to go fast, which * is why we may in certain cases get here from kernel mode. Just return without Index: linux-rt-rebase.q/arch/ia64/kernel/smp.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/smp.c +++ linux-rt-rebase.q/arch/ia64/kernel/smp.c @@ -261,6 +261,22 @@ smp_send_reschedule (int cpu) } /* + * this function sends a 'reschedule' IPI to all other CPUs. + * This is used when RT tasks are starving and other CPUs + * might be able to run them: + */ +void smp_send_reschedule_allbutself(void) +{ + unsigned int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id()) + platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, + IA64_IPI_DM_INT, 0); + } +} + +/* * Called with preemption disabled. */ static void Index: linux-rt-rebase.q/arch/ia64/kernel/smpboot.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/smpboot.c +++ linux-rt-rebase.q/arch/ia64/kernel/smpboot.c @@ -370,6 +370,8 @@ smp_setup_percpu_timer (void) { } +extern void register_itc_clockevent(void); + static void __cpuinit smp_callin (void) { @@ -448,6 +450,7 @@ smp_callin (void) #ifdef CONFIG_IA32_SUPPORT ia32_gdt_init(); #endif + register_itc_clockevent(); /* * Allow the master to continue. Index: linux-rt-rebase.q/arch/ia64/kernel/time.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/time.c +++ linux-rt-rebase.q/arch/ia64/kernel/time.c @@ -70,6 +70,7 @@ timer_interrupt (int irq, void *dev_id) platform_timer_interrupt(irq, dev_id); +#if 0 new_itm = local_cpu_data->itm_next; if (!time_after(ia64_get_itc(), new_itm)) @@ -77,29 +78,48 @@ timer_interrupt (int irq, void *dev_id) ia64_get_itc(), new_itm); profile_tick(CPU_PROFILING); +#endif + + if (time_after(ia64_get_itc(), local_cpu_data->itm_tick_next)) { - while (1) { - update_process_times(user_mode(get_irq_regs())); + unsigned long new_tick_itm; + new_tick_itm = local_cpu_data->itm_tick_next; - new_itm += local_cpu_data->itm_delta; + profile_tick(CPU_PROFILING, get_irq_regs()); - if (smp_processor_id() == time_keeper_id) { - /* - * Here we are in the timer irq handler. We have irqs locally - * disabled, but we don't know if the timer_bh is running on - * another CPU. We need to avoid to SMP race by acquiring the - * xtime_lock. - */ - write_seqlock(&xtime_lock); - do_timer(1); - local_cpu_data->itm_next = new_itm; - write_sequnlock(&xtime_lock); - } else - local_cpu_data->itm_next = new_itm; + while (1) { + update_process_times(user_mode(get_irq_regs())); + + new_tick_itm += local_cpu_data->itm_tick_delta; + + if (smp_processor_id() == time_keeper_id) { + /* + * Here we are in the timer irq handler. We have irqs locally + * disabled, but we don't know if the timer_bh is running on + * another CPU. We need to avoid to SMP race by acquiring the + * xtime_lock. 
+ */ + write_seqlock(&xtime_lock); + do_timer(get_irq_regs()); + local_cpu_data->itm_tick_next = new_tick_itm; + write_sequnlock(&xtime_lock); + } else + local_cpu_data->itm_tick_next = new_tick_itm; + + if (time_after(new_tick_itm, ia64_get_itc())) + break; + } + } - if (time_after(new_itm, ia64_get_itc())) - break; + if (time_after(ia64_get_itc(), local_cpu_data->itm_timer_next)) { + if (itc_clockevent.event_handler) + itc_clockevent.event_handler(get_irq_regs()); + // FIXME, really, please + new_itm = local_cpu_data->itm_tick_next; + + if (time_after(new_itm, local_cpu_data->itm_timer_next)) + new_itm = local_cpu_data->itm_timer_next; /* * Allow IPIs to interrupt the timer loop. */ @@ -117,8 +137,8 @@ timer_interrupt (int irq, void *dev_id) * too fast (with the potentially devastating effect * of losing monotony of time). */ - while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2)) - new_itm += local_cpu_data->itm_delta; + while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_tick_delta/2)) + new_itm += local_cpu_data->itm_tick_delta; ia64_set_itm(new_itm); /* double check, in case we got hit by a (slow) PMI: */ } while (time_after_eq(ia64_get_itc(), new_itm)); @@ -137,7 +157,7 @@ ia64_cpu_local_tick (void) /* arrange for the cycle counter to generate a timer interrupt: */ ia64_set_itv(IA64_TIMER_VECTOR); - delta = local_cpu_data->itm_delta; + delta = local_cpu_data->itm_tick_delta; /* * Stagger the timer tick for each CPU so they don't occur all at (almost) the * same time: @@ -146,8 +166,8 @@ ia64_cpu_local_tick (void) unsigned long hi = 1UL << ia64_fls(cpu); shift = (2*(cpu - hi) + 1) * delta/hi/2; } - local_cpu_data->itm_next = ia64_get_itc() + delta + shift; - ia64_set_itm(local_cpu_data->itm_next); + local_cpu_data->itm_tick_next = ia64_get_itc() + delta + shift; + ia64_set_itm(local_cpu_data->itm_tick_next); } static int nojitter; @@ -205,7 +225,7 @@ ia64_init_itm (void) itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den; - local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ; + local_cpu_data->itm_tick_delta = (itc_freq + HZ/2) / HZ; printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, " "ITC freq=%lu.%03luMHz", smp_processor_id(), platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000, @@ -225,6 +245,7 @@ ia64_init_itm (void) local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<mfh = 1; } - preempt_enable_no_resched(); + __preempt_enable_no_resched(); } static inline int Index: linux-rt-rebase.q/arch/ia64/kernel/unwind.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/unwind.c +++ linux-rt-rebase.q/arch/ia64/kernel/unwind.c @@ -82,7 +82,7 @@ typedef unsigned long unw_word; typedef unsigned char unw_hash_index_t; static struct { - spinlock_t lock; /* spinlock for unwind data */ + raw_spinlock_t lock; /* spinlock for unwind data */ /* list of unwind tables (one per load-module) */ struct unw_table *tables; @@ -146,7 +146,7 @@ static struct { # endif } unw = { .tables = &unw.kernel_table, - .lock = __SPIN_LOCK_UNLOCKED(unw.lock), + .lock = RAW_SPIN_LOCK_UNLOCKED(unw.lock), .save_order = { UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR Index: linux-rt-rebase.q/arch/ia64/kernel/unwind_i.h =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/kernel/unwind_i.h +++ linux-rt-rebase.q/arch/ia64/kernel/unwind_i.h @@ -154,7 +154,7 @@ struct unw_script { 
unsigned long ip; /* ip this script is for */ unsigned long pr_mask; /* mask of predicates script depends on */ unsigned long pr_val; /* predicate values this script is for */ - rwlock_t lock; + raw_rwlock_t lock; unsigned int flags; /* see UNW_FLAG_* in unwind.h */ unsigned short lru_chain; /* used for least-recently-used chain */ unsigned short coll_chain; /* used for hash collisions */ Index: linux-rt-rebase.q/arch/ia64/mm/init.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/mm/init.c +++ linux-rt-rebase.q/arch/ia64/mm/init.c @@ -37,7 +37,7 @@ #include #include -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); +DEFINE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); extern void ia64_tlb_init (void); Index: linux-rt-rebase.q/arch/ia64/mm/tlb.c =================================================================== --- linux-rt-rebase.q.orig/arch/ia64/mm/tlb.c +++ linux-rt-rebase.q/arch/ia64/mm/tlb.c @@ -32,7 +32,7 @@ static struct { } purge; struct ia64_ctx ia64_ctx = { - .lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock), + .lock = RAW_SPIN_LOCK_UNLOCKED(ia64_ctx.lock), .next = 1, .max_ctx = ~0U }; Index: linux-rt-rebase.q/include/asm-ia64/irqflags.h =================================================================== --- /dev/null +++ linux-rt-rebase.q/include/asm-ia64/irqflags.h @@ -0,0 +1,95 @@ + +/* + * include/asm-i64/irqflags.h + * + * IRQ flags handling + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() macros from the lowlevel headers. + */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +/* For spinlocks etc */ + +/* + * - clearing psr.i is implicitly serialized (visible by next insn) + * - setting psr.i requires data serialization + * - we need a stop-bit before reading PSR because we sometimes + * write a floating-point register right before reading the PSR + * and that writes to PSR.mfl + */ +#define __local_irq_save(x) \ +do { \ + ia64_stop(); \ + (x) = ia64_getreg(_IA64_REG_PSR); \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ +} while (0) + +#define __local_irq_disable() \ +do { \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ +} while (0) + +#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) + +#ifdef CONFIG_IA64_DEBUG_IRQ + + extern unsigned long last_cli_ip; + +# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP) + +# define raw_local_irq_save(x) \ +do { \ + unsigned long psr; \ + \ + __local_irq_save(psr); \ + if (psr & IA64_PSR_I) \ + __save_ip(); \ + (x) = psr; \ +} while (0) + +# define raw_local_irq_disable() do { unsigned long x; local_irq_save(x); } while (0) + +# define raw_local_irq_restore(x) \ +do { \ + unsigned long old_psr, psr = (x); \ + \ + local_save_flags(old_psr); \ + __local_irq_restore(psr); \ + if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) \ + __save_ip(); \ +} while (0) + +#else /* !CONFIG_IA64_DEBUG_IRQ */ +# define raw_local_irq_save(x) __local_irq_save(x) +# define raw_local_irq_disable() __local_irq_disable() +# define raw_local_irq_restore(x) __local_irq_restore(x) +#endif /* !CONFIG_IA64_DEBUG_IRQ */ + +#define raw_local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) +#define raw_local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); }) + +#define raw_irqs_disabled() \ +({ \ + unsigned long __ia64_id_flags; \ + local_save_flags(__ia64_id_flags); \ + (__ia64_id_flags & IA64_PSR_I) == 0; \ +}) + +#define 
raw_irqs_disabled_flags(flags) ((flags & IA64_PSR_I) == 0) + + +#define raw_safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */ + +/* TBD... */ +# define TRACE_IRQS_ON +# define TRACE_IRQS_OFF +# define TRACE_IRQS_ON_STR +# define TRACE_IRQS_OFF_STR + +#endif + Index: linux-rt-rebase.q/include/asm-ia64/mmu_context.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/mmu_context.h +++ linux-rt-rebase.q/include/asm-ia64/mmu_context.h @@ -32,7 +32,7 @@ #include struct ia64_ctx { - spinlock_t lock; + raw_spinlock_t lock; unsigned int next; /* next context number to use */ unsigned int limit; /* available free range */ unsigned int max_ctx; /* max. context value supported by all CPUs */ Index: linux-rt-rebase.q/include/asm-ia64/percpu.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/percpu.h +++ linux-rt-rebase.q/include/asm-ia64/percpu.h @@ -24,10 +24,17 @@ #define DECLARE_PER_CPU(type, name) \ extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name +#define DECLARE_PER_CPU_LOCKED(type, name) \ + extern spinlock_t per_cpu_lock__##name##_locked; \ + extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name##_locked + /* Separate out the type, so (int[3], foo) works. */ #define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) \ - __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name + __attribute__((__section__(".data.percpu"))) __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name + +#define DEFINE_PER_CPU_LOCKED(type, name) \ + __attribute__((__section__(".data.percpu"))) __SMALL_ADDR_AREA __DEFINE_SPINLOCK(per_cpu_lock__##name##_locked); \ + __attribute__((__section__(".data.percpu"))) __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name##_locked #ifdef CONFIG_SMP #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ @@ -55,6 +62,16 @@ DECLARE_PER_CPU(unsigned long, local_per #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset))) #define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset))) +#define per_cpu_lock(var, cpu) \ + (*RELOC_HIDE(&per_cpu_lock__##var##_locked, __per_cpu_offset[cpu])) +#define per_cpu_var_locked(var, cpu) \ + (*RELOC_HIDE(&per_cpu__##var##_locked, __per_cpu_offset[cpu])) +#define __get_cpu_lock(var, cpu) \ + per_cpu_lock(var, cpu) +#define __get_cpu_var_locked(var, cpu) \ + per_cpu_var_locked(var, cpu) + + extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size); extern void setup_per_cpu_areas (void); extern void *per_cpu_init(void); Index: linux-rt-rebase.q/include/asm-ia64/processor.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/processor.h +++ linux-rt-rebase.q/include/asm-ia64/processor.h @@ -124,8 +124,10 @@ struct ia64_psr { */ struct cpuinfo_ia64 { __u32 softirq_pending; - __u64 itm_delta; /* # of clock cycles between clock ticks */ - __u64 itm_next; /* interval timer mask value to use for next clock tick */ + __u64 itm_tick_delta; /* # of clock cycles between clock ticks */ + __u64 itm_tick_next; /* interval timer mask value to use for next clock tick */ + __u64 itm_timer_next; + __u64 __itm_next; __u64 nsec_per_cyc; /* (1000000000<count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); @@ -70,7 +70,7 @@ init_rwsem (struct rw_semaphore *sem) * lock for reading */ static inline void -__down_read (struct rw_semaphore *sem) +__down_read 
(struct compat_rw_semaphore *sem) { long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1); @@ -82,7 +82,7 @@ __down_read (struct rw_semaphore *sem) * lock for writing */ static inline void -__down_write (struct rw_semaphore *sem) +__down_write (struct compat_rw_semaphore *sem) { long old, new; @@ -99,7 +99,7 @@ __down_write (struct rw_semaphore *sem) * unlock after reading */ static inline void -__up_read (struct rw_semaphore *sem) +__up_read (struct compat_rw_semaphore *sem) { long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1); @@ -111,7 +111,7 @@ __up_read (struct rw_semaphore *sem) * unlock after writing */ static inline void -__up_write (struct rw_semaphore *sem) +__up_write (struct compat_rw_semaphore *sem) { long old, new; @@ -128,7 +128,7 @@ __up_write (struct rw_semaphore *sem) * trylock for reading -- returns 1 if successful, 0 if contention */ static inline int -__down_read_trylock (struct rw_semaphore *sem) +__down_read_trylock (struct compat_rw_semaphore *sem) { long tmp; while ((tmp = sem->count) >= 0) { @@ -143,7 +143,7 @@ __down_read_trylock (struct rw_semaphore * trylock for writing -- returns 1 if successful, 0 if contention */ static inline int -__down_write_trylock (struct rw_semaphore *sem) +__down_write_trylock (struct compat_rw_semaphore *sem) { long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); @@ -154,7 +154,7 @@ __down_write_trylock (struct rw_semaphor * downgrade write lock to read lock */ static inline void -__downgrade_write (struct rw_semaphore *sem) +__downgrade_write (struct compat_rw_semaphore *sem) { long old, new; @@ -174,7 +174,7 @@ __downgrade_write (struct rw_semaphore * #define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count)) #define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count)) -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline int compat_rwsem_is_locked(struct compat_rw_semaphore *sem) { return (sem->count != 0); } Index: linux-rt-rebase.q/include/asm-ia64/sal.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/sal.h +++ linux-rt-rebase.q/include/asm-ia64/sal.h @@ -43,7 +43,7 @@ #include #include -extern spinlock_t sal_lock; +extern raw_spinlock_t sal_lock; /* SAL spec _requires_ eight args for each call. 
*/ #define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7) \ Index: linux-rt-rebase.q/include/asm-ia64/semaphore.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/semaphore.h +++ linux-rt-rebase.q/include/asm-ia64/semaphore.h @@ -11,54 +11,65 @@ #include -struct semaphore { +/* + * On !PREEMPT_RT all semaphores are compat: + */ +#ifndef CONFIG_PREEMPT_RT +# define compat_semaphore semaphore +#endif + +struct compat_semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; }; -#define __SEMAPHORE_INITIALIZER(name, n) \ +#define __COMPAT_SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .sleepers = 0, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) +#define __COMPAT_DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct compat_semaphore name = __COMPAT_SEMAPHORE_INITIALIZER(name, count) -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) -#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) +#define COMPAT_DECLARE_MUTEX(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, 1) +#define COMPAT_DECLARE_MUTEX_LOCKED(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, 0) + +#define compat_sema_count(sem) atomic_read(&(sem)->count) + +asmlinkage int compat_sem_is_locked(struct compat_semaphore *sem); static inline void -sema_init (struct semaphore *sem, int val) +compat_sema_init (struct compat_semaphore *sem, int val) { - *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); + *sem = (struct compat_semaphore) __COMPAT_SEMAPHORE_INITIALIZER(*sem, val); } static inline void -init_MUTEX (struct semaphore *sem) +compat_init_MUTEX (struct compat_semaphore *sem) { - sema_init(sem, 1); + compat_sema_init(sem, 1); } static inline void -init_MUTEX_LOCKED (struct semaphore *sem) +compat_init_MUTEX_LOCKED (struct compat_semaphore *sem) { - sema_init(sem, 0); + compat_sema_init(sem, 0); } -extern void __down (struct semaphore * sem); -extern int __down_interruptible (struct semaphore * sem); -extern int __down_trylock (struct semaphore * sem); -extern void __up (struct semaphore * sem); +extern void __down (struct compat_semaphore * sem); +extern int __down_interruptible (struct compat_semaphore * sem); +extern int __down_trylock (struct compat_semaphore * sem); +extern void __up (struct compat_semaphore * sem); /* * Atomically decrement the semaphore's count. If it goes negative, * block the calling thread in the TASK_UNINTERRUPTIBLE state. */ static inline void -down (struct semaphore *sem) +compat_down (struct compat_semaphore *sem) { might_sleep(); if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1) @@ -70,7 +81,7 @@ down (struct semaphore *sem) * block the calling thread in the TASK_INTERRUPTIBLE state. 
*/ static inline int -down_interruptible (struct semaphore * sem) +compat_down_interruptible (struct compat_semaphore * sem) { int ret = 0; @@ -81,7 +92,7 @@ down_interruptible (struct semaphore * s } static inline int -down_trylock (struct semaphore *sem) +compat_down_trylock (struct compat_semaphore *sem) { int ret = 0; @@ -91,10 +102,12 @@ down_trylock (struct semaphore *sem) } static inline void -up (struct semaphore * sem) +compat_up (struct compat_semaphore * sem) { if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1) __up(sem); } +#include + #endif /* _ASM_IA64_SEMAPHORE_H */ Index: linux-rt-rebase.q/include/asm-ia64/spinlock.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/spinlock.h +++ linux-rt-rebase.q/include/asm-ia64/spinlock.h @@ -17,8 +17,6 @@ #include #include -#define __raw_spin_lock_init(x) ((x)->lock = 0) - #ifdef ASM_SUPPORTED /* * Try to get the lock. If we fail to get the lock, make a non-standard call to @@ -30,7 +28,7 @@ #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" static inline void -__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags) +__raw_spin_lock_flags (__raw_spinlock_t *lock, unsigned long flags) { register volatile unsigned int *ptr asm ("r31") = &lock->lock; @@ -89,7 +87,7 @@ __raw_spin_lock_flags (raw_spinlock_t *l #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) /* Unlock by doing an ordered store and releasing the cacheline with nta */ -static inline void __raw_spin_unlock(raw_spinlock_t *x) { +static inline void __raw_spin_unlock(__raw_spinlock_t *x) { barrier(); asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); } @@ -109,7 +107,7 @@ do { \ } while (ia64_spinlock_val); \ } \ } while (0) -#define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0) +#define __raw_spin_unlock(x) do { barrier(); ((__raw_spinlock_t *) x)->lock = 0; } while (0) #endif /* !ASM_SUPPORTED */ #define __raw_spin_is_locked(x) ((x)->lock != 0) @@ -122,7 +120,7 @@ do { \ #define __raw_read_lock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + __raw_rwlock_t *__read_lock_ptr = (rw); \ \ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -133,7 +131,7 @@ do { \ #define __raw_read_unlock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + __raw_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) @@ -165,7 +163,7 @@ do { \ (result == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void __raw_write_unlock(__raw_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -193,7 +191,7 @@ static inline void __raw_write_unlock(ra (ia64_val == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void __raw_write_unlock(__raw_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -201,10 +199,10 @@ static inline void __raw_write_unlock(ra #endif /* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(raw_rwlock_t *x) +static inline int __raw_read_trylock(__raw_rwlock_t *x) { union { - raw_rwlock_t lock; + __raw_rwlock_t lock; __u32 word; } old, new; old.lock = new.lock = *x; @@ -213,8 +211,8 @@ static inline int __raw_read_trylock(raw return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) 
cpu_relax() +#define __raw_spin_relax(lock) cpu_relax() +#define __raw_read_relax(lock) cpu_relax() +#define __raw_write_relax(lock) cpu_relax() #endif /* _ASM_IA64_SPINLOCK_H */ Index: linux-rt-rebase.q/include/asm-ia64/spinlock_types.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/spinlock_types.h +++ linux-rt-rebase.q/include/asm-ia64/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int read_counter : 31; volatile unsigned int write_lock : 1; -} raw_rwlock_t; +} __raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } Index: linux-rt-rebase.q/include/asm-ia64/system.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/system.h +++ linux-rt-rebase.q/include/asm-ia64/system.h @@ -104,81 +104,16 @@ extern struct ia64_boot_param { */ #define set_mb(var, value) do { (var) = (value); mb(); } while (0) -#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */ /* * The group barrier in front of the rsm & ssm are necessary to ensure * that none of the previous instructions in the same group are * affected by the rsm/ssm. */ -/* For spinlocks etc */ -/* - * - clearing psr.i is implicitly serialized (visible by next insn) - * - setting psr.i requires data serialization - * - we need a stop-bit before reading PSR because we sometimes - * write a floating-point register right before reading the PSR - * and that writes to PSR.mfl - */ -#define __local_irq_save(x) \ -do { \ - ia64_stop(); \ - (x) = ia64_getreg(_IA64_REG_PSR); \ - ia64_stop(); \ - ia64_rsm(IA64_PSR_I); \ -} while (0) - -#define __local_irq_disable() \ -do { \ - ia64_stop(); \ - ia64_rsm(IA64_PSR_I); \ -} while (0) - -#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) - -#ifdef CONFIG_IA64_DEBUG_IRQ - extern unsigned long last_cli_ip; - -# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP) - -# define local_irq_save(x) \ -do { \ - unsigned long psr; \ - \ - __local_irq_save(psr); \ - if (psr & IA64_PSR_I) \ - __save_ip(); \ - (x) = psr; \ -} while (0) - -# define local_irq_disable() do { unsigned long x; local_irq_save(x); } while (0) - -# define local_irq_restore(x) \ -do { \ - unsigned long old_psr, psr = (x); \ - \ - local_save_flags(old_psr); \ - __local_irq_restore(psr); \ - if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) \ - __save_ip(); \ -} while (0) +#include -#else /* !CONFIG_IA64_DEBUG_IRQ */ -# define local_irq_save(x) __local_irq_save(x) -# define local_irq_disable() __local_irq_disable() -# define local_irq_restore(x) __local_irq_restore(x) -#endif /* !CONFIG_IA64_DEBUG_IRQ */ - -#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) -#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); }) - -#define irqs_disabled() \ -({ \ - unsigned long __ia64_id_flags; \ - local_save_flags(__ia64_id_flags); \ - (__ia64_id_flags & IA64_PSR_I) == 0; \ -}) #ifdef __KERNEL__ Index: linux-rt-rebase.q/include/asm-ia64/thread_info.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/thread_info.h +++ linux-rt-rebase.q/include/asm-ia64/thread_info.h @@ -91,6 +91,7 @@ struct thread_info { #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ #define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall 
*/ #define TIF_FREEZE 20 /* is freezing for suspend */ +#define TIF_NEED_RESCHED_DELAYED 20 /* reschedule on return to userspace */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) Index: linux-rt-rebase.q/include/asm-ia64/tlb.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-ia64/tlb.h +++ linux-rt-rebase.q/include/asm-ia64/tlb.h @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -61,11 +62,12 @@ struct mmu_gather { unsigned char need_flush; /* really unmapped some PTEs? */ unsigned long start_addr; unsigned long end_addr; + int cpu; struct page *pages[FREE_PTE_NR]; }; /* Users of the generic TLB shootdown code must declare this storage space. */ -DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); +DECLARE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); /* * Flush the TLB for address range START to END and, if not in fast mode, release the @@ -127,8 +129,10 @@ ia64_tlb_flush_mmu (struct mmu_gather *t static inline struct mmu_gather * tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush) { - struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); + int cpu; + struct mmu_gather *tlb = &get_cpu_var_locked(mmu_gathers, &cpu); + tlb->cpu = cpu; tlb->mm = mm; /* * Use fast mode if only 1 CPU is online. @@ -165,7 +169,7 @@ tlb_finish_mmu (struct mmu_gather *tlb, /* keep the page table cache within bounds */ check_pgt_cache(); - put_cpu_var(mmu_gathers); + put_cpu_var_locked(mmu_gathers, tlb->cpu); } /* patches/latency-tracing-i386.patch0000664000077200007720000002131310655544571016331 0ustar mingomingo arch/i386/Kconfig | 1 + arch/i386/boot/compressed/Makefile | 2 +- arch/i386/kernel/Makefile | 1 + arch/i386/kernel/apic.c | 2 ++ arch/i386/kernel/entry.S | 15 +++++++++++++++ arch/i386/kernel/hpet.c | 2 +- arch/i386/kernel/irq.c | 7 ++++++- arch/i386/kernel/mcount-wrapper.S | 27 +++++++++++++++++++++++++++ arch/i386/kernel/traps.c | 1 + arch/i386/kernel/tsc.c | 2 +- arch/i386/mm/fault.c | 1 + arch/i386/mm/init.c | 2 +- include/asm-i386/irq.h | 2 +- include/asm-i386/processor.h | 11 +++++++---- 14 files changed, 66 insertions(+), 10 deletions(-) Index: linux/arch/i386/Kconfig =================================================================== --- linux.orig/arch/i386/Kconfig +++ linux/arch/i386/Kconfig @@ -784,6 +784,7 @@ config BOOT_IOREMAP # config REGPARM bool + depends on !MCOUNT default y config SECCOMP Index: linux/arch/i386/boot/compressed/Makefile =================================================================== --- linux.orig/arch/i386/boot/compressed/Makefile +++ linux/arch/i386/boot/compressed/Makefile @@ -11,7 +11,7 @@ EXTRA_AFLAGS := -traditional LDFLAGS_vmlinux := -T hostprogs-y := relocs -CFLAGS := -m32 -D__KERNEL__ $(LINUX_INCLUDE) -O2 \ +CFLAGS := -m32 -D__KERNEL__ $(LINUX_INCLUDE) -Iinclude -O2 \ -fno-strict-aliasing -fPIC \ $(call cc-option,-ffreestanding) \ $(call cc-option,-fno-stack-protector) Index: linux/arch/i386/kernel/Makefile =================================================================== --- linux.orig/arch/i386/kernel/Makefile +++ linux/arch/i386/kernel/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o obj-$(CONFIG_SMP) += smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o +obj-$(CONFIG_MCOUNT) += mcount-wrapper.o obj-$(CONFIG_X86_MPPARSE) += mpparse.o obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o Index: 
linux/arch/i386/kernel/apic.c =================================================================== --- linux.orig/arch/i386/kernel/apic.c +++ linux/arch/i386/kernel/apic.c @@ -583,6 +583,8 @@ void fastcall smp_apic_timer_interrupt(s { struct pt_regs *old_regs = set_irq_regs(regs); + trace_special(regs->eip, 1, 0); + /* * NOTE! We'd better ACK the irq immediately, * because timer handling can be slow. Index: linux/arch/i386/kernel/entry.S =================================================================== --- linux.orig/arch/i386/kernel/entry.S +++ linux/arch/i386/kernel/entry.S @@ -329,6 +329,11 @@ sysenter_past_esp: pushl %eax CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL +#ifdef CONFIG_EVENT_TRACE + pushl %edx; pushl %ecx; pushl %ebx; pushl %eax + call sys_call + popl %eax; popl %ebx; popl %ecx; popl %edx +#endif GET_THREAD_INFO(%ebp) /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ @@ -343,6 +348,11 @@ sysenter_past_esp: movl TI_flags(%ebp), %ecx testw $_TIF_ALLWORK_MASK, %cx jne syscall_exit_work +#ifdef CONFIG_EVENT_TRACE + pushl %eax + call sys_ret + popl %eax +#endif /* if something modifies registers it must also disable sysexit */ movl PT_EIP(%esp), %edx movl PT_OLDESP(%esp), %ecx @@ -366,6 +376,11 @@ ENTRY(system_call) pushl %eax # save orig_eax CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL +#ifdef CONFIG_EVENT_TRACE + pushl %edx; pushl %ecx; pushl %ebx; pushl %eax + call sys_call + popl %eax; popl %ebx; popl %ecx; popl %edx +#endif GET_THREAD_INFO(%ebp) # system call tracing in operation / emulation /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ Index: linux/arch/i386/kernel/hpet.c =================================================================== --- linux.orig/arch/i386/kernel/hpet.c +++ linux/arch/i386/kernel/hpet.c @@ -292,7 +292,7 @@ static int hpet_legacy_next_event(unsign /* * Clock source related code */ -static cycle_t read_hpet(void) +static cycle_t notrace read_hpet(void) { return (cycle_t)hpet_readl(HPET_COUNTER); } Index: linux/arch/i386/kernel/irq.c =================================================================== --- linux.orig/arch/i386/kernel/irq.c +++ linux/arch/i386/kernel/irq.c @@ -68,7 +68,7 @@ static union irq_ctx *softirq_ctx[NR_CPU * SMP cross-CPU interrupts have their own specific * handlers). */ -fastcall unsigned int do_IRQ(struct pt_regs *regs) +fastcall notrace unsigned int do_IRQ(struct pt_regs *regs) { struct pt_regs *old_regs; /* high bit used in ret_from_ code */ @@ -87,6 +87,11 @@ fastcall unsigned int do_IRQ(struct pt_r old_regs = set_irq_regs(regs); irq_enter(); +#ifdef CONFIG_EVENT_TRACE + if (irq == trace_user_trigger_irq) + user_trace_start(); +#endif + trace_special(regs->eip, irq, 0); #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? 
*/ { Index: linux/arch/i386/kernel/mcount-wrapper.S =================================================================== --- /dev/null +++ linux/arch/i386/kernel/mcount-wrapper.S @@ -0,0 +1,27 @@ +/* + * linux/arch/i386/mcount-wrapper.S + * + * Copyright (C) 2004 Ingo Molnar + */ + +.globl mcount +mcount: + + cmpl $0, mcount_enabled + jz out + + push %ebp + mov %esp, %ebp + pushl %eax + pushl %ecx + pushl %edx + + call __mcount + + popl %edx + popl %ecx + popl %eax + popl %ebp +out: + ret + Index: linux/arch/i386/kernel/traps.c =================================================================== --- linux.orig/arch/i386/kernel/traps.c +++ linux/arch/i386/kernel/traps.c @@ -227,6 +227,7 @@ show_trace_log_lvl(struct task_struct *t { dump_trace(task, regs, stack, &print_trace_ops, log_lvl); printk("%s =======================\n", log_lvl); + print_traces(task); } void show_trace(struct task_struct *task, struct pt_regs *regs, Index: linux/arch/i386/kernel/tsc.c =================================================================== --- linux.orig/arch/i386/kernel/tsc.c +++ linux/arch/i386/kernel/tsc.c @@ -270,7 +270,7 @@ core_initcall(cpufreq_tsc); static unsigned long current_tsc_khz = 0; -static cycle_t read_tsc(void) +static notrace cycle_t read_tsc(void) { cycle_t ret; Index: linux/arch/i386/mm/fault.c =================================================================== --- linux.orig/arch/i386/mm/fault.c +++ linux/arch/i386/mm/fault.c @@ -497,6 +497,7 @@ bad_area_nosemaphore: nr = (address - idt_descr.address) >> 3; if (nr == 6) { + stop_trace(); do_invalid_op(regs, 0); return; } Index: linux/arch/i386/mm/init.c =================================================================== --- linux.orig/arch/i386/mm/init.c +++ linux/arch/i386/mm/init.c @@ -193,7 +193,7 @@ static inline int page_kills_ppro(unsign return 0; } -int page_is_ram(unsigned long pagenr) +int notrace page_is_ram(unsigned long pagenr) { int i; unsigned long addr, end; Index: linux/include/asm-i386/irq.h =================================================================== --- linux.orig/include/asm-i386/irq.h +++ linux/include/asm-i386/irq.h @@ -41,7 +41,7 @@ extern int irqbalance_disable(char *str) extern void fixup_irqs(cpumask_t map); #endif -unsigned int do_IRQ(struct pt_regs *regs); +extern fastcall notrace unsigned int do_IRQ(struct pt_regs *regs); void init_IRQ(void); void __init native_init_IRQ(void); Index: linux/include/asm-i386/processor.h =================================================================== --- linux.orig/include/asm-i386/processor.h +++ linux/include/asm-i386/processor.h @@ -128,7 +128,7 @@ extern void detect_ht(struct cpuinfo_x86 static inline void detect_ht(struct cpuinfo_x86 *c) {} #endif -static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, +static inline void fastcall native_cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) { /* ecx is often an input as well as an output. */ @@ -595,7 +595,9 @@ static inline void load_esp0(struct tss_ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx * resulting in stale register contents being returned. 
*/ -static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) +static inline void +cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) { *eax = op; *ecx = 0; @@ -603,8 +605,9 @@ static inline void cpuid(unsigned int op } /* Some CPUID calls want 'count' to be placed in ecx */ -static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, - int *edx) +static inline void +cpuid_count(int op, int count, unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) { *eax = op; *ecx = count; patches/net-core-preempt-fix.patch0000664000077200007720000000073110655544574016534 0ustar mingomingo--- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/net/core/dev.c =================================================================== --- linux-rt-rebase.q.orig/net/core/dev.c +++ linux-rt-rebase.q/net/core/dev.c @@ -1745,8 +1745,8 @@ int netif_rx_ni(struct sk_buff *skb) { int err; - preempt_disable(); err = netif_rx(skb); + preempt_disable(); if (local_softirq_pending()) do_softirq(); preempt_enable(); patches/preempt-realtime-supress-rtc-printk.patch0000664000077200007720000000106410655544575021634 0ustar mingomingo--- drivers/char/rtc.c | 2 ++ 1 file changed, 2 insertions(+) Index: linux-rt-rebase.q/drivers/char/rtc.c =================================================================== --- linux-rt-rebase.q.orig/drivers/char/rtc.c +++ linux-rt-rebase.q/drivers/char/rtc.c @@ -1315,8 +1315,10 @@ static void rtc_dropped_irq(unsigned lon spin_unlock_irq(&rtc_lock); +#ifndef CONFIG_PREEMPT_RT if (printk_ratelimit()) printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq); +#endif /* Now we have new data */ wake_up_interruptible(&rtc_wait); patches/rcu-2.patch0000664000077200007720000000564010655544573013515 0ustar mingomingo Finally, RCU gets its own softirq. With it being used extensively, the per-cpu tasklet used earlier was just a softirq with overheads. This makes things more efficient. 
Signed-off-by: Dipankar Sarma include/linux/interrupt.h | 1 + kernel/rcuclassic.c | 12 +++++------- 2 files changed, 6 insertions(+), 7 deletions(-) Index: linux/include/linux/interrupt.h =================================================================== --- linux.orig/include/linux/interrupt.h +++ linux/include/linux/interrupt.h @@ -269,6 +269,7 @@ enum #ifdef CONFIG_HIGH_RES_TIMERS HRTIMER_SOFTIRQ, #endif + RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ }; /* softirq mask and active fields moved to irq_cpustat_t in Index: linux/kernel/rcuclassic.c =================================================================== --- linux.orig/kernel/rcuclassic.c +++ linux/kernel/rcuclassic.c @@ -69,7 +69,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_data DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L }; /* Fake initialization required by compiler */ -static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL}; static int blimit = 10; static int qhimark = 10000; static int qlowmark = 100; @@ -215,7 +214,7 @@ static void rcu_do_batch(struct rcu_data if (!rdp->donelist) rdp->donetail = &rdp->donelist; else - tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu)); + raise_softirq(RCU_SOFTIRQ); } /* @@ -367,7 +366,6 @@ static void rcu_offline_cpu(int cpu) &per_cpu(rcu_bh_data, cpu)); put_cpu_var(rcu_data); put_cpu_var(rcu_bh_data); - tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu); } #else @@ -379,7 +377,7 @@ static void rcu_offline_cpu(int cpu) #endif /* - * This does the RCU processing work from tasklet context. + * This does the RCU processing work from softirq context. */ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) @@ -424,7 +422,7 @@ static void __rcu_process_callbacks(stru rcu_do_batch(rdp); } -static void rcu_process_callbacks(unsigned long unused) +static void rcu_process_callbacks(struct softirq_action *unused) { __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); @@ -488,7 +486,7 @@ void rcu_check_callbacks(int cpu, int us rcu_bh_qsctr_inc(cpu); } else if (!in_softirq()) rcu_bh_qsctr_inc(cpu); - tasklet_schedule(&per_cpu(rcu_tasklet, cpu)); + raise_softirq(RCU_SOFTIRQ); } static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, @@ -511,7 +509,7 @@ static void __devinit rcu_online_cpu(int rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp); rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp); - tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); + open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL); } static int __devinit rcu_cpu_notify(struct notifier_block *self, patches/rcu-4.patch0000664000077200007720000003611610655544573013521 0ustar mingomingo This patch consolidates the RCU tracing code in the preemptible RCU implementation, moves them to a separate "trace" file and cleans up the #ifdefs. Moving to a separate file will eventually allow dynamic tracing of RCU implementation. 
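The effect at an individual call site is easiest to see side by side. The fragment below is assembled from the diff that follows (the RCU_TRACE() wrapper, the rcupreempt_trace counters and the move2done hook are all introduced there); it only juxtaposes the old and new forms and is not a further change to the patch.

/*
 * RCU_TRACE() compiles to a plain call with CONFIG_RCU_TRACE=y and to
 * nothing otherwise, so call sites lose their #ifdef blocks.
 */
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(fn, arg)	fn(arg);
#else
#define RCU_TRACE(fn, arg)
#endif

/* Before: counters kept directly in rcu_data, bumped under CONFIG_RCU_STATS. */
#ifdef CONFIG_RCU_STATS
	rcu_data.n_done_length += rcu_data.n_wait_length;
	rcu_data.n_done_add += rcu_data.n_wait_length;
	rcu_data.n_wait_length = 0;
#endif /* #ifdef CONFIG_RCU_STATS */

/* After: one unconditional line; the counters live in rcu_data.trace. */
	RCU_TRACE(rcupreempt_trace_move2done, &rcu_data.trace);

Because the counter manipulation now sits behind out-of-line helpers in kernel/rcupreempt_trace.c, a later tracer can hook those helpers without touching the RCU fast paths.
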
Signed-off-by: Paul McKenney Signed-off-by: Dipankar Sarma include/linux/rcupreempt_trace.h | 84 ++++++++++++++++++++++++++++ kernel/Kconfig.preempt | 11 +-- kernel/Makefile | 1 kernel/rcupreempt.c | 113 ++++++++++++--------------------------- kernel/rcupreempt_trace.c | 99 ++++++++++++++++++++++++++++++++++ 5 files changed, 225 insertions(+), 83 deletions(-) Index: linux/include/linux/rcupreempt_trace.h =================================================================== --- /dev/null +++ linux/include/linux/rcupreempt_trace.h @@ -0,0 +1,84 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (RT implementation) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2006 + * + * Author: Paul McKenney + * + * Based on the original work by Paul McKenney + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. + * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of Read-Copy Update mechanism see - + * http://lse.sourceforge.net/locking/rcupdate.html + * + */ + +#ifndef __LINUX_RCUPREEMPT_TRACE_H +#define __LINUX_RCUPREEMPT_TRACE_H + +#ifdef __KERNEL__ +#include +#include + +#include + +/* + * PREEMPT_RCU data structures. 
+ */ + +struct rcupreempt_trace { + long next_length; + long next_add; + long wait_length; + long wait_add; + long done_length; + long done_add; + long done_remove; + atomic_t done_invoked; + long rcu_check_callbacks; + atomic_t rcu_try_flip1; + long rcu_try_flip2; + long rcu_try_flip3; + atomic_t rcu_try_flip_e1; + long rcu_try_flip_e2; + long rcu_try_flip_e3; +}; + +#ifdef CONFIG_RCU_TRACE +#define RCU_TRACE(fn, arg) fn(arg); +#else +#define RCU_TRACE(fn, arg) +#endif + +extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_e2(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_e3(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip2(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip3(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_RCUPREEMPT_TRACE_H */ Index: linux/kernel/Kconfig.preempt =================================================================== --- linux.orig/kernel/Kconfig.preempt +++ linux/kernel/Kconfig.preempt @@ -93,13 +93,12 @@ config PREEMPT_RCU endchoice -config RCU_STATS - bool "/proc stats for preemptible RCU read-side critical sections" - depends on PREEMPT_RCU +config RCU_TRACE + bool "Enable tracing for RCU - currently stats in /proc" default y help - This option provides /proc stats to provide debugging info for - the preemptible realtime RCU implementation. + This option provides tracing in RCU which presents /proc + stats for debugging RCU implementation. - Say Y here if you want to see RCU stats in /proc + Say Y here if you want to enable RCU tracing Say N if you are unsure. Index: linux/kernel/Makefile =================================================================== --- linux.orig/kernel/Makefile +++ linux/kernel/Makefile @@ -54,6 +54,7 @@ obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o obj-$(CONFIG_CLASSIC_RCU) += rcupdate.o rcuclassic.o obj-$(CONFIG_PREEMPT_RCU) += rcupdate.o rcupreempt.o +obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o Index: linux/kernel/rcupreempt.c =================================================================== --- linux.orig/kernel/rcupreempt.c +++ linux/kernel/rcupreempt.c @@ -48,6 +48,7 @@ #include #include #include +#include /* * PREEMPT_RCU data structures. 
@@ -63,23 +64,9 @@ struct rcu_data { struct rcu_head **waittail; struct rcu_head *donelist; struct rcu_head **donetail; -#ifdef CONFIG_RCU_STATS - long n_next_length; - long n_next_add; - long n_wait_length; - long n_wait_add; - long n_done_length; - long n_done_add; - long n_done_remove; - atomic_t n_done_invoked; - long n_rcu_check_callbacks; - atomic_t n_rcu_try_flip1; - long n_rcu_try_flip2; - long n_rcu_try_flip3; - atomic_t n_rcu_try_flip_e1; - long n_rcu_try_flip_e2; - long n_rcu_try_flip_e3; -#endif /* #ifdef CONFIG_RCU_STATS */ +#ifdef CONFIG_RCU_TRACE + struct rcupreempt_trace trace; +#endif /* #ifdef CONFIG_RCU_TRACE */ }; struct rcu_ctrlblk { spinlock_t fliplock; @@ -180,22 +167,14 @@ static void __rcu_advance_callbacks(void if (rcu_data.waitlist != NULL) { *rcu_data.donetail = rcu_data.waitlist; rcu_data.donetail = rcu_data.waittail; -#ifdef CONFIG_RCU_STATS - rcu_data.n_done_length += rcu_data.n_wait_length; - rcu_data.n_done_add += rcu_data.n_wait_length; - rcu_data.n_wait_length = 0; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_move2done, &rcu_data.trace); } if (rcu_data.nextlist != NULL) { rcu_data.waitlist = rcu_data.nextlist; rcu_data.waittail = rcu_data.nexttail; rcu_data.nextlist = NULL; rcu_data.nexttail = &rcu_data.nextlist; -#ifdef CONFIG_RCU_STATS - rcu_data.n_wait_length += rcu_data.n_next_length; - rcu_data.n_wait_add += rcu_data.n_next_length; - rcu_data.n_next_length = 0; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_move2wait, &rcu_data.trace); } else { rcu_data.waitlist = NULL; rcu_data.waittail = &rcu_data.waitlist; @@ -220,22 +199,16 @@ static void rcu_try_flip(void) unsigned long oldirq; flipctr = rcu_ctrlblk.completed; -#ifdef CONFIG_RCU_STATS - atomic_inc(&rcu_data.n_rcu_try_flip1); -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_try_flip1, &rcu_data.trace); if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, oldirq))) { -#ifdef CONFIG_RCU_STATS - atomic_inc(&rcu_data.n_rcu_try_flip_e1); -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_try_flip_e1, &rcu_data.trace); return; } if (unlikely(flipctr != rcu_ctrlblk.completed)) { /* Our work is done! ;-) */ -#ifdef CONFIG_RCU_STATS - rcu_data.n_rcu_try_flip_e2++; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_try_flip_e2, &rcu_data.trace); spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, oldirq); return; } @@ -246,14 +219,11 @@ static void rcu_try_flip(void) * that started prior to the previous flip. 
*/ -#ifdef CONFIG_RCU_STATS - rcu_data.n_rcu_try_flip2++; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_try_flip2, &rcu_data.trace); for_each_possible_cpu(cpu) { if (atomic_read(&per_cpu(rcu_flipctr, cpu)[!flipctr]) != 0) { -#ifdef CONFIG_RCU_STATS - rcu_data.n_rcu_try_flip_e3++; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_try_flip_e3, + &rcu_data.trace); spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, oldirq); return; } @@ -264,9 +234,7 @@ static void rcu_try_flip(void) smp_mb(); rcu_ctrlblk.completed++; -#ifdef CONFIG_RCU_STATS - rcu_data.n_rcu_try_flip3++; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_try_flip3, &rcu_data.trace); spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, oldirq); } @@ -281,9 +249,7 @@ void rcu_check_callbacks(int cpu, int us } } spin_lock_irqsave(&rcu_data.lock, oldirq); -#ifdef CONFIG_RCU_STATS - rcu_data.n_rcu_check_callbacks++; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_check_callbacks, &rcu_data.trace); __rcu_advance_callbacks(); if (rcu_data.donelist == NULL) { spin_unlock_irqrestore(&rcu_data.lock, oldirq); @@ -306,18 +272,13 @@ static void rcu_process_callbacks(unsign } rcu_data.donelist = NULL; rcu_data.donetail = &rcu_data.donelist; -#ifdef CONFIG_RCU_STATS - rcu_data.n_done_remove += rcu_data.n_done_length; - rcu_data.n_done_length = 0; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_done_remove, &rcu_data.trace); spin_unlock_irqrestore(&rcu_data.lock, flags); while (list) { next = list->next; list->func(list); list = next; -#ifdef CONFIG_RCU_STATS - atomic_inc(&rcu_data.n_done_invoked); -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_invoke, &rcu_data.trace); } } @@ -332,10 +293,7 @@ void fastcall call_rcu(struct rcu_head * __rcu_advance_callbacks(); *rcu_data.nexttail = head; rcu_data.nexttail = &head->next; -#ifdef CONFIG_RCU_STATS - rcu_data.n_next_add++; - rcu_data.n_next_length++; -#endif /* #ifdef CONFIG_RCU_STATS */ + RCU_TRACE(rcupreempt_trace_next_add, &rcu_data.trace); spin_unlock_irqrestore(&rcu_data.lock, flags); } @@ -389,9 +347,10 @@ void synchronize_kernel(void) synchronize_rcu(); } -#ifdef CONFIG_RCU_STATS +#ifdef CONFIG_RCU_TRACE int rcu_read_proc_data(char *page) { + struct rcupreempt_trace *trace = &rcu_data.trace; return sprintf(page, "ggp=%ld lgp=%ld rcc=%ld\n" "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n" @@ -399,23 +358,23 @@ int rcu_read_proc_data(char *page) rcu_ctrlblk.completed, rcu_data.completed, - rcu_data.n_rcu_check_callbacks, + trace->rcu_check_callbacks, - rcu_data.n_next_add, - rcu_data.n_next_length, - rcu_data.n_wait_add, - rcu_data.n_wait_length, - rcu_data.n_done_add, - rcu_data.n_done_length, - rcu_data.n_done_remove, - atomic_read(&rcu_data.n_done_invoked), - - atomic_read(&rcu_data.n_rcu_try_flip1), - rcu_data.n_rcu_try_flip2, - rcu_data.n_rcu_try_flip3, - atomic_read(&rcu_data.n_rcu_try_flip_e1), - rcu_data.n_rcu_try_flip_e2, - rcu_data.n_rcu_try_flip_e3); + trace->next_add, + trace->next_length, + trace->wait_add, + trace->wait_length, + trace->done_add, + trace->done_length, + trace->done_remove, + atomic_read(&trace->done_invoked), + + atomic_read(&trace->rcu_try_flip1), + trace->rcu_try_flip2, + trace->rcu_try_flip3, + atomic_read(&trace->rcu_try_flip_e1), + trace->rcu_try_flip_e2, + trace->rcu_try_flip_e3); } int rcu_read_proc_gp_data(char *page) @@ -454,7 +413,7 @@ int rcu_read_proc_ctrs_data(char *page) return (cnt); } -#endif /* #ifdef CONFIG_RCU_STATS */ +#endif /* 
#ifdef CONFIG_RCU_TRACE */ EXPORT_SYMBOL_GPL(call_rcu); EXPORT_SYMBOL_GPL(rcu_batches_completed); Index: linux/kernel/rcupreempt_trace.c =================================================================== --- /dev/null +++ linux/kernel/rcupreempt_trace.c @@ -0,0 +1,99 @@ +/* + * Read-Copy Update tracing for realtime implementation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2006 + * + * Papers: http://www.rdrop.com/users/paulmck/RCU + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU/ *.txt + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void rcupreempt_trace_move2done(struct rcupreempt_trace *trace) +{ + trace->done_length += trace->wait_length; + trace->done_add += trace->wait_length; + trace->wait_length = 0; +} +void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace) +{ + trace->wait_length += trace->next_length; + trace->wait_add += trace->next_length; + trace->next_length = 0; +} +void rcupreempt_trace_try_flip1(struct rcupreempt_trace *trace) +{ + atomic_inc(&trace->rcu_try_flip1); +} +void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace) +{ + atomic_inc(&trace->rcu_try_flip_e1); +} +void rcupreempt_trace_try_flip_e2(struct rcupreempt_trace *trace) +{ + trace->rcu_try_flip_e2++; +} +void rcupreempt_trace_try_flip_e3(struct rcupreempt_trace *trace) +{ + trace->rcu_try_flip_e3++; +} +void rcupreempt_trace_try_flip2(struct rcupreempt_trace *trace) +{ + trace->rcu_try_flip2++; +} +void rcupreempt_trace_try_flip3(struct rcupreempt_trace *trace) +{ + trace->rcu_try_flip3++; +} +void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace) +{ + trace->rcu_check_callbacks++; +} +void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace) +{ + trace->done_remove += trace->done_length; + trace->done_length = 0; +} +void rcupreempt_trace_invoke(struct rcupreempt_trace *trace) +{ + atomic_inc(&trace->done_invoked); +} +void rcupreempt_trace_next_add(struct rcupreempt_trace *trace) +{ + trace->next_add++; + trace->next_length++; +} patches/preempt-realtime-usb.patch0000664000077200007720000001320410655544575016625 0ustar mingomingo--- drivers/net/usb/usbnet.c | 2 ++ drivers/usb/core/devio.c | 7 ++++--- drivers/usb/core/hcd.c | 25 +++++++++---------------- drivers/usb/core/message.c | 11 ++++++----- 4 files changed, 21 insertions(+), 24 deletions(-) Index: linux-rt-rebase.q/drivers/net/usb/usbnet.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/usb/usbnet.c +++ linux-rt-rebase.q/drivers/net/usb/usbnet.c @@ -898,6 +898,8 @@ static void tx_complete (struct urb *urb urb->dev = NULL; entry->state = tx_done; 
+ spin_lock_rt(&dev->txq.lock); + spin_unlock_rt(&dev->txq.lock); defer_bh(dev, skb, &dev->txq); } Index: linux-rt-rebase.q/drivers/usb/core/devio.c =================================================================== --- linux-rt-rebase.q.orig/drivers/usb/core/devio.c +++ linux-rt-rebase.q/drivers/usb/core/devio.c @@ -308,10 +308,11 @@ static void async_completed(struct urb * struct async *as = urb->context; struct dev_state *ps = as->ps; struct siginfo sinfo; + unsigned long flags; - spin_lock(&ps->lock); - list_move_tail(&as->asynclist, &ps->async_completed); - spin_unlock(&ps->lock); + spin_lock_irqsave(&ps->lock, flags); + list_move_tail(&as->asynclist, &ps->async_completed); + spin_unlock_irqrestore(&ps->lock, flags); if (as->signr) { sinfo.si_signo = as->signr; sinfo.si_errno = as->urb->status; Index: linux-rt-rebase.q/drivers/usb/core/hcd.c =================================================================== --- linux-rt-rebase.q.orig/drivers/usb/core/hcd.c +++ linux-rt-rebase.q/drivers/usb/core/hcd.c @@ -523,13 +523,11 @@ error: } /* any errors get returned through the urb completion */ - local_irq_save (flags); - spin_lock (&urb->lock); + spin_lock_irqsave(&urb->lock, flags); if (urb->status == -EINPROGRESS) urb->status = status; - spin_unlock (&urb->lock); + spin_unlock_irqrestore(&urb->lock, flags); usb_hcd_giveback_urb (hcd, urb); - local_irq_restore (flags); return 0; } @@ -559,8 +557,7 @@ void usb_hcd_poll_rh_status(struct usb_h if (length > 0) { /* try to complete the status urb */ - local_irq_save (flags); - spin_lock(&hcd_root_hub_lock); + spin_lock_irqsave(&hcd_root_hub_lock, flags); urb = hcd->status_urb; if (urb) { spin_lock(&urb->lock); @@ -576,14 +573,13 @@ void usb_hcd_poll_rh_status(struct usb_h spin_unlock(&urb->lock); } else length = 0; - spin_unlock(&hcd_root_hub_lock); + spin_unlock_irqrestore(&hcd_root_hub_lock, flags); /* local irqs are always blocked in completions */ if (length > 0) usb_hcd_giveback_urb (hcd, urb); else hcd->poll_pending = 1; - local_irq_restore (flags); } /* The USB 2.0 spec says 256 ms. This is close enough and won't @@ -657,17 +653,15 @@ static int usb_rh_urb_dequeue (struct us } else { /* Status URB */ if (!hcd->uses_new_polling) del_timer (&hcd->rh_timer); - local_irq_save (flags); - spin_lock (&hcd_root_hub_lock); + spin_lock_irqsave(&hcd_root_hub_lock, flags); if (urb == hcd->status_urb) { hcd->status_urb = NULL; urb->hcpriv = NULL; } else urb = NULL; /* wasn't fully queued */ - spin_unlock (&hcd_root_hub_lock); + spin_unlock_irqrestore(&hcd_root_hub_lock, flags); if (urb) usb_hcd_giveback_urb (hcd, urb); - local_irq_restore (flags); } return 0; @@ -1200,13 +1194,13 @@ void usb_hcd_endpoint_disable (struct us { struct usb_hcd *hcd; struct urb *urb; + unsigned long flags; hcd = bus_to_hcd(udev->bus); - local_irq_disable (); /* ep is already gone from udev->ep_{in,out}[]; no more submits */ rescan: - spin_lock(&hcd_urb_list_lock); + spin_lock_irqsave(&hcd_urb_list_lock, flags); list_for_each_entry (urb, &ep->urb_list, urb_list) { int tmp; @@ -1243,8 +1237,7 @@ rescan: /* list contents may have changed */ goto rescan; } - spin_unlock(&hcd_urb_list_lock); - local_irq_enable (); + spin_unlock_irqrestore(&hcd_urb_list_lock, flags); /* synchronize with the hardware, so old configuration state * clears out immediately (and will be freed). 
Index: linux-rt-rebase.q/drivers/usb/core/message.c =================================================================== --- linux-rt-rebase.q.orig/drivers/usb/core/message.c +++ linux-rt-rebase.q/drivers/usb/core/message.c @@ -258,8 +258,9 @@ static void sg_complete (struct urb *urb { struct usb_sg_request *io = urb->context; int status = urb->status; + unsigned long flags; - spin_lock (&io->lock); + spin_lock_irqsave (&io->lock, flags); /* In 2.5 we require hcds' endpoint queues not to progress after fault * reports, until the completion callback (this!) returns. That lets @@ -293,7 +294,7 @@ static void sg_complete (struct urb *urb * unlink pending urbs so they won't rx/tx bad data. * careful: unlink can sometimes be synchronous... */ - spin_unlock (&io->lock); + spin_unlock_irqrestore (&io->lock, flags); for (i = 0, found = 0; i < io->entries; i++) { if (!io->urbs [i] || !io->urbs [i]->dev) continue; @@ -308,7 +309,7 @@ static void sg_complete (struct urb *urb } else if (urb == io->urbs [i]) found = 1; } - spin_lock (&io->lock); + spin_lock_irqsave (&io->lock, flags); } urb->dev = NULL; @@ -318,7 +319,7 @@ static void sg_complete (struct urb *urb if (!io->count) complete (&io->complete); - spin_unlock (&io->lock); + spin_unlock_irqrestore (&io->lock, flags); } @@ -600,7 +601,7 @@ void usb_sg_cancel (struct usb_sg_reques dev_warn (&io->dev->dev, "%s, unlink --> %d\n", __FUNCTION__, retval); } - spin_lock (&io->lock); + spin_lock_irqsave (&io->lock, flags); } spin_unlock_irqrestore (&io->lock, flags); } patches/preempt-realtime-core.patch0000664000077200007720000011465710655544575017002 0ustar mingomingo--- include/linux/completion.h | 1 include/linux/hardirq.h | 42 +++++++------ include/linux/kernel.h | 15 ++++ include/linux/profile.h | 12 ++- include/linux/radix-tree.h | 13 ++++ include/linux/smp.h | 11 +++ include/linux/smp_lock.h | 4 - include/linux/workqueue.h | 3 kernel/Kconfig.preempt | 145 +++++++++++++++++++++++++++++---------------- kernel/exit.c | 21 ++++-- kernel/fork.c | 26 +++++++- kernel/futex.c | 10 ++- kernel/power/swsusp.c | 1 kernel/signal.c | 9 ++ kernel/softirq.c | 14 +++- kernel/stop_machine.c | 4 - kernel/sys.c | 9 +- kernel/user.c | 7 +- kernel/workqueue.c | 52 +++++++++++++++- lib/Kconfig.debug | 4 - lib/Makefile | 3 lib/kernel_lock.c | 27 +++++--- lib/locking-selftest.c | 29 +++++---- lib/radix-tree.c | 7 +- lib/smp_processor_id.c | 4 - 25 files changed, 348 insertions(+), 125 deletions(-) Index: linux-rt-rebase.q/include/linux/completion.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/completion.h +++ linux-rt-rebase.q/include/linux/completion.h @@ -48,6 +48,7 @@ extern unsigned long FASTCALL(wait_for_c unsigned long timeout)); extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout( struct completion *x, unsigned long timeout)); +extern unsigned int FASTCALL(completion_done(struct completion *x)); extern void FASTCALL(complete(struct completion *)); extern void FASTCALL(complete_all(struct completion *)); Index: linux-rt-rebase.q/include/linux/hardirq.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/hardirq.h +++ linux-rt-rebase.q/include/linux/hardirq.h @@ -41,23 +41,25 @@ # error HARDIRQ_BITS is too low! 
#endif #endif +#define PREEMPT_ACTIVE_BITS 1 -#define PREEMPT_SHIFT 0 -#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) -#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) - -#define __IRQ_MASK(x) ((1UL << (x))-1) - -#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) -#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) -#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) - -#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) -#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) -#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) +#define PREEMPT_SHIFT 0 +#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) +#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) +#define PREEMPT_ACTIVE_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) + +#define __IRQ_MASK(x) ((1UL << (x))-1) + +#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) +#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) +#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) + +#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) +#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) +#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) -#error PREEMPT_ACTIVE is too low! +# error PREEMPT_ACTIVE is too low! #endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) @@ -68,11 +70,13 @@ * Are we doing bottom half or hardware interrupt processing? * Are we in a softirq context? Interrupt context? */ -#define in_irq() (hardirq_count()) -#define in_softirq() (softirq_count()) -#define in_interrupt() (irq_count()) - -#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) +#define in_irq() (hardirq_count() || (current->flags & PF_HARDIRQ)) +#define in_softirq() (softirq_count() || (current->flags & PF_SOFTIRQ)) +#define in_interrupt() (irq_count()) + +#if defined(CONFIG_PREEMPT) && \ + !defined(CONFIG_PREEMPT_BKL) && \ + !defined(CONFIG_PREEMPT_RT) # define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked()) #else # define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) Index: linux-rt-rebase.q/include/linux/kernel.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/kernel.h +++ linux-rt-rebase.q/include/linux/kernel.h @@ -88,7 +88,7 @@ extern int cond_resched(void); # define might_resched() do { } while (0) #endif -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) void __might_sleep(char *file, int line); # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) @@ -167,6 +167,18 @@ static inline int printk(const char *s, static inline int __cold printk(const char *s, ...) 
{ return 0; } #endif +#ifdef CONFIG_PREEMPT_RT +extern void zap_rt_locks(void); +#else +# define zap_rt_locks() do { } while (0) +#endif + +#ifdef CONFIG_PREEMPT_RT +extern void zap_rt_locks(void); +#else +# define zap_rt_locks() do { } while (0) +#endif + unsigned long int_sqrt(unsigned long); extern int printk_ratelimit(void); @@ -198,6 +210,7 @@ extern void add_taint(unsigned); /* Values used for system_state */ extern enum system_states { SYSTEM_BOOTING, + SYSTEM_BOOTING_SCHEDULER_OK, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, Index: linux-rt-rebase.q/include/linux/profile.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/profile.h +++ linux-rt-rebase.q/include/linux/profile.h @@ -6,16 +6,18 @@ #include #include #include +#include #include #include extern int prof_on __read_mostly; -#define CPU_PROFILING 1 -#define SCHED_PROFILING 2 -#define SLEEP_PROFILING 3 -#define KVM_PROFILING 4 +#define CPU_PROFILING 1 +#define SCHED_PROFILING 2 +#define SLEEP_PROFILING 3 +#define KVM_PROFILING 4 +#define PREEMPT_PROFILING 5 struct proc_dir_entry; struct pt_regs; @@ -54,6 +56,8 @@ enum profile_type { PROFILE_MUNMAP }; +extern int prof_pid; + #ifdef CONFIG_PROFILING struct task_struct; Index: linux-rt-rebase.q/include/linux/radix-tree.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/radix-tree.h +++ linux-rt-rebase.q/include/linux/radix-tree.h @@ -155,7 +155,18 @@ void *radix_tree_delete(struct radix_tre unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); +/* + * On a mutex based kernel we can freely schedule within the radix code: + */ +#ifdef CONFIG_PREEMPT_RT +static inline int radix_tree_preload(gfp_t gfp_mask) +{ + return 0; +} +#else int radix_tree_preload(gfp_t gfp_mask); +#endif + void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); @@ -171,7 +182,9 @@ int radix_tree_tagged(struct radix_tree_ static inline void radix_tree_preload_end(void) { +#ifndef CONFIG_PREEMPT_RT preempt_enable(); +#endif } #endif /* _LINUX_RADIX_TREE_H */ Index: linux-rt-rebase.q/include/linux/smp.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/smp.h +++ linux-rt-rebase.q/include/linux/smp.h @@ -33,6 +33,16 @@ extern void smp_send_stop(void); */ extern void smp_send_reschedule(int cpu); +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + /* * Prepare machine for booting other CPUs. 
@@ -97,6 +107,7 @@ static inline int up_smp_call_function(v 0; \ }) static inline void smp_send_reschedule(int cpu) { } +static inline void smp_send_reschedule_allbutself(void) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_single(cpuid, func, info, retry, wait) \ Index: linux-rt-rebase.q/include/linux/smp_lock.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/smp_lock.h +++ linux-rt-rebase.q/include/linux/smp_lock.h @@ -17,6 +17,8 @@ extern void __lockfunc __release_kernel_ __release_kernel_lock(); \ } while (0) + + /* * Non-SMP kernels will never block on the kernel lock, * so we are better off returning a constant zero from @@ -44,7 +46,7 @@ extern void __lockfunc unlock_kernel(voi #define lock_kernel() do { } while(0) #define unlock_kernel() do { } while(0) #define release_kernel_lock(task) do { } while(0) -#define reacquire_kernel_lock(task) 0 +#define reacquire_kernel_lock(task) do { } while(0) #define kernel_locked() 1 #endif /* CONFIG_LOCK_KERNEL */ Index: linux-rt-rebase.q/include/linux/workqueue.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/workqueue.h +++ linux-rt-rebase.q/include/linux/workqueue.h @@ -125,6 +125,9 @@ extern struct workqueue_struct *__create #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) +extern void set_workqueue_prio(struct workqueue_struct *wq, int policy, + int rt_priority, int nice); + extern void destroy_workqueue(struct workqueue_struct *wq); extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); Index: linux-rt-rebase.q/kernel/Kconfig.preempt =================================================================== --- linux-rt-rebase.q.orig/kernel/Kconfig.preempt +++ linux-rt-rebase.q/kernel/Kconfig.preempt @@ -1,14 +1,13 @@ - choice - prompt "Preemption Model" - default PREEMPT_NONE + prompt "Preemption Mode" + default PREEMPT_RT config PREEMPT_NONE bool "No Forced Preemption (Server)" help - This is the traditional Linux preemption model, geared towards + This is the traditional Linux preemption model geared towards throughput. It will still provide good latencies most of the - time, but there are no guarantees and occasional longer delays + time but there are no guarantees and occasional long delays are possible. Select this option if you are building a kernel for a server or @@ -21,7 +20,7 @@ config PREEMPT_VOLUNTARY help This option reduces the latency of the kernel by adding more "explicit preemption points" to the kernel code. These new - preemption points have been selected to reduce the maximum + preemption points have been selected to minimize the maximum latency of rescheduling, providing faster application reactions, at the cost of slightly lower throughput. @@ -33,32 +32,103 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. -config PREEMPT +config PREEMPT_DESKTOP bool "Preemptible Kernel (Low-Latency Desktop)" help This option reduces the latency of the kernel by making - all kernel code (that is not executing in a critical section) + all kernel code that is not executing in a critical section preemptible. 
This allows reaction to interactive events by permitting a low priority process to be preempted involuntarily even if it is in kernel mode executing a system call and would - otherwise not be about to reach a natural preemption point. - This allows applications to run more 'smoothly' even when the - system is under load, at the cost of slightly lower throughput - and a slight runtime overhead to kernel code. + otherwise not about to reach a preemption point. This allows + applications to run more 'smoothly' even when the system is + under load, at the cost of slighly lower throughput and a + slight runtime overhead to kernel code. + + (According to profiles, when this mode is selected then even + during kernel-intense workloads the system is in an immediately + preemptible state more than 50% of the time.) Select this if you are building a kernel for a desktop or embedded system with latency requirements in the milliseconds range. +config PREEMPT_RT + bool "Complete Preemption (Real-Time)" + select PREEMPT_SOFTIRQS + select PREEMPT_HARDIRQS + select PREEMPT_RCU + select RT_MUTEXES + help + This option further reduces the scheduling latency of the + kernel by replacing almost every spinlock used by the kernel + with preemptible mutexes and thus making all but the most + critical kernel code involuntarily preemptible. The remaining + handful of lowlevel non-preemptible codepaths are short and + have a deterministic latency of a couple of tens of + microseconds (depending on the hardware). This also allows + applications to run more 'smoothly' even when the system is + under load, at the cost of lower throughput and runtime + overhead to kernel code. + + (According to profiles, when this mode is selected then even + during kernel-intense workloads the system is in an immediately + preemptible state more than 95% of the time.) + + Select this if you are building a kernel for a desktop, + embedded or real-time system with guaranteed latency + requirements of 100 usecs or lower. + endchoice -config PREEMPT_BKL - bool "Preempt The Big Kernel Lock" - depends on SMP || PREEMPT +config PREEMPT + bool default y + depends on PREEMPT_DESKTOP || PREEMPT_RT + +config PREEMPT_SOFTIRQS + bool "Thread Softirqs" + default n +# depends on PREEMPT + help + This option reduces the latency of the kernel by 'threading' + soft interrupts. This means that all softirqs will execute + in softirqd's context. While this helps latency, it can also + reduce performance. + + The threading of softirqs can also be controlled via + /proc/sys/kernel/softirq_preemption runtime flag and the + sofirq-preempt=0/1 boot-time option. + + Say N if you are unsure. + +config PREEMPT_HARDIRQS + bool "Thread Hardirqs" + default n + depends on !GENERIC_HARDIRQS_NO__DO_IRQ + help + This option reduces the latency of the kernel by 'threading' + hardirqs. This means that all (or selected) hardirqs will run + in their own kernel thread context. While this helps latency, + this feature can also reduce performance. + + The threading of hardirqs can also be controlled via the + /proc/sys/kernel/hardirq_preemption runtime flag and the + hardirq-preempt=0/1 boot-time option. Per-irq threading can + be enabled/disable via the /proc/irq///threaded + runtime flags. + + Say N if you are unsure. + +config SPINLOCK_BKL + bool "Old-Style Big Kernel Lock" + depends on (PREEMPT || SMP) && !PREEMPT_RT + default n help - This option reduces the latency of the kernel by making the - big kernel lock preemptible. 
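The softirq and hardirq threading described in the help text above is controlled at run time through ordinary sysctl files, so it can be toggled from user space. A minimal sketch, assuming the /proc/sys/kernel/softirq_preemption and /proc/sys/kernel/hardirq_preemption flags named in the help text; it is not part of the patch itself.

#include <stdio.h>

/* Write 0 or 1 to one of the preemption sysctls; returns 0 on success. */
static int set_irq_threading(const char *name, int on)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%d\n", on ? 1 : 0);
        return fclose(f) ? -1 : 0;
}

int main(void)
{
        /* thread softirqs, leave hardirqs in hard interrupt context */
        if (set_irq_threading("softirq_preemption", 1) ||
            set_irq_threading("hardirq_preemption", 0))
                perror("set_irq_threading");
        return 0;
}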
+ This option increases the latency of the kernel by making the + big kernel lock spinlock-based (which is bad for latency). + However, enable this option if you see any problems to revert + back to the traditional spinlock BKL design. Say Y here if you are building a kernel for a desktop system. Say N if you are unsure. @@ -66,12 +136,19 @@ config PREEMPT_BKL config PREEMPT_NOTIFIERS bool +config PREEMPT_BKL + bool + depends on PREEMPT_RT || !SPINLOCK_BKL + default n if !PREEMPT + default y + choice prompt "RCU implementation type:" default CLASSIC_RCU config CLASSIC_RCU bool "Classic RCU" + depends on !PREEMPT_RT help This option selects the classic RCU implementation that is designed for best read-side performance on non-realtime @@ -102,37 +179,3 @@ config RCU_TRACE Say Y here if you want to enable RCU tracing Say N if you are unsure. - -config PREEMPT_SOFTIRQS - bool "Thread Softirqs" - default n -# depends on PREEMPT - help - This option reduces the latency of the kernel by 'threading' - soft interrupts. This means that all softirqs will execute - in softirqd's context. While this helps latency, it can also - reduce performance. - - The threading of softirqs can also be controlled via - /proc/sys/kernel/softirq_preemption runtime flag and the - sofirq-preempt=0/1 boot-time option. - - Say N if you are unsure. - -config PREEMPT_HARDIRQS - bool "Thread Hardirqs" - default n - depends on !GENERIC_HARDIRQS_NO__DO_IRQ - help - This option reduces the latency of the kernel by 'threading' - hardirqs. This means that all (or selected) hardirqs will run - in their own kernel thread context. While this helps latency, - this feature can also reduce performance. - - The threading of hardirqs can also be controlled via the - /proc/sys/kernel/hardirq_preemption runtime flag and the - hardirq-preempt=0/1 boot-time option. Per-irq threading can - be enabled/disable via the /proc/irq///threaded - runtime flags. - - Say N if you are unsure. Index: linux-rt-rebase.q/kernel/exit.c =================================================================== --- linux-rt-rebase.q.orig/kernel/exit.c +++ linux-rt-rebase.q/kernel/exit.c @@ -65,7 +65,9 @@ static void __unhash_process(struct task detach_pid(p, PIDTYPE_SID); list_del_rcu(&p->tasks); + preempt_disable(); __get_cpu_var(process_counts)--; + preempt_enable(); } list_del_rcu(&p->thread_group); remove_parent(p); @@ -594,9 +596,11 @@ static void exit_mm(struct task_struct * task_lock(tsk); tsk->mm = NULL; up_read(&mm->mmap_sem); + preempt_disable(); // FIXME enter_lazy_tlb(mm, current); /* We don't want this task to be frozen prematurely */ clear_freeze_flag(tsk); + preempt_enable(); task_unlock(tsk); mmput(mm); } @@ -900,6 +904,7 @@ fastcall NORET_TYPE void do_exit(long co WARN_ON(atomic_read(&tsk->fs_excl)); + BUG_ON(in_interrupt()); if (unlikely(in_interrupt())) panic("Aiee, killing interrupt handler!"); if (unlikely(!tsk->pid)) @@ -1029,15 +1034,18 @@ fastcall NORET_TYPE void do_exit(long co if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); - preempt_disable(); +again: + local_irq_disable(); /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; - schedule(); - BUG(); - /* Avoid "noreturn function does return". */ - for (;;) - cpu_relax(); /* For when BUG is null */ + __schedule(); + printk(KERN_ERR "BUG: dead task %s:%d back from the grave!\n", + current->comm, current->pid); + printk(KERN_ERR ".... 
flags: %08x, count: %d, state: %08lx\n", + current->flags, atomic_read(¤t->usage), current->state); + printk(KERN_ERR ".... trying again ...\n"); + goto again; } EXPORT_SYMBOL_GPL(do_exit); @@ -1544,6 +1552,7 @@ repeat: list_for_each(_p,&tsk->children) { p = list_entry(_p, struct task_struct, sibling); + BUG_ON(!atomic_read(&p->usage)); ret = eligible_child(pid, options, p); if (!ret) continue; Index: linux-rt-rebase.q/kernel/fork.c =================================================================== --- linux-rt-rebase.q.orig/kernel/fork.c +++ linux-rt-rebase.q/kernel/fork.c @@ -125,10 +125,13 @@ void free_task(struct task_struct *tsk) } EXPORT_SYMBOL(free_task); -void __put_task_struct(struct task_struct *tsk) +#ifdef CONFIG_PREEMPT_RT +void __put_task_struct_cb(struct rcu_head *rhp) { + struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); + + BUG_ON(atomic_read(&tsk->usage)); WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE))); - WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); security_task_free(tsk); @@ -140,6 +143,23 @@ void __put_task_struct(struct task_struc free_task(tsk); } +#else + +void __put_task_struct(struct task_struct *tsk) +{ + WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE))); + BUG_ON(atomic_read(&tsk->usage)); + WARN_ON(tsk == current); + + security_task_free(tsk); + free_uid(tsk->user); + put_group_info(tsk->group_info); + + if (!profile_handoff_task(tsk)) + free_task(tsk); +} +#endif + void __init fork_init(unsigned long mempages) { int i; @@ -1224,10 +1244,12 @@ static struct task_struct *copy_process( * to ensure it is on a valid CPU (and if not, just force it back to * parent's CPU). This avoids alot of nasty races. */ + preempt_disable(); p->cpus_allowed = current->cpus_allowed; if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) || !cpu_online(task_cpu(p)))) set_task_cpu(p, smp_processor_id()); + preempt_enable(); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) Index: linux-rt-rebase.q/kernel/futex.c =================================================================== --- linux-rt-rebase.q.orig/kernel/futex.c +++ linux-rt-rebase.q/kernel/futex.c @@ -945,7 +945,7 @@ static int futex_requeue(u32 __user *uad plist_del(&this->list, &hb1->chain); plist_add(&this->list, &hb2->chain); this->lock_ptr = &hb2->lock; -#ifdef CONFIG_DEBUG_PI_LIST +#if defined(CONFIG_DEBUG_PI_LIST) && !defined(CONFIG_PREEMPT_RT) this->list.plist.lock = &hb2->lock; #endif } @@ -1006,7 +1006,7 @@ static inline void __queue_me(struct fut prio = min(current->normal_prio, MAX_RT_PRIO); plist_node_init(&q->list, prio); -#ifdef CONFIG_DEBUG_PI_LIST +#if defined(CONFIG_DEBUG_PI_LIST) && !defined(CONFIG_PREEMPT_RT) q->list.plist.lock = &hb->lock; #endif plist_add(&q->list, &hb->chain); @@ -1244,6 +1244,10 @@ static int futex_wait(u32 __user *uaddr, * q.lock_ptr != 0 is not safe, because of ordering against wakeup. 
*/ if (likely(!plist_node_empty(&q.list))) { + unsigned long nosched_flag = current->flags & PF_NOSCHED; + + current->flags &= ~PF_NOSCHED; + if (!abs_time) schedule(); else { @@ -1266,6 +1270,8 @@ static int futex_wait(u32 __user *uaddr, /* Flag if a timeout occured */ rem = (t.task == NULL); } + + current->flags |= nosched_flag; } __set_current_state(TASK_RUNNING); Index: linux-rt-rebase.q/kernel/power/swsusp.c =================================================================== --- linux-rt-rebase.q.orig/kernel/power/swsusp.c +++ linux-rt-rebase.q/kernel/power/swsusp.c @@ -294,6 +294,7 @@ int swsusp_suspend(void) printk(KERN_ERR "Error %d suspending\n", error); /* Restore control flow magically appears here */ restore_processor_state(); + touch_softlockup_watchdog(); /* NOTE: device_power_up() is just a resume() for devices * that suspended with irqs off ... no overall powerup. */ Index: linux-rt-rebase.q/kernel/signal.c =================================================================== --- linux-rt-rebase.q.orig/kernel/signal.c +++ linux-rt-rebase.q/kernel/signal.c @@ -765,8 +765,10 @@ specific_send_sig_info(int sig, struct s { int ret = 0; - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); +#ifdef CONFIG_SMP assert_spin_locked(&t->sighand->siglock); +#endif /* Short-circuit ignored signals. */ if (sig_ignored(t, sig)) @@ -1608,6 +1610,7 @@ static void ptrace_stop(int exit_code, i if (may_ptrace_stop()) { do_notify_parent_cldstop(current, CLD_TRAPPED); read_unlock(&tasklist_lock); + current->flags &= ~PF_NOSCHED; schedule(); } else { /* @@ -1668,6 +1671,7 @@ finish_stop(int stop_count) } do { + current->flags &= ~PF_NOSCHED; schedule(); } while (try_to_freeze()); /* @@ -1779,6 +1783,9 @@ int get_signal_to_deliver(siginfo_t *inf try_to_freeze(); +#ifdef CONFIG_PREEMPT_RT + might_sleep(); +#endif relock: spin_lock_irq(¤t->sighand->siglock); for (;;) { Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -120,6 +121,8 @@ static void trigger_softirqs(void) } } +#ifndef CONFIG_PREEMPT_RT + /* * This one is for softirq.c-internal use, * where hardirqs are disabled legitimately: @@ -237,6 +240,8 @@ void local_bh_enable_ip(unsigned long ip } EXPORT_SYMBOL(local_bh_enable_ip); +#endif + /* * We restart softirq processing MAX_SOFTIRQ_RESTART times, * and we fall back to softirqd after that. @@ -650,7 +655,7 @@ void tasklet_kill(struct tasklet_struct while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { do - yield(); + msleep(1); while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); @@ -913,6 +918,11 @@ int softirq_preemption = 1; EXPORT_SYMBOL(softirq_preemption); +/* + * Real-Time Preemption depends on softirq threading: + */ +#ifndef CONFIG_PREEMPT_RT + static int __init softirq_preempt_setup (char *str) { if (!strncmp(str, "off", 3)) @@ -926,7 +936,7 @@ static int __init softirq_preempt_setup } __setup("softirq-preempt=", softirq_preempt_setup); - +#endif #endif #ifdef CONFIG_SMP Index: linux-rt-rebase.q/kernel/stop_machine.c =================================================================== --- linux-rt-rebase.q.orig/kernel/stop_machine.c +++ linux-rt-rebase.q/kernel/stop_machine.c @@ -63,7 +63,7 @@ static int stopmachine(void *cpu) /* Yield in first stage: migration threads need to * help our sisters onto their CPUs. 
*/ if (!prepared && !irqs_disabled) - yield(); + __yield(); else cpu_relax(); } @@ -109,7 +109,7 @@ static int stop_machine(void) /* Wait for them all to come to life. */ while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) - yield(); + __yield(); /* If some failed, kill them all. */ if (ret < 0) { Index: linux-rt-rebase.q/kernel/sys.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sys.c +++ linux-rt-rebase.q/kernel/sys.c @@ -35,6 +35,7 @@ #include #include +#include #include #include @@ -158,9 +159,9 @@ static int notifier_chain_unregister(str * last notifier function called. */ -static int __kprobes notifier_call_chain(struct notifier_block **nl, - unsigned long val, void *v, - int nr_to_call, int *nr_calls) +static int __kprobes notrace notifier_call_chain(struct notifier_block **nl, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; struct notifier_block *nb, *next_nb; @@ -496,7 +497,7 @@ int srcu_notifier_chain_register(struct * not yet working and interrupts must remain disabled. At * such times we must not call mutex_lock(). */ - if (unlikely(system_state == SYSTEM_BOOTING)) + if (unlikely(system_state < SYSTEM_RUNNING)) return notifier_chain_register(&nh->head, n); mutex_lock(&nh->mutex); Index: linux-rt-rebase.q/kernel/user.c =================================================================== --- linux-rt-rebase.q.orig/kernel/user.c +++ linux-rt-rebase.q/kernel/user.c @@ -108,15 +108,16 @@ void free_uid(struct user_struct *up) if (!up) return; - local_irq_save(flags); + local_irq_save_nort(flags); if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) { uid_hash_remove(up); - spin_unlock_irqrestore(&uidhash_lock, flags); + spin_unlock(&uidhash_lock); + local_irq_restore_nort(flags); key_put(up->uid_keyring); key_put(up->session_keyring); kmem_cache_free(uid_cachep, up); } else { - local_irq_restore(flags); + local_irq_restore_nort(flags); } } Index: linux-rt-rebase.q/kernel/workqueue.c =================================================================== --- linux-rt-rebase.q.orig/kernel/workqueue.c +++ linux-rt-rebase.q/kernel/workqueue.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,8 @@ #include #include +#include + /* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). @@ -157,15 +160,16 @@ static void __queue_work(struct cpu_work * * We queue the work to the CPU it was submitted, but there is no * guarantee that it will be processed by that CPU. + * + * Especially no such guarantee on PREEMPT_RT. 
*/ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) { - int ret = 0; + int ret = 0, cpu = raw_smp_processor_id(); if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { BUG_ON(!list_empty(&work->entry)); - __queue_work(wq_per_cpu(wq, get_cpu()), work); - put_cpu(); + __queue_work(wq_per_cpu(wq, cpu), work); ret = 1; } return ret; @@ -767,6 +771,47 @@ static void cleanup_workqueue_thread(str cwq->thread = NULL; } +void set_workqueue_thread_prio(struct workqueue_struct *wq, int cpu, + int policy, int rt_priority, int nice) +{ + struct sched_param param = { .sched_priority = rt_priority }; + struct cpu_workqueue_struct *cwq; + mm_segment_t oldfs = get_fs(); + struct task_struct *p; + unsigned long flags; + int ret; + + cwq = per_cpu_ptr(wq->cpu_wq, cpu); + spin_lock_irqsave(&cwq->lock, flags); + p = cwq->thread; + spin_unlock_irqrestore(&cwq->lock, flags); + + set_user_nice(p, nice); + + set_fs(KERNEL_DS); + ret = sys_sched_setscheduler(p->pid, policy, ¶m); + set_fs(oldfs); + + WARN_ON(ret); +} + + void set_workqueue_prio(struct workqueue_struct *wq, int policy, + int rt_priority, int nice) +{ + int cpu; + + /* We don't need the distraction of CPUs appearing and vanishing. */ + mutex_lock(&workqueue_mutex); + if (is_single_threaded(wq)) + set_workqueue_thread_prio(wq, 0, policy, rt_priority, nice); + else { + for_each_online_cpu(cpu) + set_workqueue_thread_prio(wq, cpu, policy, + rt_priority, nice); + } + mutex_unlock(&workqueue_mutex); +} + /** * destroy_workqueue - safely terminate a workqueue * @wq: target workqueue @@ -849,4 +894,5 @@ void __init init_workqueues(void) hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); + set_workqueue_prio(keventd_wq, SCHED_FIFO, 1, -20); } Index: linux-rt-rebase.q/lib/Kconfig.debug =================================================================== --- linux-rt-rebase.q.orig/lib/Kconfig.debug +++ linux-rt-rebase.q/lib/Kconfig.debug @@ -181,6 +181,8 @@ config DEBUG_RT_MUTEXES help This allows rt mutex semantics violations and rt mutex related deadlocks (lockups) to be detected and reported automatically. + When realtime preemption is enabled this includes spinlocks, + rwlocks, mutexes and (rw)semaphores config DEBUG_PI_LIST bool @@ -204,7 +206,7 @@ config DEBUG_SPINLOCK config DEBUG_MUTEXES bool "Mutex debugging: basic checks" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !PREEMPT_RT help This feature allows mutex semantics violations to be detected and reported. 
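For reference, the set_workqueue_prio() interface added in the workqueue.c hunk above is usable by any workqueue owner, not only init_workqueues(). The sketch below mirrors the keventd call; the module and workqueue names are made up, and the snippet is illustrative rather than part of the patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
        example_wq = create_singlethread_workqueue("example");
        if (!example_wq)
                return -ENOMEM;
        /* same pattern as keventd above: SCHED_FIFO, RT priority 1 */
        set_workqueue_prio(example_wq, SCHED_FIFO, 1, -20);
        return 0;
}

static void __exit example_exit(void)
{
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");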
Index: linux-rt-rebase.q/lib/Makefile =================================================================== --- linux-rt-rebase.q.orig/lib/Makefile +++ linux-rt-rebase.q/lib/Makefile @@ -24,7 +24,8 @@ obj-$(CONFIG_GENERIC_IOMAP) += iomap.o obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o -lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o +obj-$(CONFIG_PREEMPT_RT) += plist.o +obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o Index: linux-rt-rebase.q/lib/kernel_lock.c =================================================================== --- linux-rt-rebase.q.orig/lib/kernel_lock.c +++ linux-rt-rebase.q/lib/kernel_lock.c @@ -35,22 +35,25 @@ DECLARE_MUTEX(kernel_sem); * about recursion, both due to the down() and due to the enabling of * preemption. schedule() will re-check the preemption flag after * reacquiring the semaphore. + * + * Called with interrupts disabled. */ int __lockfunc __reacquire_kernel_lock(void) { struct task_struct *task = current; int saved_lock_depth = task->lock_depth; + local_irq_enable(); BUG_ON(saved_lock_depth < 0); task->lock_depth = -1; - __preempt_enable_no_resched(); down(&kernel_sem); - preempt_disable(); task->lock_depth = saved_lock_depth; + local_irq_disable(); + return 0; } @@ -67,11 +70,15 @@ void __lockfunc lock_kernel(void) struct task_struct *task = current; int depth = task->lock_depth + 1; - if (likely(!depth)) + if (likely(!depth)) { /* * No recursion worries - we set up lock_depth _after_ */ down(&kernel_sem); +#ifdef CONFIG_DEBUG_RT_MUTEXES + current->last_kernel_lock = __builtin_return_address(0); +#endif + } task->lock_depth = depth; } @@ -82,8 +89,12 @@ void __lockfunc unlock_kernel(void) BUG_ON(task->lock_depth < 0); - if (likely(--task->lock_depth < 0)) + if (likely(--task->lock_depth == -1)) { +#ifdef CONFIG_DEBUG_RT_MUTEXES + current->last_kernel_lock = NULL; +#endif up(&kernel_sem); + } } #else @@ -116,11 +127,9 @@ static __cacheline_aligned_in_smp DEFIN */ int __lockfunc __reacquire_kernel_lock(void) { - while (!_raw_spin_trylock(&kernel_flag)) { - if (test_thread_flag(TIF_NEED_RESCHED)) - return -EAGAIN; - cpu_relax(); - } + local_irq_enable(); + _raw_spin_lock(&kernel_flag); + local_irq_disable(); preempt_disable(); return 0; } Index: linux-rt-rebase.q/lib/locking-selftest.c =================================================================== --- linux-rt-rebase.q.orig/lib/locking-selftest.c +++ linux-rt-rebase.q/lib/locking-selftest.c @@ -158,7 +158,7 @@ static void init_shared_classes(void) local_bh_disable(); \ local_irq_disable(); \ trace_softirq_enter(); \ - WARN_ON(!in_softirq()); + /* FIXME: preemptible softirqs. 
WARN_ON(!in_softirq()); */ #define SOFTIRQ_EXIT() \ trace_softirq_exit(); \ @@ -550,6 +550,11 @@ GENERATE_TESTCASE(init_held_rsem) #undef E /* + * FIXME: turns these into raw-spinlock tests on -rt + */ +#ifndef CONFIG_PREEMPT_RT + +/* * locking an irq-safe lock with irqs enabled: */ #define E1() \ @@ -890,6 +895,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_ #include "locking-selftest-softirq.h" // GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft) +#endif /* !CONFIG_PREEMPT_RT */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map) # define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map) @@ -1004,7 +1011,7 @@ static inline void print_testname(const #define DO_TESTCASE_1(desc, name, nr) \ print_testname(desc"/"#nr); \ - dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ + dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ printk("\n"); #define DO_TESTCASE_1B(desc, name, nr) \ @@ -1012,17 +1019,17 @@ static inline void print_testname(const dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \ printk("\n"); -#define DO_TESTCASE_3(desc, name, nr) \ - print_testname(desc"/"#nr); \ - dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \ - dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ +#define DO_TESTCASE_3(desc, name, nr) \ + print_testname(desc"/"#nr); \ + dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \ + dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ printk("\n"); -#define DO_TESTCASE_3RW(desc, name, nr) \ - print_testname(desc"/"#nr); \ +#define DO_TESTCASE_3RW(desc, name, nr) \ + print_testname(desc"/"#nr); \ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\ - dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ + dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ printk("\n"); @@ -1053,7 +1060,7 @@ static inline void print_testname(const print_testname(desc); \ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \ - dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \ + dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ @@ -1185,6 +1192,7 @@ void locking_selftest(void) /* * irq-context testcases: */ +#ifndef CONFIG_PREEMPT_RT DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1); DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A); DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B); @@ -1194,6 +1202,7 @@ void locking_selftest(void) DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); +#endif if (unexpected_testcase_failures) { printk("-----------------------------------------------------------------\n"); Index: linux-rt-rebase.q/lib/radix-tree.c =================================================================== --- linux-rt-rebase.q.orig/lib/radix-tree.c +++ linux-rt-rebase.q/lib/radix-tree.c @@ -97,12 +97,13 @@ radix_tree_node_alloc(struct radix_tree_ if (ret == NULL && !(gfp_mask & __GFP_WAIT)) { struct radix_tree_preload *rtp; - rtp = &__get_cpu_var(radix_tree_preloads); + rtp = &get_cpu_var(radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes[rtp->nr - 1]; rtp->nodes[rtp->nr - 1] = NULL; rtp->nr--; } + put_cpu_var(radix_tree_preloads); } BUG_ON(radix_tree_is_direct_ptr(ret)); return ret; @@ -121,6 +122,8 @@ radix_tree_node_free(struct radix_tree_n 
call_rcu(&node->rcu_head, radix_tree_node_rcu_free); } +#ifndef CONFIG_PREEMPT_RT + /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On @@ -153,6 +156,8 @@ out: } EXPORT_SYMBOL(radix_tree_preload); +#endif + static inline void tag_set(struct radix_tree_node *node, unsigned int tag, int offset) { Index: linux-rt-rebase.q/lib/smp_processor_id.c =================================================================== --- linux-rt-rebase.q.orig/lib/smp_processor_id.c +++ linux-rt-rebase.q/lib/smp_processor_id.c @@ -7,7 +7,7 @@ #include #include -unsigned int debug_smp_processor_id(void) +unsigned int notrace debug_smp_processor_id(void) { unsigned long preempt_count = preempt_count(); int this_cpu = raw_smp_processor_id(); @@ -42,7 +42,7 @@ unsigned int debug_smp_processor_id(void if (!printk_ratelimit()) goto out_enable; - printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid); + printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count()-1, current->comm, current->pid); print_symbol("caller is %s\n", (long)__builtin_return_address(0)); dump_stack(); patches/kmap-atomic-i386-fix.patch0000664000077200007720000000275410655544576016250 0ustar mingomingo--- arch/i386/mm/highmem.c | 2 +- include/asm-i386/highmem.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/arch/i386/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mm/highmem.c +++ linux-rt-rebase.q/arch/i386/mm/highmem.c @@ -3,9 +3,9 @@ void *kmap(struct page *page) { - might_sleep(); if (!PageHighMem(page)) return page_address(page); + might_sleep(); return kmap_high(page); } Index: linux-rt-rebase.q/include/asm-i386/highmem.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-i386/highmem.h +++ linux-rt-rebase.q/include/asm-i386/highmem.h @@ -94,10 +94,10 @@ struct page *kmap_atomic_to_page(void *p * on PREEMPT_RT kmap_atomic() is a wrapper that uses kmap(): */ #ifdef CONFIG_PREEMPT_RT -# define kmap_atomic_prot(page, type, prot) kmap(page) -# define kmap_atomic(page, type) kmap(page) +# define kmap_atomic_prot(page, type, prot) ({ pagefault_disable(); kmap(page); }) +# define kmap_atomic(page, type) ({ pagefault_disable(); kmap(page); }) # define kmap_atomic_pfn(pfn, type) kmap(pfn_to_page(pfn)) -# define kunmap_atomic(kvaddr, type) kunmap_virt(kvaddr) +# define kunmap_atomic(kvaddr, type) do { pagefault_enable(); kunmap_virt(kvaddr); } while(0) # define kmap_atomic_to_page(kvaddr) kmap_to_page(kvaddr) #else # define kmap_atomic_prot(page, type, prot) __kmap_atomic_prot(page, type, prot) patches/preempt-realtime-ide.patch0000664000077200007720000002355210655544575016604 0ustar mingomingo--- drivers/ide/ide-floppy.c | 4 ++-- drivers/ide/ide-io.c | 4 ++-- drivers/ide/ide-iops.c | 26 ++++++++++++-------------- drivers/ide/ide-lib.c | 14 +++++--------- drivers/ide/ide-probe.c | 8 ++++---- drivers/ide/ide-taskfile.c | 6 +++--- drivers/ide/pci/alim15x3.c | 12 ++++++------ drivers/ide/pci/hpt366.c | 4 ++-- 8 files changed, 36 insertions(+), 42 deletions(-) Index: linux-rt-rebase.q/drivers/ide/ide-floppy.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/ide-floppy.c +++ linux-rt-rebase.q/drivers/ide/ide-floppy.c @@ -1672,9 
+1672,9 @@ static int idefloppy_get_format_progress atapi_status_t status; unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); status.all = HWIF(drive)->INB(IDE_STATUS_REG); - local_irq_restore(flags); + local_irq_restore_nort(flags); progress_indication = !status.b.dsc ? 0 : 0x10000; } Index: linux-rt-rebase.q/drivers/ide/ide-io.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/ide-io.c +++ linux-rt-rebase.q/drivers/ide/ide-io.c @@ -1199,7 +1199,7 @@ static void ide_do_request (ide_hwgroup_ ide_get_lock(ide_intr, hwgroup); /* caller must own ide_lock */ - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); while (!hwgroup->busy) { hwgroup->busy = 1; @@ -1467,7 +1467,7 @@ void ide_timer_expiry (unsigned long dat #endif /* DISABLE_IRQ_NOSYNC */ /* local CPU only, * as if we were handling an interrupt */ - local_irq_disable(); + local_irq_disable_nort(); if (hwgroup->polling) { startstop = handler(drive); } else if (drive_is_ready(drive)) { Index: linux-rt-rebase.q/drivers/ide/ide-iops.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/ide-iops.c +++ linux-rt-rebase.q/drivers/ide/ide-iops.c @@ -220,10 +220,10 @@ static void ata_input_data(ide_drive_t * if (io_32bit) { if (io_32bit & 2) { unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(drive, IDE_NSECTOR_REG); hwif->INSL(IDE_DATA_REG, buffer, wcount); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else hwif->INSL(IDE_DATA_REG, buffer, wcount); } else { @@ -242,10 +242,10 @@ static void ata_output_data(ide_drive_t if (io_32bit) { if (io_32bit & 2) { unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(drive, IDE_NSECTOR_REG); hwif->OUTSL(IDE_DATA_REG, buffer, wcount); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else hwif->OUTSL(IDE_DATA_REG, buffer, wcount); } else { @@ -540,12 +540,12 @@ int ide_wait_stat (ide_startstop_t *star if (!(stat & BUSY_STAT)) break; - local_irq_restore(flags); + local_irq_restore_nort(flags); *startstop = ide_error(drive, "status timeout", stat); return 1; } } - local_irq_restore(flags); + local_irq_restore_nort(flags); } /* * Allow status to settle, then read it again. 
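The conversions in this IDE patch (and in the network patch further down) rely on the *_nort ("not on RT") interrupt helpers, which are defined elsewhere in this patch queue. Roughly, they act as the plain local_irq_* operations on non-RT kernels and degrade to (near) no-ops under PREEMPT_RT, where these paths run in preemptible process context. The definitions below are only a sketch of that behaviour, not the authoritative ones from the queue.

#ifdef CONFIG_PREEMPT_RT
# define local_irq_disable_nort()       do { } while (0)
# define local_irq_enable_nort()        do { } while (0)
# define local_irq_save_nort(flags)     local_save_flags(flags)
# define local_irq_restore_nort(flags)  do { (void)(flags); } while (0)
#else
# define local_irq_disable_nort()       local_irq_disable()
# define local_irq_enable_nort()        local_irq_enable()
# define local_irq_save_nort(flags)     local_irq_save(flags)
# define local_irq_restore_nort(flags)  local_irq_restore(flags)
#endif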
@@ -720,17 +720,15 @@ int ide_driveid_update (ide_drive_t *dri printk("%s: CHECK for good STATUS\n", drive->name); return 0; } - local_irq_save(flags); - SELECT_MASK(drive, 0); id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC); - if (!id) { - local_irq_restore(flags); + if (!id) return 0; - } + local_irq_save_nort(flags); + SELECT_MASK(drive, 0); ata_input_data(drive, id, SECTOR_WORDS); (void) hwif->INB(IDE_STATUS_REG); /* clear drive IRQ */ - local_irq_enable(); - local_irq_restore(flags); + local_irq_enable_nort(); + local_irq_restore_nort(flags); ide_fix_driveid(id); if (id) { drive->id->dma_ultra = id->dma_ultra; @@ -810,7 +808,7 @@ int ide_config_drive_speed (ide_drive_t if (time_after(jiffies, timeout)) break; } - local_irq_restore(flags); + local_irq_restore_nort(flags); } /* Index: linux-rt-rebase.q/drivers/ide/ide-lib.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/ide-lib.c +++ linux-rt-rebase.q/drivers/ide/ide-lib.c @@ -388,15 +388,16 @@ int ide_set_xfer_rate(ide_drive_t *drive static void ide_dump_opcode(ide_drive_t *drive) { + unsigned long flags; struct request *rq; u8 opcode = 0; int found = 0; - spin_lock(&ide_lock); + spin_lock_irqsave(&ide_lock, flags); rq = NULL; if (HWGROUP(drive)) rq = HWGROUP(drive)->rq; - spin_unlock(&ide_lock); + spin_unlock_irqrestore(&ide_lock, flags); if (!rq) return; if (rq->cmd_type == REQ_TYPE_ATA_CMD || @@ -425,10 +426,8 @@ static void ide_dump_opcode(ide_drive_t static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat) { ide_hwif_t *hwif = HWIF(drive); - unsigned long flags; u8 err = 0; - local_irq_save(flags); printk("%s: %s: status=0x%02x { ", drive->name, msg, stat); if (stat & BUSY_STAT) printk("Busy "); @@ -488,7 +487,7 @@ static u8 ide_dump_ata_status(ide_drive_ printk("\n"); } ide_dump_opcode(drive); - local_irq_restore(flags); + return err; } @@ -503,14 +502,11 @@ static u8 ide_dump_ata_status(ide_drive_ static u8 ide_dump_atapi_status(ide_drive_t *drive, const char *msg, u8 stat) { - unsigned long flags; - atapi_status_t status; atapi_error_t error; status.all = stat; error.all = 0; - local_irq_save(flags); printk("%s: %s: status=0x%02x { ", drive->name, msg, stat); if (status.b.bsy) printk("Busy "); @@ -536,7 +532,7 @@ static u8 ide_dump_atapi_status(ide_driv printk("}\n"); } ide_dump_opcode(drive); - local_irq_restore(flags); + return error.all; } Index: linux-rt-rebase.q/drivers/ide/ide-probe.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/ide-probe.c +++ linux-rt-rebase.q/drivers/ide/ide-probe.c @@ -141,7 +141,7 @@ static inline void do_identify (ide_driv hwif->ata_input_data(drive, id, SECTOR_WORDS); drive->id_read = 1; - local_irq_enable(); + local_irq_enable_nort(); ide_fix_driveid(id); #if defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA) @@ -323,14 +323,14 @@ static int actual_try_to_identify (ide_d unsigned long flags; /* local CPU only; some systems need this */ - local_irq_save(flags); + local_irq_save_nort(flags); /* drive returned ID */ do_identify(drive, cmd); /* drive responded with ID */ rc = 0; /* clear drive IRQ */ (void) hwif->INB(IDE_STATUS_REG); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else { /* drive refused ID */ rc = 2; @@ -807,7 +807,7 @@ static void probe_hwif(ide_hwif_t *hwif, } while ((stat & BUSY_STAT) && time_after(timeout, jiffies)); } - local_irq_restore(flags); + local_irq_restore_nort(flags); /* * Use cached IRQ number. 
It might be (and is...) changed by probe * code above Index: linux-rt-rebase.q/drivers/ide/ide-taskfile.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/ide-taskfile.c +++ linux-rt-rebase.q/drivers/ide/ide-taskfile.c @@ -278,7 +278,7 @@ static void ide_pio_sector(ide_drive_t * offset %= PAGE_SIZE; #ifdef CONFIG_HIGHMEM - local_irq_save(flags); + local_irq_save_nort(flags); #endif buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset; @@ -298,7 +298,7 @@ static void ide_pio_sector(ide_drive_t * kunmap_atomic(buf, KM_BIO_SRC_IRQ); #ifdef CONFIG_HIGHMEM - local_irq_restore(flags); + local_irq_restore_nort(flags); #endif } @@ -464,7 +464,7 @@ ide_startstop_t pre_task_out_intr (ide_d } if (!drive->unmask) - local_irq_disable(); + local_irq_disable_nort(); ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL); ide_pio_datablock(drive, rq, 1); Index: linux-rt-rebase.q/drivers/ide/pci/alim15x3.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/pci/alim15x3.c +++ linux-rt-rebase.q/drivers/ide/pci/alim15x3.c @@ -326,7 +326,7 @@ static u8 ali15x3_tune_pio (ide_drive_t if (r_clc >= 16) r_clc = 0; } - local_irq_save(flags); + local_irq_save_nort(flags); /* * PIO mode => ATA FIFO on, ATAPI FIFO off @@ -348,7 +348,7 @@ static u8 ali15x3_tune_pio (ide_drive_t pci_write_config_byte(dev, port, s_clc); pci_write_config_byte(dev, port+drive->select.b.unit+2, (a_clc << 4) | r_clc); - local_irq_restore(flags); + local_irq_restore_nort(flags); /* * setup active rec @@ -519,7 +519,7 @@ static unsigned int __devinit init_chips } #endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ - local_irq_save(flags); + local_irq_save_nort(flags); if (m5229_revision < 0xC2) { /* @@ -580,7 +580,7 @@ static unsigned int __devinit init_chips out: pci_dev_put(north); pci_dev_put(isa_dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); return 0; } @@ -630,7 +630,7 @@ static u8 __devinit ata66_ali15x3(ide_hw unsigned long flags; u8 cbl = ATA_CBL_PATA40, tmpbyte; - local_irq_save(flags); + local_irq_save_nort(flags); if (m5229_revision >= 0xC2) { /* @@ -679,7 +679,7 @@ static u8 __devinit ata66_ali15x3(ide_hw pci_write_config_byte(dev, 0x53, tmpbyte); - local_irq_restore(flags); + local_irq_restore_nort(flags); return cbl; } Index: linux-rt-rebase.q/drivers/ide/pci/hpt366.c =================================================================== --- linux-rt-rebase.q.orig/drivers/ide/pci/hpt366.c +++ linux-rt-rebase.q/drivers/ide/pci/hpt366.c @@ -1351,7 +1351,7 @@ static void __devinit init_dma_hpt366(id dma_old = hwif->INB(dmabase + 2); - local_irq_save(flags); + local_irq_save_nort(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 
0x4b : 0x43, &masterdma); @@ -1362,7 +1362,7 @@ static void __devinit init_dma_hpt366(id if (dma_new != dma_old) hwif->OUTB(dma_new, dmabase + 2); - local_irq_restore(flags); + local_irq_restore_nort(flags); ide_setup_dma(hwif, dmabase, 8); } patches/preempt-realtime-net.patch0000664000077200007720000004652410655544575016635 0ustar mingomingo--- include/linux/netdevice.h | 8 ++--- include/net/dn_dev.h | 6 ++-- include/net/sock.h | 4 +- net/core/dev.c | 49 +++++++++++++++++++++++++++----- net/core/netpoll.c | 60 ++++++++++++++++++++++++++-------------- net/core/sock.c | 2 - net/decnet/dn_dev.c | 44 ++++++++++++++--------------- net/ipv4/icmp.c | 5 ++- net/ipv4/route.c | 4 +- net/ipv6/netfilter/ip6_tables.c | 4 +- net/sched/sch_generic.c | 29 +++++++++++++++---- net/unix/af_unix.c | 3 +- 12 files changed, 145 insertions(+), 73 deletions(-) Index: linux-rt-rebase.q/include/linux/netdevice.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/netdevice.h +++ linux-rt-rebase.q/include/linux/netdevice.h @@ -993,7 +993,7 @@ static inline int netif_rx_reschedule(st local_irq_save(flags); list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + raise_softirq_irqoff(NET_RX_SOFTIRQ); local_irq_restore(flags); return 1; } @@ -1041,20 +1041,20 @@ static inline void netif_poll_enable(str static inline void netif_tx_lock(struct net_device *dev) { spin_lock(&dev->_xmit_lock); - dev->xmit_lock_owner = smp_processor_id(); + dev->xmit_lock_owner = raw_smp_processor_id(); } static inline void netif_tx_lock_bh(struct net_device *dev) { spin_lock_bh(&dev->_xmit_lock); - dev->xmit_lock_owner = smp_processor_id(); + dev->xmit_lock_owner = raw_smp_processor_id(); } static inline int netif_tx_trylock(struct net_device *dev) { int ok = spin_trylock(&dev->_xmit_lock); if (likely(ok)) - dev->xmit_lock_owner = smp_processor_id(); + dev->xmit_lock_owner = raw_smp_processor_id(); return ok; } Index: linux-rt-rebase.q/include/net/dn_dev.h =================================================================== --- linux-rt-rebase.q.orig/include/net/dn_dev.h +++ linux-rt-rebase.q/include/net/dn_dev.h @@ -76,9 +76,9 @@ struct dn_dev_parms { int priority; /* Priority to be a router */ char *name; /* Name for sysctl */ int ctl_name; /* Index for sysctl */ - int (*up)(struct net_device *); - void (*down)(struct net_device *); - void (*timer3)(struct net_device *, struct dn_ifaddr *ifa); + int (*dn_up)(struct net_device *); + void (*dn_down)(struct net_device *); + void (*dn_timer3)(struct net_device *, struct dn_ifaddr *ifa); void *sysctl; }; Index: linux-rt-rebase.q/include/net/sock.h =================================================================== --- linux-rt-rebase.q.orig/include/net/sock.h +++ linux-rt-rebase.q/include/net/sock.h @@ -625,12 +625,12 @@ static inline void sk_refcnt_debug_relea /* Called with local bh disabled */ static __inline__ void sock_prot_inc_use(struct proto *prot) { - prot->stats[smp_processor_id()].inuse++; + prot->stats[raw_smp_processor_id()].inuse++; } static __inline__ void sock_prot_dec_use(struct proto *prot) { - prot->stats[smp_processor_id()].inuse--; + prot->stats[raw_smp_processor_id()].inuse--; } /* With per-bucket locks this operation is not-atomic, so that Index: linux-rt-rebase.q/net/core/dev.c =================================================================== --- linux-rt-rebase.q.orig/net/core/dev.c +++ linux-rt-rebase.q/net/core/dev.c @@ -1501,7 +1501,7 @@ 
out_kfree_skb: return 0; } -#define HARD_TX_LOCK(dev, cpu) { \ +#define HARD_TX_LOCK(dev) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ netif_tx_lock(dev); \ } \ @@ -1633,11 +1633,17 @@ gso: Either shot noqueue qdisc, it is even simpler 8) */ if (dev->flags & IFF_UP) { - int cpu = smp_processor_id(); /* ok because BHs are off */ + /* + * No need to check for recursion with threaded interrupts: + */ +#ifdef CONFIG_PREEMPT_RT + if (1) { +#else + int cpu = raw_smp_processor_id(); /* ok because BHs are off */ if (dev->xmit_lock_owner != cpu) { - - HARD_TX_LOCK(dev, cpu); +#endif + HARD_TX_LOCK(dev); if (!netif_queue_stopped(dev) && !netif_subqueue_stopped(dev, skb->queue_mapping)) { @@ -1773,7 +1779,8 @@ static inline struct net_device *skb_bon static void net_tx_action(struct softirq_action *h) { - struct softnet_data *sd = &__get_cpu_var(softnet_data); + struct softnet_data *sd = &per_cpu(softnet_data, + raw_smp_processor_id()); if (sd->completion_queue) { struct sk_buff *clist; @@ -1789,6 +1796,11 @@ static void net_tx_action(struct softirq BUG_TRAP(!atomic_read(&skb->users)); __kfree_skb(skb); + /* + * Safe to reschedule - the list is private + * at this point. + */ + cond_resched_softirq_context(); } } @@ -1807,12 +1819,27 @@ static void net_tx_action(struct softirq smp_mb__before_clear_bit(); clear_bit(__LINK_STATE_SCHED, &dev->state); + /* + * We are executing in softirq context here, and + * if softirqs are preemptible, we must avoid + * infinite reactivation of the softirq by + * either the tx handler, or by netif_schedule(). + * (it would result in an infinitely looping + * softirq context) + * So we take the spinlock unconditionally. + */ +#ifdef CONFIG_PREEMPT_SOFTIRQS + spin_lock(&dev->queue_lock); + qdisc_run(dev); + spin_unlock(&dev->queue_lock); +#else if (spin_trylock(&dev->queue_lock)) { qdisc_run(dev); spin_unlock(&dev->queue_lock); } else { netif_schedule(dev); } +#endif } } } @@ -1941,7 +1968,7 @@ int netif_receive_skb(struct sk_buff *sk if (!orig_dev) return NET_RX_DROP; - __get_cpu_var(netdev_rx_stat).total++; + per_cpu(netdev_rx_stat, raw_smp_processor_id()).total++; skb_reset_network_header(skb); skb_reset_transport_header(skb); @@ -2021,9 +2048,10 @@ static int process_backlog(struct net_de { int work = 0; int quota = min(backlog_dev->quota, *budget); - struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct softnet_data *queue; unsigned long start_time = jiffies; + queue = &per_cpu(softnet_data, raw_smp_processor_id()); backlog_dev->weight = weight_p; for (;;) { struct sk_buff *skb; @@ -2066,12 +2094,13 @@ job_done: static void net_rx_action(struct softirq_action *h) { - struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct softnet_data *queue; unsigned long start_time = jiffies; int budget = netdev_budget; void *have; local_irq_disable(); + queue = &__get_cpu_var(softnet_data); while (!list_empty(&queue->poll_list)) { struct net_device *dev; @@ -2080,6 +2109,10 @@ static void net_rx_action(struct softirq goto softnet_break; local_irq_enable(); + if (unlikely(cond_resched_softirq_context())) { + local_irq_disable(); + continue; + } dev = list_entry(queue->poll_list.next, struct net_device, poll_list); Index: linux-rt-rebase.q/net/core/netpoll.c =================================================================== --- linux-rt-rebase.q.orig/net/core/netpoll.c +++ linux-rt-rebase.q/net/core/netpoll.c @@ -64,20 +64,20 @@ static void queue_process(struct work_st continue; } - local_irq_save(flags); + local_irq_save_nort(flags); 
netif_tx_lock(dev); if ((netif_queue_stopped(dev) || netif_subqueue_stopped(dev, skb->queue_mapping)) || dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { skb_queue_head(&npinfo->txq, skb); netif_tx_unlock(dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); schedule_delayed_work(&npinfo->tx_work, HZ/10); return; } netif_tx_unlock(dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } } @@ -122,7 +122,7 @@ static void poll_napi(struct netpoll *np int budget = 16; if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) && - npinfo->poll_owner != smp_processor_id() && + npinfo->poll_owner != raw_smp_processor_id() && spin_trylock(&npinfo->poll_lock)) { npinfo->rx_flags |= NETPOLL_RX_DROP; atomic_inc(&trapped); @@ -156,7 +156,9 @@ void netpoll_poll(struct netpoll *np) return; /* Process pending work on NIC */ +// WARN_ON_RT(irqs_disabled()); np->dev->poll_controller(np->dev); +// WARN_ON_RT(irqs_disabled()); if (np->dev->poll) poll_napi(np); @@ -183,28 +185,31 @@ static void refill_skbs(void) static void zap_completion_queue(void) { - unsigned long flags; struct softnet_data *sd = &get_cpu_var(softnet_data); + struct sk_buff *clist = NULL; + unsigned long flags; if (sd->completion_queue) { - struct sk_buff *clist; - local_irq_save(flags); clist = sd->completion_queue; sd->completion_queue = NULL; local_irq_restore(flags); - - while (clist != NULL) { - struct sk_buff *skb = clist; - clist = clist->next; - if (skb->destructor) - dev_kfree_skb_any(skb); /* put this one back */ - else - __kfree_skb(skb); - } } + /* + * Took the list private, can drop our softnet + * reference: + */ put_cpu_var(softnet_data); + + while (clist != NULL) { + struct sk_buff *skb = clist; + clist = clist->next; + if (skb->destructor) + dev_kfree_skb_any(skb); /* put this one back */ + else + __kfree_skb(skb); + } } static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) @@ -212,13 +217,26 @@ static struct sk_buff *find_skb(struct n int count = 0; struct sk_buff *skb; +#ifdef CONFIG_PREEMPT_RT + /* + * On -rt skb_pool.lock is schedulable, so if we are + * in an atomic context we just try to dequeue from the + * pool and fail if we cannot get one. 
+ */ + if (in_atomic() || irqs_disabled()) + goto pick_atomic; +#endif zap_completion_queue(); refill_skbs(); repeat: skb = alloc_skb(len, GFP_ATOMIC); - if (!skb) + if (!skb) { +#ifdef CONFIG_PREEMPT_RT +pick_atomic: +#endif skb = skb_dequeue(&skb_pool); + } if (!skb) { if (++count < 10) { @@ -247,10 +265,10 @@ static void netpoll_send_skb(struct netp /* don't get messages out of order, and no recursion */ if (skb_queue_len(&npinfo->txq) == 0 && - npinfo->poll_owner != smp_processor_id()) { + npinfo->poll_owner != raw_smp_processor_id()) { unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); /* try until next clock tick */ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { @@ -270,7 +288,7 @@ static void netpoll_send_skb(struct netp udelay(USEC_PER_POLL); } - local_irq_restore(flags); + local_irq_restore_nort(flags); } if (status != NETDEV_TX_OK) { @@ -698,7 +716,7 @@ int netpoll_setup(struct netpoll *np) np->name); break; } - cond_resched(); + schedule_timeout_uninterruptible(1); } /* If carrier appears to come up instantly, we don't Index: linux-rt-rebase.q/net/core/sock.c =================================================================== --- linux-rt-rebase.q.orig/net/core/sock.c +++ linux-rt-rebase.q/net/core/sock.c @@ -1462,7 +1462,7 @@ static void sock_def_readable(struct soc { read_lock(&sk->sk_callback_lock); if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) - wake_up_interruptible(sk->sk_sleep); + wake_up_interruptible_sync(sk->sk_sleep); sk_wake_async(sk,1,POLL_IN); read_unlock(&sk->sk_callback_lock); } Index: linux-rt-rebase.q/net/decnet/dn_dev.c =================================================================== --- linux-rt-rebase.q.orig/net/decnet/dn_dev.c +++ linux-rt-rebase.q/net/decnet/dn_dev.c @@ -89,9 +89,9 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 10, .name = "ethernet", .ctl_name = NET_DECNET_CONF_ETHER, - .up = dn_eth_up, - .down = dn_eth_down, - .timer3 = dn_send_brd_hello, + .dn_up = dn_eth_up, + .dn_down = dn_eth_down, + .dn_timer3 = dn_send_brd_hello, }, { .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */ @@ -101,7 +101,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 10, .name = "ipgre", .ctl_name = NET_DECNET_CONF_GRE, - .timer3 = dn_send_brd_hello, + .dn_timer3 = dn_send_brd_hello, }, #if 0 { @@ -112,7 +112,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 120, .name = "x25", .ctl_name = NET_DECNET_CONF_X25, - .timer3 = dn_send_ptp_hello, + .dn_timer3 = dn_send_ptp_hello, }, #endif #if 0 @@ -124,7 +124,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 10, .name = "ppp", .ctl_name = NET_DECNET_CONF_PPP, - .timer3 = dn_send_brd_hello, + .dn_timer3 = dn_send_brd_hello, }, #endif { @@ -135,7 +135,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 120, .name = "ddcmp", .ctl_name = NET_DECNET_CONF_DDCMP, - .timer3 = dn_send_ptp_hello, + .dn_timer3 = dn_send_ptp_hello, }, { .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */ @@ -145,7 +145,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 10, .name = "loopback", .ctl_name = NET_DECNET_CONF_LOOPBACK, - .timer3 = dn_send_brd_hello, + .dn_timer3 = dn_send_brd_hello, } }; @@ -326,11 +326,11 @@ static int dn_forwarding_proc(ctl_table */ tmp = dn_db->parms.forwarding; dn_db->parms.forwarding = old; - if (dn_db->parms.down) - dn_db->parms.down(dev); + if (dn_db->parms.dn_down) + dn_db->parms.dn_down(dev); dn_db->parms.forwarding = tmp; - if (dn_db->parms.up) - dn_db->parms.up(dev); + if (dn_db->parms.dn_up) + dn_db->parms.dn_up(dev); } 
return err; @@ -364,11 +364,11 @@ static int dn_forwarding_sysctl(ctl_tabl if (value > 2) return -EINVAL; - if (dn_db->parms.down) - dn_db->parms.down(dev); + if (dn_db->parms.dn_down) + dn_db->parms.dn_down(dev); dn_db->parms.forwarding = value; - if (dn_db->parms.up) - dn_db->parms.up(dev); + if (dn_db->parms.dn_up) + dn_db->parms.dn_up(dev); } return 0; @@ -1087,10 +1087,10 @@ static void dn_dev_timer_func(unsigned l struct dn_ifaddr *ifa; if (dn_db->t3 <= dn_db->parms.t2) { - if (dn_db->parms.timer3) { + if (dn_db->parms.dn_timer3) { for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { if (!(ifa->ifa_flags & IFA_F_SECONDARY)) - dn_db->parms.timer3(dev, ifa); + dn_db->parms.dn_timer3(dev, ifa); } } dn_db->t3 = dn_db->parms.t3; @@ -1149,8 +1149,8 @@ struct dn_dev *dn_dev_create(struct net_ return NULL; } - if (dn_db->parms.up) { - if (dn_db->parms.up(dev) < 0) { + if (dn_db->parms.dn_up) { + if (dn_db->parms.dn_up(dev) < 0) { neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); dev->dn_ptr = NULL; kfree(dn_db); @@ -1244,8 +1244,8 @@ static void dn_dev_delete(struct net_dev dn_dev_check_default(dev); neigh_ifdown(&dn_neigh_table, dev); - if (dn_db->parms.down) - dn_db->parms.down(dev); + if (dn_db->parms.dn_down) + dn_db->parms.dn_down(dev); dev->dn_ptr = NULL; Index: linux-rt-rebase.q/net/ipv4/icmp.c =================================================================== --- linux-rt-rebase.q.orig/net/ipv4/icmp.c +++ linux-rt-rebase.q/net/ipv4/icmp.c @@ -230,7 +230,10 @@ static const struct icmp_control icmp_po * On SMP we have one ICMP socket per-cpu. */ static DEFINE_PER_CPU(struct socket *, __icmp_socket) = NULL; -#define icmp_socket __get_cpu_var(__icmp_socket) +/* + * Should be safe on PREEMPT_SOFTIRQS/HARDIRQS to use raw-smp-processor-id: + */ +#define icmp_socket per_cpu(__icmp_socket, raw_smp_processor_id()) static __inline__ int icmp_xmit_lock(void) { Index: linux-rt-rebase.q/net/ipv4/route.c =================================================================== --- linux-rt-rebase.q.orig/net/ipv4/route.c +++ linux-rt-rebase.q/net/ipv4/route.c @@ -205,13 +205,13 @@ struct rt_hash_bucket { struct rtable *chain; }; #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ - defined(CONFIG_PROVE_LOCKING) + defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_PREEMPT_RT) /* * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks * The size of this table is a power of two and depends on the number of CPUS. 
* (on lockdep we have a quite big spinlock_t, so keep the size down there) */ -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT) # define RT_HASH_LOCK_SZ 256 #else # if NR_CPUS >= 32 Index: linux-rt-rebase.q/net/ipv6/netfilter/ip6_tables.c =================================================================== --- linux-rt-rebase.q.orig/net/ipv6/netfilter/ip6_tables.c +++ linux-rt-rebase.q/net/ipv6/netfilter/ip6_tables.c @@ -380,7 +380,7 @@ ip6t_do_table(struct sk_buff **pskb, read_lock_bh(&table->lock); private = table->private; IP_NF_ASSERT(table->valid_hooks & (1 << hook)); - table_base = (void *)private->entries[smp_processor_id()]; + table_base = (void *)private->entries[raw_smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); /* For return from builtin chain */ @@ -1190,7 +1190,7 @@ do_add_counters(void __user *user, unsig i = 0; /* Choose the copy that is on our node */ - loc_cpu_entry = private->entries[smp_processor_id()]; + loc_cpu_entry = private->entries[raw_smp_processor_id()]; IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, add_counter_to_entry, Index: linux-rt-rebase.q/net/sched/sch_generic.c =================================================================== --- linux-rt-rebase.q.orig/net/sched/sch_generic.c +++ linux-rt-rebase.q/net/sched/sch_generic.c @@ -12,6 +12,7 @@ */ #include +#include #include #include #include @@ -24,6 +25,7 @@ #include #include #include +#include #include /* Main transmission queue. */ @@ -150,16 +152,28 @@ static inline int qdisc_restart(struct n */ lockless = (dev->features & NETIF_F_LLTX); - if (!lockless && !netif_tx_trylock(dev)) { - /* Another CPU grabbed the driver tx lock */ - return handle_dev_cpu_collision(skb, dev, q); + if (!lockless) { +#ifdef CONFIG_PREEMPT_RT + netif_tx_lock(dev); +#else + if (!netif_tx_trylock(dev)) + /* Another CPU grabbed the driver tx lock */ + return handle_dev_cpu_collision(skb, dev, q); +#endif } /* And release queue */ spin_unlock(&dev->queue_lock); + WARN_ON_RT(irqs_disabled()); ret = dev_hard_start_xmit(skb, dev); - +#ifdef CONFIG_PREEMPT_RT + if (irqs_disabled()) { + if (printk_ratelimit()) + print_symbol("network driver disabled raw interrupts: %s\n", (unsigned long)dev->hard_start_xmit); + local_irq_enable(); + } +#endif if (!lockless) netif_tx_unlock(dev); @@ -576,9 +590,12 @@ void dev_deactivate(struct net_device *d /* Wait for outstanding dev_queue_xmit calls. */ synchronize_rcu(); - /* Wait for outstanding qdisc_run calls. */ + /* + * Wait for outstanding qdisc_run calls. + * TODO: shouldnt this be wakeup-based, instead of polling it? 
+ */ while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) - yield(); + msleep(1); } void dev_init_scheduler(struct net_device *dev) Index: linux-rt-rebase.q/net/unix/af_unix.c =================================================================== --- linux-rt-rebase.q.orig/net/unix/af_unix.c +++ linux-rt-rebase.q/net/unix/af_unix.c @@ -333,10 +333,11 @@ static void unix_write_space(struct sock read_lock(&sk->sk_callback_lock); if (unix_writable(sk)) { if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) - wake_up_interruptible(sk->sk_sleep); + wake_up_interruptible_sync(sk->sk_sleep); sk_wake_async(sk, 2, POLL_OUT); } read_unlock(&sk->sk_callback_lock); + preempt_check_resched_delayed(); } /* When dgram socket disconnects (or changes its peer), we clear its receive patches/latency-measurement-drivers.patch0000664000077200007720000004527310655544573020231 0ustar mingomingo this patch adds: - histogram support to /dev/rtc - the /dev/blocker lock-latency test-device - the /dev/lpptest parallel-port irq latency test-device drivers/char/Kconfig | 40 ++++++++++ drivers/char/Makefile | 2 drivers/char/blocker.c | 109 +++++++++++++++++++++++++++++ drivers/char/lpptest.c | 178 ++++++++++++++++++++++++++++++++++++++++++++++++ drivers/char/rtc.c | 179 ++++++++++++++++++++++++++++++++++++++++++++++++- scripts/Makefile | 3 scripts/testlpp.c | 159 +++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 668 insertions(+), 2 deletions(-) Index: linux/drivers/char/Kconfig =================================================================== --- linux.orig/drivers/char/Kconfig +++ linux/drivers/char/Kconfig @@ -773,6 +773,46 @@ config JS_RTC To compile this driver as a module, choose M here: the module will be called js-rtc. +config RTC_HISTOGRAM + bool "Real Time Clock Histogram Support" + default n + depends on RTC + ---help--- + If you say Y here then the kernel will track the delivery and + wakeup latency of /dev/rtc using tasks and will report a + histogram to the kernel log when the application closes /dev/rtc. + +config BLOCKER + tristate "Priority Inheritance Debugging (Blocker) Device Support" + depends on X86 + default y + ---help--- + If you say Y here then a device will be created that the userspace + pi_test suite uses to test and measure kernel locking primitives. + +config LPPTEST + tristate "Parallel Port Based Latency Measurement Device" + depends on !PARPORT && X86 + default y + ---help--- + If you say Y here then a device will be created that the userspace + testlpp utility uses to measure IRQ latencies of a target system + from an independent measurement system. + + NOTE: this code assumes x86 PCs and that the parallel port is + bidirectional and is on IRQ 7. + + to use the device, both the target and the source system needs to + run a kernel with CONFIG_LPPTEST enabled. To measure latencies, + use the scripts/testlpp utility in your kernel source directory, + and run it (as root) on the source system - it will start printing + out the latencies it took to get a response from the target system: + + Latency of response: 12.2 usecs (121265 cycles) + + then generate various workloads on the target system to see how + (worst-case-) latencies are impacted. 
+ config SGI_DS1286 tristate "SGI DS1286 RTC support" depends on SGI_IP22 Index: linux/drivers/char/Makefile =================================================================== --- linux.orig/drivers/char/Makefile +++ linux/drivers/char/Makefile @@ -85,6 +85,8 @@ obj-$(CONFIG_TOSHIBA) += toshiba.o obj-$(CONFIG_I8K) += i8k.o obj-$(CONFIG_DS1620) += ds1620.o obj-$(CONFIG_HW_RANDOM) += hw_random/ +obj-$(CONFIG_BLOCKER) += blocker.o +obj-$(CONFIG_LPPTEST) += lpptest.o obj-$(CONFIG_COBALT_LCD) += lcd.o obj-$(CONFIG_PPDEV) += ppdev.o obj-$(CONFIG_NWBUTTON) += nwbutton.o Index: linux/drivers/char/blocker.c =================================================================== --- /dev/null +++ linux/drivers/char/blocker.c @@ -0,0 +1,109 @@ +/* + * priority inheritance testing device + */ + +#include +#include +#include +#include + +#define BLOCKER_MINOR 221 + +#define BLOCK_IOCTL 4245 +#define BLOCK_SET_DEPTH 4246 + +#define BLOCKER_MAX_LOCK_DEPTH 10 + +void loop(int loops) +{ + int i; + + for (i = 0; i < loops; i++) + get_cycles(); +} + +static spinlock_t blocker_lock[BLOCKER_MAX_LOCK_DEPTH]; + +static unsigned int lock_depth = 1; + +void do_the_lock_and_loop(unsigned int args) +{ + int i, max; + + if (rt_task(current)) + max = lock_depth; + else if (lock_depth > 1) + max = (current->pid % lock_depth) + 1; + else + max = 1; + + /* Always lock from the top down */ + for (i = max-1; i >= 0; i--) + spin_lock(&blocker_lock[i]); + loop(args); + for (i = 0; i < max; i++) + spin_unlock(&blocker_lock[i]); +} + +static int blocker_open(struct inode *in, struct file *file) +{ + printk(KERN_INFO "blocker_open called\n"); + + return 0; +} + +static long blocker_ioctl(struct file *file, + unsigned int cmd, unsigned long args) +{ + switch(cmd) { + case BLOCK_IOCTL: + do_the_lock_and_loop(args); + return 0; + case BLOCK_SET_DEPTH: + if (args >= BLOCKER_MAX_LOCK_DEPTH) + return -EINVAL; + lock_depth = args; + return 0; + default: + return -EINVAL; + } +} + +static struct file_operations blocker_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = blocker_ioctl, + .open = blocker_open, +}; + +static struct miscdevice blocker_dev = +{ + BLOCKER_MINOR, + "blocker", + &blocker_fops +}; + +static int __init blocker_init(void) +{ + int i; + + if (misc_register(&blocker_dev)) + return -ENODEV; + + for (i = 0; i < BLOCKER_MAX_LOCK_DEPTH; i++) + spin_lock_init(blocker_lock + i); + + return 0; +} + +void __exit blocker_exit(void) +{ + printk(KERN_INFO "blocker device uninstalled\n"); + misc_deregister(&blocker_dev); +} + +module_init(blocker_init); +module_exit(blocker_exit); + +MODULE_LICENSE("GPL"); + Index: linux/drivers/char/lpptest.c =================================================================== --- /dev/null +++ linux/drivers/char/lpptest.c @@ -0,0 +1,178 @@ +/* + * /dev/lpptest device: test IRQ handling latencies over parallel port + * + * Copyright (C) 2005 Thomas Gleixner, Ingo Molnar + * + * licensed under the GPL + * + * You need to have CONFIG_PARPORT disabled for this device, it is a + * completely self-contained device that assumes sole ownership of the + * parallel port. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * API wrappers so that the code can be shared with the -rt tree: + */ +#ifndef local_irq_disable +# define local_irq_disable local_irq_disable +# define local_irq_enable local_irq_enable +#endif + +#ifndef IRQ_NODELAY +# define IRQ_NODELAY 0 +# define IRQF_NODELAY 0 +#endif + +/* + * Driver: + */ +#define LPPTEST_CHAR_MAJOR 245 +#define LPPTEST_DEVICE_NAME "lpptest" + +#define LPPTEST_IRQ 7 + +#define LPPTEST_TEST _IOR (LPPTEST_CHAR_MAJOR, 1, unsigned long long) +#define LPPTEST_DISABLE _IOR (LPPTEST_CHAR_MAJOR, 2, unsigned long long) +#define LPPTEST_ENABLE _IOR (LPPTEST_CHAR_MAJOR, 3, unsigned long long) + +static char dev_id[] = "lpptest"; + +#define INIT_PORT() outb(0x04, 0x37a) +#define ENABLE_IRQ() outb(0x10, 0x37a) +#define DISABLE_IRQ() outb(0, 0x37a) + +static unsigned char out = 0x5a; + +/** + * Interrupt handler. Flip a bit in the reply. + */ +static int lpptest_irq (int irq, void *dev_id) +{ + out ^= 0xff; + outb(out, 0x378); + + return IRQ_HANDLED; +} + +static cycles_t test_response(void) +{ + cycles_t now, end; + unsigned char in; + int timeout = 0; + + local_irq_disable(); + in = inb(0x379); + inb(0x378); + outb(0x08, 0x378); + now = get_cycles(); + while(1) { + if (inb(0x379) != in) + break; + if (timeout++ > 1000000) { + outb(0x00, 0x378); + local_irq_enable(); + + return 0; + } + } + end = get_cycles(); + outb(0x00, 0x378); + local_irq_enable(); + + return end - now; +} + +static int lpptest_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int lpptest_close(struct inode *inode, struct file *file) +{ + return 0; +} + +int lpptest_ioctl(struct inode *inode, struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) +{ + int retval = 0; + + switch (ioctl_num) { + + case LPPTEST_DISABLE: + DISABLE_IRQ(); + break; + + case LPPTEST_ENABLE: + ENABLE_IRQ(); + break; + + case LPPTEST_TEST: { + + cycles_t diff = test_response(); + if (copy_to_user((void *)ioctl_param, (void*) &diff, sizeof(diff))) + goto errcpy; + break; + } + default: retval = -EINVAL; + } + + return retval; + + errcpy: + return -EFAULT; +} + +static struct file_operations lpptest_dev_fops = { + .ioctl = lpptest_ioctl, + .open = lpptest_open, + .release = lpptest_close, +}; + +static int __init lpptest_init (void) +{ + if (register_chrdev(LPPTEST_CHAR_MAJOR, LPPTEST_DEVICE_NAME, &lpptest_dev_fops)) + { + printk(KERN_NOTICE "Can't allocate major number %d for lpptest.\n", + LPPTEST_CHAR_MAJOR); + return -EAGAIN; + } + + if (request_irq (LPPTEST_IRQ, lpptest_irq, 0, "lpptest", dev_id)) { + printk (KERN_WARNING "lpptest: irq %d in use. 
Unload parport module!\n", LPPTEST_IRQ); + unregister_chrdev(LPPTEST_CHAR_MAJOR, LPPTEST_DEVICE_NAME); + return -EAGAIN; + } + irq_desc[LPPTEST_IRQ].status |= IRQ_NODELAY; + irq_desc[LPPTEST_IRQ].action->flags |= IRQF_NODELAY | IRQF_DISABLED; + + INIT_PORT(); + ENABLE_IRQ(); + + return 0; +} +module_init (lpptest_init); + +static void __exit lpptest_exit (void) +{ + DISABLE_IRQ(); + + free_irq(LPPTEST_IRQ, dev_id); + unregister_chrdev(LPPTEST_CHAR_MAJOR, LPPTEST_DEVICE_NAME); +} +module_exit (lpptest_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("lpp test module"); + Index: linux/drivers/char/rtc.c =================================================================== --- linux.orig/drivers/char/rtc.c +++ linux/drivers/char/rtc.c @@ -90,6 +90,32 @@ #include #include +#ifdef CONFIG_MIPS +# include +#endif + +#ifdef CONFIG_RTC_HISTOGRAM + +static cycles_t last_interrupt_time; + +#include + +#define CPU_MHZ (cpu_khz / 1000) + +#define HISTSIZE 10000 +static int histogram[HISTSIZE]; + +static int rtc_state; + +enum rtc_states { + S_STARTUP, /* First round - let the application start */ + S_IDLE, /* Waiting for an interrupt */ + S_WAITING_FOR_READ, /* Signal delivered. waiting for rtc_read() */ + S_READ_MISSED, /* Signal delivered, read() deadline missed */ +}; + +#endif + static unsigned long rtc_port; static int rtc_irq = PCI_IRQ_NONE; #endif @@ -222,7 +248,146 @@ static inline unsigned char rtc_is_updat return uip; } +#ifndef RTC_IRQ +# undef CONFIG_RTC_HISTOGRAM +#endif + +static inline void rtc_open_event(void) +{ +#ifdef CONFIG_RTC_HISTOGRAM + int i; + + last_interrupt_time = 0; + rtc_state = S_STARTUP; + rtc_irq_data = 0; + + for (i = 0; i < HISTSIZE; i++) + histogram[i] = 0; +#endif +} + +static inline void rtc_wake_event(void) +{ +#ifndef CONFIG_RTC_HISTOGRAM + kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); +#else + if (!(rtc_status & RTC_IS_OPEN)) + return; + + switch (rtc_state) { + /* Startup */ + case S_STARTUP: + kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); + break; + /* Waiting for an interrupt */ + case S_IDLE: + kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); + last_interrupt_time = get_cycles(); + rtc_state = S_WAITING_FOR_READ; + break; + + /* Signal has been delivered. waiting for rtc_read() */ + case S_WAITING_FOR_READ: + /* + * Well foo. The usermode application didn't + * schedule and read in time. + */ + last_interrupt_time = get_cycles(); + rtc_state = S_READ_MISSED; + printk("Read missed before next interrupt\n"); + break; + /* Signal has been delivered, read() deadline was missed */ + case S_READ_MISSED: + /* + * Not much we can do here. We're waiting for the usermode + * application to read the rtc + */ + last_interrupt_time = get_cycles(); + break; + } +#endif +} + +static inline void rtc_read_event(void) +{ +#ifdef CONFIG_RTC_HISTOGRAM + cycles_t now = get_cycles(); + + switch (rtc_state) { + /* Startup */ + case S_STARTUP: + rtc_state = S_IDLE; + break; + + /* Waiting for an interrupt */ + case S_IDLE: + printk("bug in rtc_read(): called in state S_IDLE!\n"); + break; + case S_WAITING_FOR_READ: /* + * Signal has been delivered. + * waiting for rtc_read() + */ + /* + * Well done + */ + case S_READ_MISSED: /* + * Signal has been delivered, read() + * deadline was missed + */ + /* + * So, you finally got here. 
+ */ + if (!last_interrupt_time) + printk("bug in rtc_read(): last_interrupt_time = 0\n"); + rtc_state = S_IDLE; + { + cycles_t latency = now - last_interrupt_time; + unsigned long delta; /* Microseconds */ + + delta = latency; + delta /= CPU_MHZ; + + if (delta > 1000 * 1000) { + printk("rtc: eek\n"); + } else { + unsigned long slot = delta; + if (slot >= HISTSIZE) + slot = HISTSIZE - 1; + histogram[slot]++; + if (delta > 2000) + printk("wow! That was a " + "%ld millisec bump\n", + delta / 1000); + } + } + rtc_state = S_IDLE; + break; + } +#endif +} + +static inline void rtc_close_event(void) +{ +#ifdef CONFIG_RTC_HISTOGRAM + int i = 0; + unsigned long total = 0; + + for (i = 0; i < HISTSIZE; i++) + total += histogram[i]; + if (!total) + return; + + printk("\nrtc latency histogram of {%s/%d, %lu samples}:\n", + current->comm, current->pid, total); + for (i = 0; i < HISTSIZE; i++) { + if (histogram[i]) + printk("%d %d\n", i, histogram[i]); + } +#endif +} + #ifdef RTC_IRQ + /* * A very tiny interrupt handler. It runs with IRQF_DISABLED set, * but there is possibility of conflicting with the set_rtc_mmss() @@ -266,9 +431,9 @@ irqreturn_t rtc_interrupt(int irq, void if (rtc_callback) rtc_callback->func(rtc_callback->private_data); spin_unlock(&rtc_task_lock); - wake_up_interruptible(&rtc_wait); - kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); + rtc_wake_event(); + wake_up_interruptible(&rtc_wait); return IRQ_HANDLED; } @@ -378,6 +543,8 @@ static ssize_t rtc_read(struct file *fil schedule(); } while (1); + rtc_read_event(); + if (count == sizeof(unsigned int)) retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int); else @@ -610,6 +777,11 @@ static int rtc_do_ioctl(unsigned int cmd save_freq_select = CMOS_READ(RTC_FREQ_SELECT); CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + /* + * Make CMOS date writes nonpreemptible even on PREEMPT_RT. + * There's a limit to everything! 
=B-) + */ + preempt_disable(); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); #endif @@ -619,6 +791,7 @@ static int rtc_do_ioctl(unsigned int cmd CMOS_WRITE(hrs, RTC_HOURS); CMOS_WRITE(min, RTC_MINUTES); CMOS_WRITE(sec, RTC_SECONDS); + preempt_enable(); CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); @@ -717,6 +890,7 @@ static int rtc_open(struct inode *inode, if(rtc_status & RTC_IS_OPEN) goto out_busy; + rtc_open_event(); rtc_status |= RTC_IS_OPEN; rtc_irq_data = 0; @@ -772,6 +946,7 @@ no_irq: rtc_irq_data = 0; rtc_status &= ~RTC_IS_OPEN; spin_unlock_irq (&rtc_lock); + rtc_close_event(); return 0; } Index: linux/scripts/Makefile =================================================================== --- linux.orig/scripts/Makefile +++ linux/scripts/Makefile @@ -13,6 +13,9 @@ hostprogs-$(CONFIG_LOGO) += pnmt hostprogs-$(CONFIG_VT) += conmakehash hostprogs-$(CONFIG_PROM_CONSOLE) += conmakehash hostprogs-$(CONFIG_IKCONFIG) += bin2c +ifdef CONFIG_LPPTEST +hostprogs-y += testlpp +endif always := $(hostprogs-y) $(hostprogs-m) Index: linux/scripts/testlpp.c =================================================================== --- /dev/null +++ linux/scripts/testlpp.c @@ -0,0 +1,159 @@ +/* + * testlpp.c: use the /dev/lpptest device to test IRQ handling + * latencies over parallel port + * + * Copyright (C) 2005 Thomas Gleixner + * + * licensed under the GPL + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LPPTEST_CHAR_MAJOR 245 +#define LPPTEST_DEVICE_NAME "lpptest" + +#define LPPTEST_TEST _IOR (LPPTEST_CHAR_MAJOR, 1, unsigned long long) +#define LPPTEST_DISABLE _IOR (LPPTEST_CHAR_MAJOR, 2, unsigned long long) +#define LPPTEST_ENABLE _IOR (LPPTEST_CHAR_MAJOR, 3, unsigned long long) + +#define HIST_SIZE 10000 + +static int hist_total; +static unsigned long hist[HIST_SIZE]; + +static void hist_hit(unsigned long usecs) +{ + hist_total++; + if (usecs >= HIST_SIZE-1) + hist[HIST_SIZE-1]++; + else + hist[usecs]++; +} + +static void print_hist(void) +{ + int i; + + printf("LPP latency histogram:\n"); + + for (i = 0; i < HIST_SIZE; i++) { + if (hist[i]) + printf("%3d usecs: %9ld\n", i, hist[i]); + } +} + +static inline unsigned long long int rdtsc(void) +{ + unsigned long long int x, y; + for (;;) { + __asm__ volatile ("rdtsc" : "=A" (x)); + __asm__ volatile ("rdtsc" : "=A" (y)); + if (y - x < 1000) + return y; + } +} + +static unsigned long long calibrate_loop(void) +{ + unsigned long long mytime1, mytime2; + + mytime1 = rdtsc(); + usleep(500000); + mytime2 = rdtsc(); + + return (mytime2 - mytime1) * 2; +} + +#define time_to_usecs(time) ((double)time*1000000.0/(double)cycles_per_sec) + +#define time_to_usecs_l(time) (long)(time*1000000/cycles_per_sec) + +int fd, total; +unsigned long long tim, sum_tim, min_tim = -1ULL, max_tim, cycles_per_sec; + +void cleanup(int sig) +{ + ioctl (fd, LPPTEST_ENABLE, &tim); + if (sig) + printf("[ interrupted - exiting ]\n"); + printf("\ntotal number of responses: %d\n", total); + printf("average reponse latency: %.2lf usecs\n", + time_to_usecs(sum_tim/total)); + printf("minimum latency: %.2lf usecs\n", + time_to_usecs(min_tim)); + printf("maximum latency: %.2lf usecs\n", + time_to_usecs(max_tim)); + print_hist(); + exit(0); +} + +#define HZ 3000 + +int main (int argc, char **argv) +{ + unsigned int nr_requests = 0; + + if (argc > 2) { + fprintf(stderr, "usage: testlpp []\n"); + exit(-1); + } + if (argc == 2) + nr_requests = atol(argv[1]); + + if 
(getuid() != 0) { + fprintf(stderr, "need to run as root!\n"); + exit(-1); + } + mknod("/dev/lpptest", S_IFCHR|0666, makedev(245, 1)); + + fd = open("/dev/lpptest", O_RDWR); + if (fd == -1) { + fprintf(stderr, "could not open /dev/lpptest, your kernel doesnt have CONFIG_LPPTEST enabled?\n"); + exit(-1); + } + + signal(SIGINT,&cleanup); + + ioctl (fd, LPPTEST_DISABLE, &tim); + + fprintf(stderr, "calibrating cycles to usecs: "); + cycles_per_sec = calibrate_loop(); + fprintf(stderr, "%lld cycles per usec\n", cycles_per_sec/1000000); + if (nr_requests) + fprintf(stderr, "[max # of requests: %u]\n", nr_requests); + fprintf(stderr, "starting %dHz test, hit Ctrl-C to stop:\n\n", HZ); + + while(1) { + ioctl (fd, LPPTEST_TEST, &tim); + if (tim == 0) + printf ("No response from target.\n"); + else { + hist_hit(time_to_usecs_l(tim)); + if (tim > max_tim) { + printf ("new max latency: %.2lf usecs (%Ld cycles)\n", time_to_usecs(tim), tim); + max_tim = tim; + } + if (tim < min_tim) + min_tim = tim; + total++; + if (total == nr_requests) + break; + sum_tim += tim; + } + usleep(1000000/HZ); + } + cleanup(0); + + return 0; +} + + patches/x86_64-use-i386-i8253-h.patch0000664000077200007720000000311010655544570015764 0ustar mingomingoSubject: i386: prepare sharing the PIT code PIT clock events work already and the PIT handling is the same for i386 and x86_64. x86_64 does not support PIT as a clock source, so disable the PIT clocksource for x86_64. Use the i386 i8253.h include file for x86_64 as well to share the exports and the PIT constants. Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/time.c | 4 ---- include/asm-x86_64/i8253.h | 8 ++------ 2 files changed, 2 insertions(+), 10 deletions(-) Index: linux/arch/x86_64/kernel/time.c =================================================================== --- linux.orig/arch/x86_64/kernel/time.c +++ linux/arch/x86_64/kernel/time.c @@ -32,7 +32,6 @@ #include /* for PM timer frequency */ #include #endif -#include #include #include #include @@ -292,9 +291,6 @@ static unsigned int __init tsc_calibrate return pmc_now * tsc_khz / (tsc_now - tsc_start); } -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 - static void __pit_init(int val, u8 mode) { unsigned long flags; Index: linux/include/asm-x86_64/i8253.h =================================================================== --- linux.orig/include/asm-x86_64/i8253.h +++ linux/include/asm-x86_64/i8253.h @@ -1,6 +1,2 @@ -#ifndef __ASM_I8253_H__ -#define __ASM_I8253_H__ - -extern spinlock_t i8253_lock; - -#endif /* __ASM_I8253_H__ */ +#include +#include patches/radix-tree-concurrent.patch0000664000077200007720000004657110655544576017024 0ustar mingomingoSubject: radix-tree: concurrent write side support Provide support for concurrent write side operations without changing the API for all current uses. Concurrency is realized by means of two locking models; the simple one is ladder locking, the more complex one is path locking. Ladder locking is like walking down a ladder, you place your foot on a spoke below the one your other foot finds support etc.. There is no walking with both feet in the air. Likewise with walking a tree, you lock a node below the current node before releasing it. This allows other modifying operations to start as soon as you release the lock on the root node and even complete before you if they walk another path downward. The modifying operations: insert, lookup_slot and set_tag, use this simple method. 
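To make the ladder model concrete, here is a minimal editorial sketch of hand-over-hand (lock-coupling) descent on a generic tree. It is not code from this patch: struct ladder_node, ladder_lookup() and the 16-way fan-out are invented purely for illustration; the patch itself threads the same pattern through radix_ladder_lock() and struct radix_tree_context further down.

#include <linux/spinlock.h>

/*
 * Editorial illustration only, not part of the patch.  A toy 16-way tree
 * walked with hand-over-hand ("ladder") locking: the lock of the next level
 * is taken before the lock of the current level is dropped, so at most two
 * locks are held at any time and other writers may follow down other paths.
 */
struct ladder_node {
	spinlock_t		lock;
	struct ladder_node	*child[16];
	void			*item;
};

static void *ladder_lookup(struct ladder_node *root, const int *path, int depth)
{
	struct ladder_node *node = root, *next;
	void *item = NULL;
	int level;

	spin_lock(&node->lock);
	for (level = 0; level < depth; level++) {
		next = node->child[path[level]];
		if (!next)
			goto out;
		spin_lock(&next->lock);		/* foot on the lower spoke ... */
		spin_unlock(&node->lock);	/* ... then lift the upper one */
		node = next;
	}
	item = node->item;
out:
	spin_unlock(&node->lock);
	return item;
}

Because the root lock is dropped as soon as the first child is locked, a second modification can start its descent immediately and may even finish first if it takes a different branch; that is exactly the concurrency the ladder model is meant to buy.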
The more complex path locking method is needed for operations that need to walk upwards again after they walked down, those are: tag_clear and delete. These lock their whole path downwards and release whole sections at points where it can be determined the walk upwards will stop, thus also allowing concurrency. Finding the conditions for the terminated walk upwards while doing the downward walk is the 'interesting' part of this approach. The remaining - unmodified - operations will have exclusive locking (since they're unmodified, they never move the lock downwards from the root node). The API for this looks like: DEFINE_RADIX_TREE_CONTEXT(ctx, &mapping->page_tree) radix_tree_lock(&ctx) ... do _1_ modifying operation ... radix_tree_unlock(&ctx) Note that before the radix operation the root node is held and will provide exclusive locking, after the operation the held lock might only be enough to protect a single item. Signed-off-by: Peter Zijlstra --- include/linux/radix-tree.h | 77 +++++++++++- init/Kconfig | 4 lib/radix-tree.c | 283 ++++++++++++++++++++++++++++++++++++--------- 3 files changed, 302 insertions(+), 62 deletions(-) Index: linux-rt-rebase.q/include/linux/radix-tree.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/radix-tree.h +++ linux-rt-rebase.q/include/linux/radix-tree.h @@ -62,23 +62,65 @@ struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; + spinlock_t lock; }; #define RADIX_TREE_INIT(mask) { \ .height = 0, \ .gfp_mask = (mask), \ .rnode = NULL, \ + .lock = __SPIN_LOCK_UNLOCKED(radix_tree_root.lock), \ } #define RADIX_TREE(name, mask) \ struct radix_tree_root name = RADIX_TREE_INIT(mask) -#define INIT_RADIX_TREE(root, mask) \ -do { \ - (root)->height = 0; \ - (root)->gfp_mask = (mask); \ - (root)->rnode = NULL; \ -} while (0) +static inline void INIT_RADIX_TREE(struct radix_tree_root *root, gfp_t gfp_mask) +{ + root->height = 0; + root->gfp_mask = gfp_mask; + root->rnode = NULL; + spin_lock_init(&root->lock); +} + +struct radix_tree_context { + struct radix_tree_root *tree; + struct radix_tree_root *root; +#ifdef CONFIG_RADIX_TREE_CONCURRENT + spinlock_t *locked; +#endif +}; + +#ifdef CONFIG_RADIX_TREE_CONCURRENT +#define RADIX_CONTEXT_ROOT(context) \ + ((struct radix_tree_root *)(((unsigned long)context) + 1)) + +#define __RADIX_TREE_CONTEXT_INIT(context, _tree) \ + .tree = RADIX_CONTEXT_ROOT(&context), \ + .locked = NULL, +#else +#define __RADIX_TREE_CONTEXT_INIT(context, _tree) \ + .tree = (_tree), +#endif + +#define DEFINE_RADIX_TREE_CONTEXT(context, _tree) \ + struct radix_tree_context context = { \ + .root = (_tree), \ + __RADIX_TREE_CONTEXT_INIT(context, _tree) \ + } + +static inline void +init_radix_tree_context(struct radix_tree_context *ctx, + struct radix_tree_root *root) +{ + ctx->root = root; +#ifdef CONFIG_RADIX_TREE_CONCURRENT + ctx->tree = RADIX_CONTEXT_ROOT(ctx); + ctx->locked = NULL; +#else + ctx->tree = root; +#endif +} /** * Radix-tree synchronization @@ -155,6 +197,29 @@ static inline void radix_tree_replace_sl rcu_assign_pointer(*pslot, item); } +static inline void radix_tree_lock(struct radix_tree_context *context) +{ + struct radix_tree_root *root = context->root; + rcu_read_lock(); + spin_lock(&root->lock); +#ifdef CONFIG_RADIX_TREE_CONCURRENT + BUG_ON(context->locked); + context->locked = &root->lock; +#endif +} + +static inline void radix_tree_unlock(struct radix_tree_context *context) +{ +#ifdef CONFIG_RADIX_TREE_CONCURRENT + 
BUG_ON(!context->locked); + spin_unlock(context->locked); + context->locked = NULL; +#else + spin_unlock(&context->root->lock); +#endif + rcu_read_unlock(); +} + int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); void *radix_tree_lookup(struct radix_tree_root *, unsigned long); void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); Index: linux-rt-rebase.q/init/Kconfig =================================================================== --- linux-rt-rebase.q.orig/init/Kconfig +++ linux-rt-rebase.q/init/Kconfig @@ -350,6 +350,10 @@ config CC_OPTIMIZE_FOR_SIZE config SYSCTL bool +config RADIX_TREE_CONCURRENT + bool "Enable concurrent radix tree operations (EXPERIMENTAL)" + default y if SMP + menuconfig EMBEDDED bool "Configure standard kernel features (for small systems)" help Index: linux-rt-rebase.q/lib/radix-tree.c =================================================================== --- linux-rt-rebase.q.orig/lib/radix-tree.c +++ linux-rt-rebase.q/lib/radix-tree.c @@ -32,6 +32,7 @@ #include #include #include +#include #ifdef __KERNEL__ @@ -52,11 +53,17 @@ struct radix_tree_node { struct rcu_head rcu_head; void *slots[RADIX_TREE_MAP_SIZE]; unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; +#ifdef CONFIG_RADIX_TREE_CONCURRENT + spinlock_t lock; +#endif }; struct radix_tree_path { struct radix_tree_node *node; int offset; +#ifdef CONFIG_RADIX_TREE_CONCURRENT + spinlock_t *locked; +#endif }; #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) @@ -64,6 +71,10 @@ struct radix_tree_path { static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH] __read_mostly; +#ifdef CONFIG_RADIX_TREE_CONCURRENT +static struct lock_class_key radix_node_class[RADIX_TREE_MAX_PATH]; +#endif + /* * Radix tree node cache. */ @@ -88,7 +99,7 @@ static inline gfp_t root_gfp_mask(struct * that the caller has pinned this thread of control to the current CPU. */ static struct radix_tree_node * -radix_tree_node_alloc(struct radix_tree_root *root) +radix_tree_node_alloc(struct radix_tree_root *root, int height) { struct radix_tree_node *ret; gfp_t gfp_mask = root_gfp_mask(root); @@ -106,6 +117,11 @@ radix_tree_node_alloc(struct radix_tree_ put_cpu_var(radix_tree_preloads); } BUG_ON(radix_tree_is_indirect_ptr(ret)); +#ifdef CONFIG_RADIX_TREE_CONCURRENT + spin_lock_init(&ret->lock); + lockdep_set_class(&ret->lock, &radix_node_class[height]); +#endif + ret->height = height; return ret; } @@ -211,6 +227,22 @@ static inline int any_tag_set(struct rad return 0; } +static inline int any_tag_set_but(struct radix_tree_node *node, + unsigned int tag, int offset) +{ + int idx; + int offset_idx = offset / BITS_PER_LONG; + unsigned long offset_mask = ~(1UL << (offset % BITS_PER_LONG)); + for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { + unsigned long mask = ~0UL; + if (idx == offset_idx) + mask = offset_mask; + if (node->tags[tag][idx] & mask) + return 1; + } + return 0; +} + /* * Return the maximum key which can be store into a * radix tree with height HEIGHT. @@ -240,8 +272,8 @@ static int radix_tree_extend(struct radi } do { - unsigned int newheight; - if (!(node = radix_tree_node_alloc(root))) + unsigned int newheight = root->height + 1; + if (!(node = radix_tree_node_alloc(root, newheight))) return -ENOMEM; /* Increase the height. 
*/ @@ -253,8 +285,6 @@ static int radix_tree_extend(struct radi tag_set(node, tag, 0); } - newheight = root->height+1; - node->height = newheight; node->count = 1; node = radix_tree_ptr_to_indirect(node); rcu_assign_pointer(root->rnode, node); @@ -264,6 +294,80 @@ out: return 0; } +#ifdef CONFIG_RADIX_TREE_CONCURRENT +static inline struct radix_tree_context * +radix_tree_get_context(struct radix_tree_root **rootp) +{ + struct radix_tree_context *context = NULL; + unsigned long addr = (unsigned long)*rootp; + + if (addr & 1) { + context = (struct radix_tree_context *)(addr - 1); + *rootp = context->root; + } + + return context; +} + +#define RADIX_TREE_CONTEXT(context, root) \ + struct radix_tree_context *context = \ + radix_tree_get_context(&root) + +static inline spinlock_t *radix_node_lock(struct radix_tree_root *root, + struct radix_tree_node *node) +{ + spinlock_t *locked = &node->lock; + spin_lock(locked); + return locked; +} + +static inline void radix_ladder_lock(struct radix_tree_context *context, + struct radix_tree_node *node) +{ + if (context) { + struct radix_tree_root *root = context->root; + spinlock_t *locked = radix_node_lock(root, node); + if (locked) { + spin_unlock(context->locked); + context->locked = locked; + } + } +} + +static inline void radix_path_init(struct radix_tree_context *context, + struct radix_tree_path *pathp) +{ + pathp->locked = context ? context->locked : NULL; +} + +static inline void radix_path_lock(struct radix_tree_context *context, + struct radix_tree_path *pathp, struct radix_tree_node *node) +{ + if (context) { + struct radix_tree_root *root = context->root; + spinlock_t *locked = radix_node_lock(root, node); + if (locked) + context->locked = locked; + pathp->locked = locked; + } else + pathp->locked = NULL; +} + +static inline void radix_path_unlock(struct radix_tree_context *context, + struct radix_tree_path *punlock) +{ + if (context && punlock->locked && + context->locked != punlock->locked) + spin_unlock(punlock->locked); +} +#else +#define RADIX_TREE_CONTEXT(context, root) do { } while (0) +#define radix_ladder_lock(context, node) do { } while (0) +#define radix_path_init(context, pathp) do { } while (0) +#define radix_path_lock(context, pathp, node) do { } while (0) +#define radix_path_unlock(context, punlock) do { } while (0) +#endif + /** * radix_tree_insert - insert into a radix tree * @root: radix tree root @@ -279,6 +383,8 @@ int radix_tree_insert(struct radix_tree_ unsigned int height, shift; int offset; int error; + int tag; + RADIX_TREE_CONTEXT(context, root); BUG_ON(radix_tree_is_indirect_ptr(item)); @@ -298,9 +404,8 @@ int radix_tree_insert(struct radix_tree_ while (height > 0) { if (slot == NULL) { /* Have to add a child node. 
*/ - if (!(slot = radix_tree_node_alloc(root))) + if (!(slot = radix_tree_node_alloc(root, height))) return -ENOMEM; - slot->height = height; if (node) { rcu_assign_pointer(node->slots[offset], slot); node->count++; @@ -312,6 +417,9 @@ int radix_tree_insert(struct radix_tree_ /* Go a level down */ offset = (index >> shift) & RADIX_TREE_MAP_MASK; node = slot; + + radix_ladder_lock(context, node); + slot = node->slots[offset]; shift -= RADIX_TREE_MAP_SHIFT; height--; @@ -323,12 +431,12 @@ int radix_tree_insert(struct radix_tree_ if (node) { node->count++; rcu_assign_pointer(node->slots[offset], item); - BUG_ON(tag_get(node, 0, offset)); - BUG_ON(tag_get(node, 1, offset)); + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + BUG_ON(tag_get(node, tag, offset)); } else { rcu_assign_pointer(root->rnode, item); - BUG_ON(root_tag_get(root, 0)); - BUG_ON(root_tag_get(root, 1)); + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + BUG_ON(root_tag_get(root, tag)); } return 0; @@ -352,6 +460,7 @@ void **radix_tree_lookup_slot(struct rad { unsigned int height, shift; struct radix_tree_node *node, **slot; + RADIX_TREE_CONTEXT(context, root); node = rcu_dereference(root->rnode); if (node == NULL) @@ -377,6 +486,8 @@ void **radix_tree_lookup_slot(struct rad if (node == NULL) return NULL; + radix_ladder_lock(context, node); + shift -= RADIX_TREE_MAP_SHIFT; height--; } while (height > 0); @@ -452,6 +563,7 @@ void *radix_tree_tag_set(struct radix_tr { unsigned int height, shift; struct radix_tree_node *slot; + RADIX_TREE_CONTEXT(context, root); height = root->height; BUG_ON(index > radix_tree_maxindex(height)); @@ -459,9 +571,15 @@ void *radix_tree_tag_set(struct radix_tr slot = radix_tree_indirect_to_ptr(root->rnode); shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + /* set the root's tag bit */ + if (slot && !root_tag_get(root, tag)) + root_tag_set(root, tag); + while (height > 0) { int offset; + radix_ladder_lock(context, slot); + offset = (index >> shift) & RADIX_TREE_MAP_MASK; if (!tag_get(slot, tag, offset)) tag_set(slot, tag, offset); @@ -471,14 +589,24 @@ void *radix_tree_tag_set(struct radix_tr height--; } - /* set the root's tag bit */ - if (slot && !root_tag_get(root, tag)) - root_tag_set(root, tag); - return slot; } EXPORT_SYMBOL(radix_tree_tag_set); +/* + * the change can never propagate upwards from here. 
+ */ +static inline int radix_tree_unlock_tag(struct radix_tree_root *root, + struct radix_tree_path *pathp, int tag) +{ + int this, other; + + this = tag_get(pathp->node, tag, pathp->offset); + other = any_tag_set_but(pathp->node, tag, pathp->offset); + + return !this || other; +} + /** * radix_tree_tag_clear - clear a tag on a radix tree node * @root: radix tree root @@ -497,15 +625,19 @@ void *radix_tree_tag_clear(struct radix_ unsigned long index, unsigned int tag) { struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; + struct radix_tree_path *punlock = path, *piter; struct radix_tree_node *slot = NULL; unsigned int height, shift; + RADIX_TREE_CONTEXT(context, root); + + pathp->node = NULL; + radix_path_init(context, pathp); height = root->height; if (index > radix_tree_maxindex(height)) goto out; shift = (height - 1) * RADIX_TREE_MAP_SHIFT; - pathp->node = NULL; slot = radix_tree_indirect_to_ptr(root->rnode); while (height > 0) { @@ -515,10 +647,17 @@ void *radix_tree_tag_clear(struct radix_ goto out; offset = (index >> shift) & RADIX_TREE_MAP_MASK; - pathp[1].offset = offset; - pathp[1].node = slot; - slot = slot->slots[offset]; pathp++; + pathp->offset = offset; + pathp->node = slot; + radix_path_lock(context, pathp, slot); + + if (radix_tree_unlock_tag(root, pathp, tag)) { + for (; punlock < pathp; punlock++) + radix_path_unlock(context, punlock); + } + + slot = slot->slots[offset]; shift -= RADIX_TREE_MAP_SHIFT; height--; } @@ -526,20 +665,22 @@ void *radix_tree_tag_clear(struct radix_ if (slot == NULL) goto out; - while (pathp->node) { - if (!tag_get(pathp->node, tag, pathp->offset)) - goto out; - tag_clear(pathp->node, tag, pathp->offset); - if (any_tag_set(pathp->node, tag)) - goto out; - pathp--; + for (piter = pathp; piter >= punlock; piter--) { + if (piter->node) { + if (!tag_get(piter->node, tag, piter->offset)) + break; + tag_clear(piter->node, tag, piter->offset); + if (any_tag_set(piter->node, tag)) + break; + } else { + if (root_tag_get(root, tag)) + root_tag_clear(root, tag); + } } - /* clear the root's tag bit */ - if (root_tag_get(root, tag)) - root_tag_clear(root, tag); - out: + for (; punlock < pathp; punlock++) + radix_path_unlock(context, punlock); return slot; } EXPORT_SYMBOL(radix_tree_tag_clear); @@ -992,6 +1133,7 @@ static inline void radix_tree_shrink(str while (root->height > 0) { struct radix_tree_node *to_free = root->rnode; void *newptr; + int tag; BUG_ON(!radix_tree_is_indirect_ptr(to_free)); to_free = radix_tree_indirect_to_ptr(to_free); @@ -1018,14 +1160,29 @@ static inline void radix_tree_shrink(str root->rnode = newptr; root->height--; /* must only free zeroed nodes into the slab */ - tag_clear(to_free, 0, 0); - tag_clear(to_free, 1, 0); + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + tag_clear(to_free, tag, 0); to_free->slots[0] = NULL; to_free->count = 0; - radix_tree_node_free(to_free); } } +static inline int radix_tree_unlock_all(struct radix_tree_root *root, + struct radix_tree_path *pathp) +{ + int tag; + int unlock = 1; + + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { + if (!radix_tree_unlock_tag(root, pathp, tag)) { + unlock = 0; + break; + } + } + + return unlock; +} + /** * radix_tree_delete - delete an item from a radix tree * @root: radix tree root @@ -1038,11 +1195,15 @@ static inline void radix_tree_shrink(str void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) { struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; + struct radix_tree_path *punlock = path, *piter; struct 
radix_tree_node *slot = NULL; - struct radix_tree_node *to_free; unsigned int height, shift; int tag; int offset; + RADIX_TREE_CONTEXT(context, root); + + pathp->node = NULL; + radix_path_init(context, pathp); height = root->height; if (index > radix_tree_maxindex(height)) @@ -1057,7 +1218,6 @@ void *radix_tree_delete(struct radix_tre slot = radix_tree_indirect_to_ptr(slot); shift = (height - 1) * RADIX_TREE_MAP_SHIFT; - pathp->node = NULL; do { if (slot == NULL) @@ -1067,6 +1227,13 @@ void *radix_tree_delete(struct radix_tre offset = (index >> shift) & RADIX_TREE_MAP_MASK; pathp->offset = offset; pathp->node = slot; + radix_path_lock(context, pathp, slot); + + if (slot->count > 2 && radix_tree_unlock_all(root, pathp)) { + for (; punlock < pathp; punlock++) + radix_path_unlock(context, punlock); + } + slot = slot->slots[offset]; shift -= RADIX_TREE_MAP_SHIFT; height--; @@ -1079,41 +1246,45 @@ void *radix_tree_delete(struct radix_tre * Clear all tags associated with the just-deleted item */ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { - if (tag_get(pathp->node, tag, pathp->offset)) - radix_tree_tag_clear(root, index, tag); + for (piter = pathp; piter >= punlock; piter--) { + if (piter->node) { + if (!tag_get(piter->node, tag, piter->offset)) + break; + tag_clear(piter->node, tag, piter->offset); + if (any_tag_set(piter->node, tag)) + break; + } else { + if (root_tag_get(root, tag)) + root_tag_clear(root, tag); + } + } } - to_free = NULL; - /* Now free the nodes we do not need anymore */ - while (pathp->node) { - pathp->node->slots[pathp->offset] = NULL; - pathp->node->count--; - /* - * Queue the node for deferred freeing after the - * last reference to it disappears (set NULL, above). - */ - if (to_free) - radix_tree_node_free(to_free); + /* Now unhook the nodes we do not need anymore */ + for (piter = pathp; piter >= punlock && piter->node; piter--) { + piter->node->slots[piter->offset] = NULL; + piter->node->count--; - if (pathp->node->count) { - if (pathp->node == + if (piter->node->count) { + if (piter->node == radix_tree_indirect_to_ptr(root->rnode)) radix_tree_shrink(root); goto out; } + } - /* Node with zero slots in use so free it */ - to_free = pathp->node; - pathp--; + BUG_ON(piter->node); - } root_tag_clear_all(root); root->height = 0; root->rnode = NULL; - if (to_free) - radix_tree_node_free(to_free); out: + for (; punlock <= pathp; punlock++) { + radix_path_unlock(context, punlock); + if (punlock->node && punlock->node->count == 0) + radix_tree_node_free(punlock->node); + } return slot; } EXPORT_SYMBOL(radix_tree_delete); patches/radix-concurrent-lockdep.patch0000664000077200007720000000247010655544576017474 0ustar mingomingo--- lib/radix-tree.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/lib/radix-tree.c =================================================================== --- linux-rt-rebase.q.orig/lib/radix-tree.c +++ linux-rt-rebase.q/lib/radix-tree.c @@ -72,6 +72,26 @@ struct radix_tree_path { static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH] __read_mostly; #ifdef CONFIG_RADIX_TREE_CONCURRENT +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static const char *radix_node_key_string[RADIX_TREE_MAX_PATH] = { + "radix-node-00", + "radix-node-01", + "radix-node-02", + "radix-node-03", + "radix-node-04", + "radix-node-05", + "radix-node-06", + "radix-node-07", + "radix-node-08", + "radix-node-09", + "radix-node-10", + "radix-node-11", + "radix-node-12", + "radix-node-13", + "radix-node-14", + "radix-node-15", +}; +#endif 
static struct lock_class_key radix_node_class[RADIX_TREE_MAX_PATH]; #endif @@ -218,7 +238,9 @@ radix_tree_node_alloc(struct radix_tree_ BUG_ON(radix_tree_is_indirect_ptr(ret)); #ifdef CONFIG_RADIX_TREE_CONCURRENT spin_lock_init(&ret->lock); - lockdep_set_class(&ret->lock, &radix_node_class[height]); + lockdep_set_class_and_name(&ret->lock, + &radix_node_class[height], + radix_node_key_string[height]); #endif ret->height = height; return ret; patches/preempt-realtime-net-drivers.patch0000664000077200007720000000106010655544575020273 0ustar mingomingo--- drivers/net/tulip/tulip_core.c | 1 + 1 file changed, 1 insertion(+) Index: linux-rt-rebase.q/drivers/net/tulip/tulip_core.c =================================================================== --- linux-rt-rebase.q.orig/drivers/net/tulip/tulip_core.c +++ linux-rt-rebase.q/drivers/net/tulip/tulip_core.c @@ -1790,6 +1790,7 @@ static void __devexit tulip_remove_one ( pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); + pci_disable_device (pdev); pci_set_drvdata (pdev, NULL); /* pci_power_off (pdev, -1); */ patches/preempt-realtime-arm-ixp4xx.patch0000664000077200007720000000116510655544574020057 0ustar mingomingo--- arch/arm/mach-ixp4xx/common-pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/arm/mach-ixp4xx/common-pci.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/mach-ixp4xx/common-pci.c +++ linux-rt-rebase.q/arch/arm/mach-ixp4xx/common-pci.c @@ -53,7 +53,7 @@ unsigned long ixp4xx_pci_reg_base = 0; * these transactions are atomic or we will end up * with corrupt data on the bus or in a driver. */ -static DEFINE_SPINLOCK(ixp4xx_pci_lock); +static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock); /* * Read from PCI config space patches/rcu-3.patch0000664000077200007720000004755410655544573013530 0ustar mingomingoFrom: Paul McKenney This patch implements a new version of RCU which allows its read-side critical sections to be preempted. It uses a set of counter pairs to keep track of the read-side critical sections and flips them when all tasks exit read-side critical section. The details of this implementation can be found in this paper - http://www.rdrop.com/users/paulmck/RCU/OLSrtRCU.2006.08.11a.pdf This patch was developed as a part of the -rt kernel development and meant to provide better latencies when read-side critical sections of RCU don't disable preemption. As a consequence of keeping track of RCU readers, the readers have a slight overhead (optimizations in the paper). This implementation co-exists with the "classic" RCU implementations and can be switched to at compiler. 
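As an editorial aid, the fragment below models the counter-pair idea in isolation. The names (flipctr, completed, toy_*) are invented, and the sketch deliberately ignores per-CPU distribution, memory barriers and the race against a concurrent flip that the real __rcu_read_lock() below handles by pinning both counters; it only shows why a reader increments the "current" counter and why a flip must wait for the other counter to drain.

#include <linux/atomic.h>

/*
 * Editorial illustration only, not part of the patch.  A global (not
 * per-CPU) counter pair: the low bit of "completed" selects the counter
 * that new readers use, and a flip is only allowed once the counter of
 * the previous side has drained to zero.
 */
static atomic_t flipctr[2] = { ATOMIC_INIT(0), ATOMIC_INIT(0) };
static long completed;

static int toy_read_lock(void)
{
	int idx = completed & 0x1;

	atomic_inc(&flipctr[idx]);	/* announce the read-side section */
	return idx;			/* reader remembers which side it used */
}

static void toy_read_unlock(int idx)
{
	atomic_dec(&flipctr[idx]);
}

static int toy_try_flip(void)
{
	if (atomic_read(&flipctr[!(completed & 0x1)]) != 0)
		return 0;	/* readers of the previous side still active */
	completed++;		/* new readers now use the other counter */
	return 1;
}

Two consecutive successful flips therefore bound a grace period: every reader that started before the first flip has decremented its counter before the second flip is permitted, so callbacks queued before the first flip are safe to invoke after the second.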
Signed-off-by: Paul McKenney Signed-off-by: Dipankar Sarma include/linux/rcupdate.h | 5 include/linux/rcupreempt.h | 66 ++++++ include/linux/sched.h | 6 kernel/Kconfig.preempt | 37 +++ kernel/Makefile | 4 kernel/rcupreempt.c | 464 +++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 581 insertions(+), 1 deletion(-) Index: linux/include/linux/rcupdate.h =================================================================== --- linux.orig/include/linux/rcupdate.h +++ linux/include/linux/rcupdate.h @@ -41,7 +41,12 @@ #include #include #include + +#ifdef CONFIG_CLASSIC_RCU #include +#else +#include +#endif /** * struct rcu_head - callback structure for use with RCU Index: linux/include/linux/rcupreempt.h =================================================================== --- /dev/null +++ linux/include/linux/rcupreempt.h @@ -0,0 +1,66 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (RT implementation) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2006 + * + * Author: Paul McKenney + * + * Based on the original work by Paul McKenney + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
+ * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of Read-Copy Update mechanism see - + * http://lse.sourceforge.net/locking/rcupdate.html + * + */ + +#ifndef __LINUX_RCUPREEMPT_H +#define __LINUX_RCUPREEMPT_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include + +#define rcu_qsctr_inc(cpu) +#define rcu_bh_qsctr_inc(cpu) +#define call_rcu_bh(head, rcu) call_rcu(head, rcu) + +extern void __rcu_read_lock(void); +extern void __rcu_read_unlock(void); +extern int rcu_pending(int cpu); + +#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } +#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } + +#define __rcu_read_lock_nesting() (current->rcu_read_lock_nesting) + +extern void __synchronize_sched(void); + +extern void __rcu_init(void); +extern void rcu_check_callbacks(int cpu, int user); +extern void rcu_restart_cpu(int cpu); +extern long rcu_batches_completed(void); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_RCUPREEMPT_H */ Index: linux/include/linux/sched.h =================================================================== --- linux.orig/include/linux/sched.h +++ linux/include/linux/sched.h @@ -1069,6 +1069,12 @@ struct task_struct { cpumask_t cpus_allowed; unsigned int time_slice; +#ifdef CONFIG_PREEMPT_RCU + int rcu_read_lock_nesting; + atomic_t *rcu_flipctr1; + atomic_t *rcu_flipctr2; +#endif + #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info; #endif Index: linux/kernel/Kconfig.preempt =================================================================== --- linux.orig/kernel/Kconfig.preempt +++ linux/kernel/Kconfig.preempt @@ -66,3 +66,40 @@ config PREEMPT_BKL config PREEMPT_NOTIFIERS bool +choice + prompt "RCU implementation type:" + default CLASSIC_RCU + +config CLASSIC_RCU + bool "Classic RCU" + help + This option selects the classic RCU implementation that is + designed for best read-side performance on non-realtime + systems. + + Say Y if you are unsure. + +config PREEMPT_RCU + bool "Preemptible RCU" + help + This option reduces the latency of the kernel by making certain + RCU sections preemptible. Normally RCU code is non-preemptible, if + this option is selected then read-only RCU sections become + preemptible. This helps latency, but may expose bugs due to + now-naive assumptions about each RCU read-side critical section + remaining on a given CPU through its execution. + + Say N if you are unsure. + +endchoice + +config RCU_STATS + bool "/proc stats for preemptible RCU read-side critical sections" + depends on PREEMPT_RCU + default y + help + This option provides /proc stats to provide debugging info for + the preemptible realtime RCU implementation. + + Say Y here if you want to see RCU stats in /proc + Say N if you are unsure. 
Index: linux/kernel/Makefile =================================================================== --- linux.orig/kernel/Makefile +++ linux/kernel/Makefile @@ -6,7 +6,7 @@ obj-y = sched.o fork.o exec_domain.o exit.o itimer.o time.o softirq.o resource.o \ sysctl.o capability.o ptrace.o timer.o user.o user_namespace.o \ signal.o sys.o kmod.o workqueue.o pid.o \ - rcupdate.o rcuclassic.o extable.o params.o posix-timers.o \ + extable.o params.o posix-timers.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o rwsem.o latency.o nsproxy.o srcu.o die_notifier.o \ utsname.o @@ -52,6 +52,8 @@ obj-$(CONFIG_DETECT_SOFTLOCKUP) += softl obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o +obj-$(CONFIG_CLASSIC_RCU) += rcupdate.o rcuclassic.o +obj-$(CONFIG_PREEMPT_RCU) += rcupdate.o rcupreempt.o obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o Index: linux/kernel/rcupreempt.c =================================================================== --- /dev/null +++ linux/kernel/rcupreempt.c @@ -0,0 +1,464 @@ +/* + * Read-Copy Update mechanism for mutual exclusion, realtime implementation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2001 + * + * Authors: Paul E. McKenney + * With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar + * for pushing me away from locks and towards counters. + * + * Papers: http://www.rdrop.com/users/paulmck/RCU + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU/ *.txt + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * PREEMPT_RCU data structures. + */ + +struct rcu_data { + spinlock_t lock; + long completed; /* Number of last completed batch. */ + struct tasklet_struct rcu_tasklet; + struct rcu_head *nextlist; + struct rcu_head **nexttail; + struct rcu_head *waitlist; + struct rcu_head **waittail; + struct rcu_head *donelist; + struct rcu_head **donetail; +#ifdef CONFIG_RCU_STATS + long n_next_length; + long n_next_add; + long n_wait_length; + long n_wait_add; + long n_done_length; + long n_done_add; + long n_done_remove; + atomic_t n_done_invoked; + long n_rcu_check_callbacks; + atomic_t n_rcu_try_flip1; + long n_rcu_try_flip2; + long n_rcu_try_flip3; + atomic_t n_rcu_try_flip_e1; + long n_rcu_try_flip_e2; + long n_rcu_try_flip_e3; +#endif /* #ifdef CONFIG_RCU_STATS */ +}; +struct rcu_ctrlblk { + spinlock_t fliplock; + long completed; /* Number of last completed batch. 
*/ +}; +static struct rcu_data rcu_data; +static struct rcu_ctrlblk rcu_ctrlblk = { + .fliplock = SPIN_LOCK_UNLOCKED, + .completed = 0, +}; +static DEFINE_PER_CPU(atomic_t [2], rcu_flipctr) = + { ATOMIC_INIT(0), ATOMIC_INIT(0) }; + +/* + * Return the number of RCU batches processed thus far. Useful + * for debug and statistics. + */ +long rcu_batches_completed(void) +{ + return rcu_ctrlblk.completed; +} + +void __rcu_read_lock(void) +{ + int flipctr; + unsigned long oldirq; + + local_irq_save(oldirq); + + if (current->rcu_read_lock_nesting++ == 0) { + + /* + * Outermost nesting of rcu_read_lock(), so atomically + * increment the current counter for the current CPU. + */ + + flipctr = rcu_ctrlblk.completed & 0x1; + smp_read_barrier_depends(); + current->rcu_flipctr1 = &(__get_cpu_var(rcu_flipctr)[flipctr]); + /* Can optimize to non-atomic on fastpath, but start simple. */ + atomic_inc(current->rcu_flipctr1); + smp_mb__after_atomic_inc(); /* might optimize out... */ + if (unlikely(flipctr != (rcu_ctrlblk.completed & 0x1))) { + + /* + * We raced with grace-period processing (flip). + * Although we cannot be preempted here, there + * could be interrupts, ECC errors and the like, + * so just nail down both sides of the rcu_flipctr + * array for the duration of our RCU read-side + * critical section, preventing a second flip + * from racing with us. At some point, it would + * be safe to decrement one of the counters, but + * we have no way of knowing when that would be. + * So just decrement them both in rcu_read_unlock(). + */ + + current->rcu_flipctr2 = + &(__get_cpu_var(rcu_flipctr)[!flipctr]); + /* Can again optimize to non-atomic on fastpath. */ + atomic_inc(current->rcu_flipctr2); + smp_mb__after_atomic_inc(); /* might optimize out... */ + } + } + local_irq_restore(oldirq); +} + +void __rcu_read_unlock(void) +{ + unsigned long oldirq; + + local_irq_save(oldirq); + if (--current->rcu_read_lock_nesting == 0) { + + /* + * Just atomically decrement whatever we incremented. + * Might later want to awaken some task waiting for the + * grace period to complete, but keep it simple for the + * moment. + */ + + smp_mb__before_atomic_dec(); + atomic_dec(current->rcu_flipctr1); + current->rcu_flipctr1 = NULL; + if (unlikely(current->rcu_flipctr2 != NULL)) { + atomic_dec(current->rcu_flipctr2); + current->rcu_flipctr2 = NULL; + } + } + + local_irq_restore(oldirq); +} + +static void __rcu_advance_callbacks(void) +{ + + if (rcu_data.completed != rcu_ctrlblk.completed) { + if (rcu_data.waitlist != NULL) { + *rcu_data.donetail = rcu_data.waitlist; + rcu_data.donetail = rcu_data.waittail; +#ifdef CONFIG_RCU_STATS + rcu_data.n_done_length += rcu_data.n_wait_length; + rcu_data.n_done_add += rcu_data.n_wait_length; + rcu_data.n_wait_length = 0; +#endif /* #ifdef CONFIG_RCU_STATS */ + } + if (rcu_data.nextlist != NULL) { + rcu_data.waitlist = rcu_data.nextlist; + rcu_data.waittail = rcu_data.nexttail; + rcu_data.nextlist = NULL; + rcu_data.nexttail = &rcu_data.nextlist; +#ifdef CONFIG_RCU_STATS + rcu_data.n_wait_length += rcu_data.n_next_length; + rcu_data.n_wait_add += rcu_data.n_next_length; + rcu_data.n_next_length = 0; +#endif /* #ifdef CONFIG_RCU_STATS */ + } else { + rcu_data.waitlist = NULL; + rcu_data.waittail = &rcu_data.waitlist; + } + rcu_data.completed = rcu_ctrlblk.completed; + } +} + +/* + * Attempt a single flip of the counters. Remember, a single flip does + * -not- constitute a grace period. Instead, the interval between + * a pair of consecutive flips is a grace period. 
+ * + * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation + * on a large SMP, they might want to use a hierarchical organization of + * the per-CPU-counter pairs. + */ +static void rcu_try_flip(void) +{ + int cpu; + long flipctr; + unsigned long oldirq; + + flipctr = rcu_ctrlblk.completed; +#ifdef CONFIG_RCU_STATS + atomic_inc(&rcu_data.n_rcu_try_flip1); +#endif /* #ifdef CONFIG_RCU_STATS */ + if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, oldirq))) { +#ifdef CONFIG_RCU_STATS + atomic_inc(&rcu_data.n_rcu_try_flip_e1); +#endif /* #ifdef CONFIG_RCU_STATS */ + return; + } + if (unlikely(flipctr != rcu_ctrlblk.completed)) { + + /* Our work is done! ;-) */ + +#ifdef CONFIG_RCU_STATS + rcu_data.n_rcu_try_flip_e2++; +#endif /* #ifdef CONFIG_RCU_STATS */ + spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, oldirq); + return; + } + flipctr &= 0x1; + + /* + * Check for completion of all RCU read-side critical sections + * that started prior to the previous flip. + */ + +#ifdef CONFIG_RCU_STATS + rcu_data.n_rcu_try_flip2++; +#endif /* #ifdef CONFIG_RCU_STATS */ + for_each_possible_cpu(cpu) { + if (atomic_read(&per_cpu(rcu_flipctr, cpu)[!flipctr]) != 0) { +#ifdef CONFIG_RCU_STATS + rcu_data.n_rcu_try_flip_e3++; +#endif /* #ifdef CONFIG_RCU_STATS */ + spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, oldirq); + return; + } + } + + /* Do the flip. */ + + smp_mb(); + rcu_ctrlblk.completed++; + +#ifdef CONFIG_RCU_STATS + rcu_data.n_rcu_try_flip3++; +#endif /* #ifdef CONFIG_RCU_STATS */ + spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, oldirq); +} + +void rcu_check_callbacks(int cpu, int user) +{ + unsigned long oldirq; + + if (rcu_ctrlblk.completed == rcu_data.completed) { + rcu_try_flip(); + if (rcu_ctrlblk.completed == rcu_data.completed) { + return; + } + } + spin_lock_irqsave(&rcu_data.lock, oldirq); +#ifdef CONFIG_RCU_STATS + rcu_data.n_rcu_check_callbacks++; +#endif /* #ifdef CONFIG_RCU_STATS */ + __rcu_advance_callbacks(); + if (rcu_data.donelist == NULL) { + spin_unlock_irqrestore(&rcu_data.lock, oldirq); + } else { + spin_unlock_irqrestore(&rcu_data.lock, oldirq); + tasklet_schedule(&rcu_data.rcu_tasklet); + } +} + +static void rcu_process_callbacks(unsigned long data) +{ + unsigned long flags; + struct rcu_head *next, *list; + + spin_lock_irqsave(&rcu_data.lock, flags); + list = rcu_data.donelist; + if (list == NULL) { + spin_unlock_irqrestore(&rcu_data.lock, flags); + return; + } + rcu_data.donelist = NULL; + rcu_data.donetail = &rcu_data.donelist; +#ifdef CONFIG_RCU_STATS + rcu_data.n_done_remove += rcu_data.n_done_length; + rcu_data.n_done_length = 0; +#endif /* #ifdef CONFIG_RCU_STATS */ + spin_unlock_irqrestore(&rcu_data.lock, flags); + while (list) { + next = list->next; + list->func(list); + list = next; +#ifdef CONFIG_RCU_STATS + atomic_inc(&rcu_data.n_done_invoked); +#endif /* #ifdef CONFIG_RCU_STATS */ + } +} + +void fastcall call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)) +{ + unsigned long flags; + + head->func = func; + head->next = NULL; + spin_lock_irqsave(&rcu_data.lock, flags); + __rcu_advance_callbacks(); + *rcu_data.nexttail = head; + rcu_data.nexttail = &head->next; +#ifdef CONFIG_RCU_STATS + rcu_data.n_next_add++; + rcu_data.n_next_length++; +#endif /* #ifdef CONFIG_RCU_STATS */ + spin_unlock_irqrestore(&rcu_data.lock, flags); +} + +/* + * Crude hack, reduces but does not eliminate possibility of failure. + * Needs to wait for all CPUs to pass through a -voluntary- context + * switch to eliminate possibility of failure. 
(Maybe just crank + * priority down...) + */ +void __synchronize_sched(void) +{ + cpumask_t oldmask; + int cpu; + + if (sched_getaffinity(0, &oldmask) < 0) { + oldmask = cpu_possible_map; + } + for_each_online_cpu(cpu) { + sched_setaffinity(0, cpumask_of_cpu(cpu)); + schedule(); + } + sched_setaffinity(0, oldmask); +} + +int rcu_pending(int cpu) +{ + return (rcu_data.donelist != NULL || + rcu_data.waitlist != NULL || + rcu_data.nextlist != NULL); +} + +void __init __rcu_init(void) +{ +/*&&&&*/printk("WARNING: experimental RCU implementation.\n"); + spin_lock_init(&rcu_data.lock); + rcu_data.completed = 0; + rcu_data.nextlist = NULL; + rcu_data.nexttail = &rcu_data.nextlist; + rcu_data.waitlist = NULL; + rcu_data.waittail = &rcu_data.waitlist; + rcu_data.donelist = NULL; + rcu_data.donetail = &rcu_data.donelist; + tasklet_init(&rcu_data.rcu_tasklet, rcu_process_callbacks, 0UL); +} + +/* + * Deprecated, use synchronize_rcu() or synchronize_sched() instead. + */ +void synchronize_kernel(void) +{ + synchronize_rcu(); +} + +#ifdef CONFIG_RCU_STATS +int rcu_read_proc_data(char *page) +{ + return sprintf(page, + "ggp=%ld lgp=%ld rcc=%ld\n" + "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n" + "rtf1=%d rtf2=%ld rtf3=%ld rtfe1=%d rtfe2=%ld rtfe3=%ld\n", + + rcu_ctrlblk.completed, + rcu_data.completed, + rcu_data.n_rcu_check_callbacks, + + rcu_data.n_next_add, + rcu_data.n_next_length, + rcu_data.n_wait_add, + rcu_data.n_wait_length, + rcu_data.n_done_add, + rcu_data.n_done_length, + rcu_data.n_done_remove, + atomic_read(&rcu_data.n_done_invoked), + + atomic_read(&rcu_data.n_rcu_try_flip1), + rcu_data.n_rcu_try_flip2, + rcu_data.n_rcu_try_flip3, + atomic_read(&rcu_data.n_rcu_try_flip_e1), + rcu_data.n_rcu_try_flip_e2, + rcu_data.n_rcu_try_flip_e3); +} + +int rcu_read_proc_gp_data(char *page) +{ + long oldgp = rcu_ctrlblk.completed; + + synchronize_rcu(); + return sprintf(page, "oldggp=%ld newggp=%ld\n", + oldgp, rcu_ctrlblk.completed); +} + +int rcu_read_proc_ptrs_data(char *page) +{ + return sprintf(page, + "nl=%p/%p nt=%p\n wl=%p/%p wt=%p dl=%p/%p dt=%p\n", + &rcu_data.nextlist, rcu_data.nextlist, rcu_data.nexttail, + &rcu_data.waitlist, rcu_data.waitlist, rcu_data.waittail, + &rcu_data.donelist, rcu_data.donelist, rcu_data.donetail + ); +} + +int rcu_read_proc_ctrs_data(char *page) +{ + int cnt = 0; + int cpu; + int f = rcu_data.completed & 0x1; + + cnt += sprintf(&page[cnt], "CPU last cur\n"); + for_each_online_cpu(cpu) { + cnt += sprintf(&page[cnt], "%3d %4d %3d\n", + cpu, + atomic_read(&per_cpu(rcu_flipctr, cpu)[!f]), + atomic_read(&per_cpu(rcu_flipctr, cpu)[f])); + } + cnt += sprintf(&page[cnt], "ggp = %ld\n", rcu_data.completed); + return (cnt); +} + +#endif /* #ifdef CONFIG_RCU_STATS */ + +EXPORT_SYMBOL_GPL(call_rcu); +EXPORT_SYMBOL_GPL(rcu_batches_completed); +EXPORT_SYMBOL_GPL(__synchronize_sched); +EXPORT_SYMBOL_GPL(__rcu_read_lock); +EXPORT_SYMBOL_GPL(__rcu_read_unlock); + patches/fix-emergency-reboot.patch0000664000077200007720000000302410655544576016614 0ustar mingomingoSubject: [patch] call reboot notifier list when doing an emergency reboot From: Ingo Molnar my laptop does not reboot unless the shutdown notifiers are called first. So the following command, which i use as a fast way to reboot into a new kernel: echo b > /proc/sysrq-trigger just hangs indefinitely after the kernel prints "System rebooting". the thing is, that the kernel is actually reschedulable in this stage, so we could as well process the reboot_notifier_list. 
(furthermore, on -rt kernels this place is preemptable even during SysRq-b) So just process the reboot notifier list if we are preemptable. This will shut disk caches and chipsets off. Signed-off-by: Ingo Molnar --- kernel/sys.c | 10 ++++++++++ 1 file changed, 10 insertions(+) Index: linux-rt-rebase.q/kernel/sys.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sys.c +++ linux-rt-rebase.q/kernel/sys.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -796,6 +797,15 @@ out_unlock: */ void emergency_restart(void) { + /* + * Call the notifier chain if we are not in an + * atomic context: + */ +#ifdef CONFIG_PREEMPT + if (!in_atomic() && !irqs_disabled()) + blocking_notifier_call_chain(&reboot_notifier_list, + SYS_RESTART, NULL); +#endif machine_emergency_restart(); } EXPORT_SYMBOL_GPL(emergency_restart); patches/rcu-preempt-hotplug-hackaround.patch0000664000077200007720000000067410655544577020633 0ustar mingomingo--- kernel/sched.c | 1 - 1 file changed, 1 deletion(-) Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -6928,7 +6928,6 @@ static void detach_destroy_domains(const for_each_cpu_mask(i, *cpu_map) cpu_attach_domain(NULL, i); - synchronize_sched(); arch_destroy_sched_domains(cpu_map); } patches/lockdep-prettify.patch0000664000077200007720000000361610655544572016052 0ustar mingomingoSubject: [patch] lockdep: prettify output From: Ingo Molnar recent changes to the lockdep code made some of the printouts uglier - mend them. Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) Index: linux/kernel/lockdep.c =================================================================== --- linux.orig/kernel/lockdep.c +++ linux/kernel/lockdep.c @@ -577,7 +577,7 @@ static void print_lock_dependencies(stru static void print_kernel_version(void) { - printk("%s %.*s\n", init_utsname()->release, + printk("[ %s %.*s\n", init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); } @@ -3117,13 +3117,13 @@ void __init lockdep_info(void) { printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); - printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); - printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); - printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); - printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); - printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); - printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); - printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); + printk("... MAX_LOCKDEP_SUBCLASSES: %6lu\n", MAX_LOCKDEP_SUBCLASSES); + printk("... MAX_LOCK_DEPTH: %6lu\n", MAX_LOCK_DEPTH); + printk("... MAX_LOCKDEP_KEYS: %6lu\n", MAX_LOCKDEP_KEYS); + printk("... CLASSHASH_SIZE: %6lu\n", CLASSHASH_SIZE); + printk("... MAX_LOCKDEP_ENTRIES: %6lu\n", MAX_LOCKDEP_ENTRIES); + printk("... MAX_LOCKDEP_CHAINS: %6lu\n", MAX_LOCKDEP_CHAINS); + printk("... 
CHAINHASH_SIZE: %6lu\n", CHAINHASH_SIZE); printk(" memory used by lock dependency info: %lu kB\n", (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + patches/preempt-realtime-supress-nohz-softirq-warning.patch0000664000077200007720000000114210655544575023642 0ustar mingomingo--- kernel/time/tick-sched.c | 3 +++ 1 file changed, 3 insertions(+) Index: linux-rt-rebase.q/kernel/time/tick-sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/time/tick-sched.c +++ linux-rt-rebase.q/kernel/time/tick-sched.c @@ -169,6 +169,8 @@ void tick_nohz_stop_sched_tick(void) goto end; cpu = smp_processor_id(); + +#ifndef CONFIG_PREEMPT_RT if (unlikely(local_softirq_pending())) { static int ratelimit; @@ -178,6 +180,7 @@ void tick_nohz_stop_sched_tick(void) ratelimit++; } } +#endif now = ktime_get(); /* patches/rt-mutex-mips.patch0000664000077200007720000001500210655544573015311 0ustar mingomingo--- arch/mips/Kconfig | 15 +++++++++++---- arch/mips/kernel/Makefile | 4 +++- include/asm-mips/atomic.h | 26 +++++++++++++++++++++----- include/asm-mips/semaphore.h | 30 +++++++++++++++++++++--------- 4 files changed, 56 insertions(+), 19 deletions(-) Index: linux-rt-rebase.q/arch/mips/Kconfig =================================================================== --- linux-rt-rebase.q.orig/arch/mips/Kconfig +++ linux-rt-rebase.q/arch/mips/Kconfig @@ -230,6 +230,7 @@ config MIPS_SIM config MARKEINS bool "NEC EMMA2RH Mark-eins" select DMA_NONCOHERENT + select NO_SPINLOCK select HW_HAS_PCI select IRQ_CPU select SWAP_IO_SPACE @@ -610,10 +611,17 @@ endmenu config RWSEM_GENERIC_SPINLOCK bool + depends on !PREEMPT_RT default y config RWSEM_XCHGADD_ALGORITHM bool + depends on !PREEMPT_RT + +config ASM_SEMAPHORES + bool +# depends on !PREEMPT_RT + default y config ARCH_HAS_ILOG2_U32 bool @@ -676,6 +684,9 @@ config DMA_NONCOHERENT config DMA_NEED_PCI_MAP_STATE bool +config NO_SPINLOCK + bool + config EARLY_PRINTK bool "Early printk" if EMBEDDED && DEBUG_KERNEL depends on SYS_HAS_EARLY_PRINTK @@ -1746,10 +1757,6 @@ config SECCOMP endmenu -config RWSEM_GENERIC_SPINLOCK - bool - default y - config LOCKDEP_SUPPORT bool default y Index: linux-rt-rebase.q/arch/mips/kernel/Makefile =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/Makefile +++ linux-rt-rebase.q/arch/mips/kernel/Makefile @@ -5,7 +5,7 @@ extra-y := head.o init_task.o vmlinux.lds obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ - ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \ + ptrace.o reset.o setup.o signal.o syscall.o \ time.o topology.o traps.o unaligned.o binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \ @@ -17,6 +17,8 @@ obj-$(CONFIG_MODULES) += mips_ksyms.o m obj-$(CONFIG_CPU_LOONGSON2) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_ASM_SEMAPHORES) += semaphore.o + obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o obj-$(CONFIG_CPU_R4000) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o Index: linux-rt-rebase.q/include/asm-mips/atomic.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/atomic.h +++ linux-rt-rebase.q/include/asm-mips/atomic.h @@ -171,7 +171,9 @@ static __inline__ int atomic_add_return( : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) : "memory"); - } else { + } +#if 
!defined(CONFIG_NO_SPINLOCK) && !defined(CONFIG_PREEMPT_RT) + else { unsigned long flags; raw_local_irq_save(flags); @@ -180,6 +182,7 @@ static __inline__ int atomic_add_return( v->counter = result; raw_local_irq_restore(flags); } +#endif smp_llsc_mb(); @@ -223,7 +226,9 @@ static __inline__ int atomic_sub_return( : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) : "memory"); - } else { + } +#if !defined(CONFIG_NO_SPINLOCK) && !defined(CONFIG_PREEMPT_RT) + else { unsigned long flags; raw_local_irq_save(flags); @@ -232,6 +237,7 @@ static __inline__ int atomic_sub_return( v->counter = result; raw_local_irq_restore(flags); } +#endif smp_llsc_mb(); @@ -291,7 +297,9 @@ static __inline__ int atomic_sub_if_posi : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) : "memory"); - } else { + } +#if !defined(CONFIG_NO_SPINLOCK) && !defined(CONFIG_PREEMPT_RT) + else { unsigned long flags; raw_local_irq_save(flags); @@ -301,6 +309,7 @@ static __inline__ int atomic_sub_if_posi v->counter = result; raw_local_irq_restore(flags); } +#endif smp_llsc_mb(); @@ -552,7 +561,9 @@ static __inline__ long atomic64_add_retu : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) : "memory"); - } else { + } +#if !defined(CONFIG_NO_SPINLOCK) && !defined(CONFIG_PREEMPT_RT) + else { unsigned long flags; raw_local_irq_save(flags); @@ -561,6 +572,8 @@ static __inline__ long atomic64_add_retu v->counter = result; raw_local_irq_restore(flags); } +#endif +#endif smp_llsc_mb(); @@ -604,7 +617,9 @@ static __inline__ long atomic64_sub_retu : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) : "memory"); - } else { + } +#if !defined(CONFIG_NO_SPINLOCK) && !defined(CONFIG_PREEMPT_RT) + else { unsigned long flags; raw_local_irq_save(flags); @@ -682,6 +697,7 @@ static __inline__ long atomic64_sub_if_p v->counter = result; raw_local_irq_restore(flags); } +#endif smp_llsc_mb(); Index: linux-rt-rebase.q/include/asm-mips/semaphore.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-mips/semaphore.h +++ linux-rt-rebase.q/include/asm-mips/semaphore.h @@ -24,12 +24,20 @@ #ifdef __KERNEL__ -#include -#include #include #include -struct semaphore { +/* + * On !PREEMPT_RT all semaphores are compat: + */ +#ifndef CONFIG_PREEMPT_RT +# define compat_semaphore semaphore +#endif + +#include +#include + +struct compat_semaphore { /* * Note that any negative value of count is equivalent to 0, * but additionally indicates that some process(es) might be @@ -79,31 +87,35 @@ static inline void down(struct semaphore * Try to get the semaphore, take the slow path if we fail. 
*/ if (unlikely(atomic_dec_return(&sem->count) < 0)) - __down(sem); + __compat_down(sem); } -static inline int down_interruptible(struct semaphore * sem) +static inline int compat_down_interruptible(struct compat_semaphore * sem) { int ret = 0; might_sleep(); if (unlikely(atomic_dec_return(&sem->count) < 0)) - ret = __down_interruptible(sem); + ret = __compat_down_interruptible(sem); return ret; } -static inline int down_trylock(struct semaphore * sem) +static inline int compat_down_trylock(struct compat_semaphore * sem) { return atomic_dec_if_positive(&sem->count) < 0; } -static inline void up(struct semaphore * sem) +static inline void compat_up(struct compat_semaphore * sem) { if (unlikely(atomic_inc_return(&sem->count) <= 0)) - __up(sem); + __compat_up(sem); } +#define compat_sema_count(sem) atomic_read(&(sem)->count) + +#include + #endif /* __KERNEL__ */ #endif /* __ASM_SEMAPHORE_H */ patches/highmem_rewrite.patch0000664000077200007720000003770010655544576015751 0ustar mingomingoSubject: mm: remove kmap_lock Eradicate global locks. - kmap_lock is removed by extensive use of atomic_t and a new flush scheme. - pool_lock is removed by using the pkmap index for the page_address_maps and modifying set_page_address to only allow NULL<->virt transitions. Signed-off-by: Peter Zijlstra --- include/linux/mm.h | 32 ++- mm/highmem.c | 433 ++++++++++++++++++++++++++++++----------------------- 2 files changed, 276 insertions(+), 189 deletions(-) Index: linux-rt-rebase.q/include/linux/mm.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/mm.h +++ linux-rt-rebase.q/include/linux/mm.h @@ -592,23 +592,39 @@ static __always_inline void *lowmem_page #endif #if defined(WANT_PAGE_VIRTUAL) -#define page_address(page) ((page)->virtual) -#define set_page_address(page, address) \ - do { \ - (page)->virtual = (address); \ - } while(0) -#define page_address_init() do { } while(0) +/* + * wrap page->virtual so it is safe to set/read locklessly + */ +#define page_address(page) \ + ({ typeof((page)->virtual) v = (page)->virtual; \ + smp_read_barrier_depends(); \ + v; }) + +static inline int set_page_address(struct page *page, void *address) +{ + if (address) + return cmpxchg(&page->virtual, NULL, address) == NULL; + else { + /* + * cmpxchg is a bit abused because it is not guaranteed + * safe wrt direct assignment on all platforms. + */ + void *virt = page->virtual; + return cmpxchg(&page->vitrual, virt, NULL) == virt; + } +} +void page_address_init(void); #endif #if defined(HASHED_PAGE_VIRTUAL) void *page_address(struct page *page); -void set_page_address(struct page *page, void *virtual); +int set_page_address(struct page *page, void *virtual); void page_address_init(void); #endif #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) #define page_address(page) lowmem_page_address(page) -#define set_page_address(page, address) do { } while(0) +#define set_page_address(page, address) (0) #define page_address_init() do { } while(0) #endif Index: linux-rt-rebase.q/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/mm/highmem.c +++ linux-rt-rebase.q/mm/highmem.c @@ -14,6 +14,11 @@ * based on Linus' idea. * * Copyright (C) 1999 Ingo Molnar + * + * Largely rewritten to get rid of all global locks + * + * Copyright (C) 2006 Red Hat, Inc., Peter Zijlstra + * */ #include @@ -27,18 +32,14 @@ #include #include #include + #include +#include -/* - * Virtual_count is not a pure "count". 
- * 0 means that it is not mapped, and has not been mapped - * since a TLB flush - it is usable. - * 1 means that there are no users, but it has been mapped - * since the last TLB flush - so we can't use it. - * n means that there are (n-1) current users of it. - */ #ifdef CONFIG_HIGHMEM +static int __set_page_address(struct page *page, void *virtual, int pos); + unsigned long totalhigh_pages __read_mostly; unsigned int nr_free_highpages (void) @@ -58,164 +59,208 @@ unsigned int nr_free_highpages (void) return pages; } -static int pkmap_count[LAST_PKMAP]; -static unsigned int last_pkmap_nr; -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); +/* + * count is not a pure "count". + * 0 means its owned exclusively by someone + * 1 means its free for use - either mapped or not. + * n means that there are (n-1) current users of it. + */ +static atomic_t pkmap_count[LAST_PKMAP]; +static atomic_t pkmap_hand; pte_t * pkmap_page_table; static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait); -static void flush_all_zero_pkmaps(void) +/* + * Try to free a given kmap slot. + * + * Returns: + * -1 - in use + * 0 - free, no TLB flush needed + * 1 - free, needs TLB flush + */ +static int pkmap_try_free(int pos) { - int i; - - flush_cache_kmaps(); + if (atomic_cmpxchg(&pkmap_count[pos], 1, 0) != 1) + return -1; - for (i = 0; i < LAST_PKMAP; i++) { - struct page *page; + /* + * TODO: add a young bit to make it CLOCK + */ + if (!pte_none(pkmap_page_table[pos])) { + struct page *page = pte_page(pkmap_page_table[pos]); + unsigned long addr = PKMAP_ADDR(pos); + pte_t *ptep = &pkmap_page_table[pos]; + + VM_BUG_ON(addr != (unsigned long)page_address(page)); + + if (!__set_page_address(page, NULL, pos)) + BUG(); + flush_kernel_dcache_page(page); + pte_clear(&init_mm, addr, ptep); - /* - * zero means we don't have anything to do, - * >1 means that it is still in use. Only - * a count of 1 means that it is free but - * needs to be unmapped - */ - if (pkmap_count[i] != 1) - continue; - pkmap_count[i] = 0; + return 1; + } - /* sanity check */ - BUG_ON(pte_none(pkmap_page_table[i])); + return 0; +} - /* - * Don't need an atomic fetch-and-clear op here; - * no-one has the page mapped, and cannot get at - * its virtual address (and hence PTE) without first - * getting the kmap_lock (which is held here). - * So no dangers, even with speculative execution. 
- */ - page = pte_page(pkmap_page_table[i]); - pte_clear(&init_mm, (unsigned long)page_address(page), - &pkmap_page_table[i]); +static inline void pkmap_put(atomic_t *counter) +{ + switch (atomic_dec_return(counter)) { + case 0: + BUG(); - set_page_address(page, NULL); + case 1: + wake_up(&pkmap_map_wait); } - flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); } -static inline unsigned long map_new_virtual(struct page *page) +#define TLB_BATCH 32 + +static int pkmap_get_free(void) { - unsigned long vaddr; - int count; + int i, pos, flush; + DECLARE_WAITQUEUE(wait, current); -start: - count = LAST_PKMAP; - /* Find an empty entry */ - for (;;) { - last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK; - if (!last_pkmap_nr) { - flush_all_zero_pkmaps(); - count = LAST_PKMAP; - } - if (!pkmap_count[last_pkmap_nr]) - break; /* Found a usable entry */ - if (--count) - continue; +restart: + for (i = 0; i < LAST_PKMAP; i++) { + pos = atomic_inc_return(&pkmap_hand) % LAST_PKMAP; + flush = pkmap_try_free(pos); + if (flush >= 0) + goto got_one; + } + + /* + * wait for somebody else to unmap their entries + */ + __set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&pkmap_map_wait, &wait); + schedule(); + remove_wait_queue(&pkmap_map_wait, &wait); + + goto restart; + +got_one: + if (flush) { +#if 0 + flush_tlb_kernel_range(PKMAP_ADDR(pos), PKMAP_ADDR(pos+1)); +#else + int pos2 = (pos + 1) % LAST_PKMAP; + int nr; + int entries[TLB_BATCH]; /* - * Sleep for somebody else to unmap their entries + * For those architectures that cannot help but flush the + * whole TLB, flush some more entries to make it worthwhile. + * Scan ahead of the hand to minimise search distances. */ - { - DECLARE_WAITQUEUE(wait, current); + for (i = 0, nr = 0; i < LAST_PKMAP && nr < TLB_BATCH; + i++, pos2 = (pos2 + 1) % LAST_PKMAP) { - __set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&pkmap_map_wait, &wait); - spin_unlock(&kmap_lock); - schedule(); - remove_wait_queue(&pkmap_map_wait, &wait); - spin_lock(&kmap_lock); - - /* Somebody else might have mapped it while we slept */ - if (page_address(page)) - return (unsigned long)page_address(page); + flush = pkmap_try_free(pos2); + if (flush < 0) + continue; + + if (!flush) { + atomic_t *counter = &pkmap_count[pos2]; + VM_BUG_ON(atomic_read(counter) != 0); + atomic_set(counter, 2); + pkmap_put(counter); + } else + entries[nr++] = pos2; + } + flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); - /* Re-start */ - goto start; + for (i = 0; i < nr; i++) { + atomic_t *counter = &pkmap_count[entries[i]]; + VM_BUG_ON(atomic_read(counter) != 0); + atomic_set(counter, 2); + pkmap_put(counter); } +#endif } - vaddr = PKMAP_ADDR(last_pkmap_nr); - set_pte_at(&init_mm, vaddr, - &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); + return pos; +} + +static unsigned long pkmap_insert(struct page *page) +{ + int pos = pkmap_get_free(); + unsigned long vaddr = PKMAP_ADDR(pos); + pte_t *ptep = &pkmap_page_table[pos]; + pte_t entry = mk_pte(page, kmap_prot); + atomic_t *counter = &pkmap_count[pos]; + + VM_BUG_ON(atomic_read(counter) != 0); - pkmap_count[last_pkmap_nr] = 1; - set_page_address(page, (void *)vaddr); + set_pte_at(&init_mm, vaddr, ptep, entry); + if (unlikely(!__set_page_address(page, (void *)vaddr, pos))) { + /* + * concurrent pkmap_inserts for this page - + * the other won the race, release this entry. + * + * we can still clear the pte without a tlb flush since + * it couldn't have been used yet. 
+ */ + pte_clear(&init_mm, vaddr, ptep); + VM_BUG_ON(atomic_read(counter) != 0); + atomic_set(counter, 2); + pkmap_put(counter); + vaddr = 0; + } else + atomic_set(counter, 2); return vaddr; } -void fastcall *kmap_high(struct page *page) +fastcall void *kmap_high(struct page *page) { unsigned long vaddr; - - /* - * For highmem pages, we can't trust "virtual" until - * after we have the lock. - * - * We cannot call this from interrupts, as it may block - */ - spin_lock(&kmap_lock); +again: vaddr = (unsigned long)page_address(page); + if (vaddr) { + atomic_t *counter = &pkmap_count[PKMAP_NR(vaddr)]; + if (atomic_inc_not_zero(counter)) { + /* + * atomic_inc_not_zero implies a (memory) barrier on success + * so page address will be reloaded. + */ + unsigned long vaddr2 = (unsigned long)page_address(page); + if (likely(vaddr == vaddr2)) + return (void *)vaddr; + + /* + * Oops, we got someone else. + * + * This can happen if we get preempted after + * page_address() and before atomic_inc_not_zero() + * and during that preemption this slot is freed and + * reused. + */ + pkmap_put(counter); + goto again; + } + } + + vaddr = pkmap_insert(page); if (!vaddr) - vaddr = map_new_virtual(page); - pkmap_count[PKMAP_NR(vaddr)]++; - BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2); - spin_unlock(&kmap_lock); - return (void*) vaddr; + goto again; + + return (void *)vaddr; } EXPORT_SYMBOL(kmap_high); -void fastcall kunmap_high(struct page *page) +fastcall void kunmap_high(struct page *page) { - unsigned long vaddr; - unsigned long nr; - int need_wakeup; - - spin_lock(&kmap_lock); - vaddr = (unsigned long)page_address(page); + unsigned long vaddr = (unsigned long)page_address(page); BUG_ON(!vaddr); - nr = PKMAP_NR(vaddr); - - /* - * A count must never go down to zero - * without a TLB flush! - */ - need_wakeup = 0; - switch (--pkmap_count[nr]) { - case 0: - BUG(); - case 1: - /* - * Avoid an unnecessary wake_up() function call. - * The common case is pkmap_count[] == 1, but - * no waiters. - * The tasks queued in the wait-queue are guarded - * by both the lock in the wait-queue-head and by - * the kmap_lock. As the kmap_lock is held here, - * no need for the wait-queue-head's lock. Simply - * test if the queue is empty. - */ - need_wakeup = waitqueue_active(&pkmap_map_wait); - } - spin_unlock(&kmap_lock); - - /* do wake-up, if needed, race-free outside of the spin lock */ - if (need_wakeup) - wake_up(&pkmap_map_wait); + pkmap_put(&pkmap_count[PKMAP_NR(vaddr)]); } EXPORT_SYMBOL(kunmap_high); + #endif #if defined(HASHED_PAGE_VIRTUAL) @@ -223,19 +268,13 @@ EXPORT_SYMBOL(kunmap_high); #define PA_HASH_ORDER 7 /* - * Describes one page->virtual association + * Describes one page->virtual address association. */ -struct page_address_map { +static struct page_address_map { struct page *page; void *virtual; struct list_head list; -}; - -/* - * page_address_map freelist, allocated from page_address_maps. 
- */ -static struct list_head page_address_pool; /* freelist */ -static spinlock_t pool_lock; /* protects page_address_pool */ +} page_address_maps[LAST_PKMAP]; /* * Hash table bucket @@ -250,91 +289,123 @@ static struct page_address_slot *page_sl return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; } -void *page_address(struct page *page) +static void *__page_address(struct page_address_slot *pas, struct page *page) { - unsigned long flags; - void *ret; - struct page_address_slot *pas; - - if (!PageHighMem(page)) - return lowmem_page_address(page); + void *ret = NULL; - pas = page_slot(page); - ret = NULL; - spin_lock_irqsave(&pas->lock, flags); if (!list_empty(&pas->lh)) { struct page_address_map *pam; list_for_each_entry(pam, &pas->lh, list) { if (pam->page == page) { ret = pam->virtual; - goto done; + break; } } } -done: + + return ret; +} + +void *page_address(struct page *page) +{ + unsigned long flags; + void *ret; + struct page_address_slot *pas; + + if (!PageHighMem(page)) + return lowmem_page_address(page); + + pas = page_slot(page); + spin_lock_irqsave(&pas->lock, flags); + ret = __page_address(pas, page); spin_unlock_irqrestore(&pas->lock, flags); return ret; } EXPORT_SYMBOL(page_address); -void set_page_address(struct page *page, void *virtual) +static int __set_page_address(struct page *page, void *virtual, int pos) { + int ret = 0; unsigned long flags; struct page_address_slot *pas; struct page_address_map *pam; - BUG_ON(!PageHighMem(page)); + VM_BUG_ON(!PageHighMem(page)); + VM_BUG_ON(atomic_read(&pkmap_count[pos]) != 0); + VM_BUG_ON(pos < 0 || pos >= LAST_PKMAP); pas = page_slot(page); - if (virtual) { /* Add */ - BUG_ON(list_empty(&page_address_pool)); + pam = &page_address_maps[pos]; - spin_lock_irqsave(&pool_lock, flags); - pam = list_entry(page_address_pool.next, - struct page_address_map, list); - list_del(&pam->list); - spin_unlock_irqrestore(&pool_lock, flags); - - pam->page = page; - pam->virtual = virtual; - - spin_lock_irqsave(&pas->lock, flags); - list_add_tail(&pam->list, &pas->lh); - spin_unlock_irqrestore(&pas->lock, flags); - } else { /* Remove */ - spin_lock_irqsave(&pas->lock, flags); - list_for_each_entry(pam, &pas->lh, list) { - if (pam->page == page) { - list_del(&pam->list); - spin_unlock_irqrestore(&pas->lock, flags); - spin_lock_irqsave(&pool_lock, flags); - list_add_tail(&pam->list, &page_address_pool); - spin_unlock_irqrestore(&pool_lock, flags); - goto done; - } + spin_lock_irqsave(&pas->lock, flags); + if (virtual) { /* add */ + VM_BUG_ON(!list_empty(&pam->list)); + + if (!__page_address(pas, page)) { + pam->page = page; + pam->virtual = virtual; + list_add_tail(&pam->list, &pas->lh); + ret = 1; + } + } else { /* remove */ + if (!list_empty(&pam->list)) { + list_del_init(&pam->list); + ret = 1; } - spin_unlock_irqrestore(&pas->lock, flags); } -done: - return; + spin_unlock_irqrestore(&pas->lock, flags); + + return ret; } -static struct page_address_map page_address_maps[LAST_PKMAP]; +int set_page_address(struct page *page, void *virtual) +{ + /* + * set_page_address is not supposed to be called when using + * hashed virtual addresses. 
+ */ + BUG(); + return 0; +} -void __init page_address_init(void) +void __init __page_address_init(void) { int i; - INIT_LIST_HEAD(&page_address_pool); for (i = 0; i < ARRAY_SIZE(page_address_maps); i++) - list_add(&page_address_maps[i].list, &page_address_pool); + INIT_LIST_HEAD(&page_address_maps[i].list); + for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) { INIT_LIST_HEAD(&page_address_htable[i].lh); spin_lock_init(&page_address_htable[i].lock); } - spin_lock_init(&pool_lock); +} + +#elif defined (CONFIG_HIGHMEM) /* HASHED_PAGE_VIRTUAL */ + +static int __set_page_address(struct page *page, void *virtual, int pos) +{ + return set_page_address(page, virtual); } #endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */ + +#if defined(CONFIG_HIGHMEM) || defined(HASHED_PAGE_VIRTUAL) + +void __init page_address_init(void) +{ +#ifdef CONFIG_HIGHMEM + int i; + + for (i = 0; i < ARRAY_SIZE(pkmap_count); i++) + atomic_set(&pkmap_count[i], 1); +#endif + +#ifdef HASHED_PAGE_VIRTUAL + __page_address_init(); +#endif +} + +#endif patches/ep93xx-timer-accuracy.patch0000664000077200007720000000320510655544571016624 0ustar mingomingo The ep93xx has a weird timer tick base (983.04 kHz.) This experimental patch tries to increase time of day accuracy by keeping the number of ticks until the next jiffy in a fractional value representation. Signed-off-by: Lennert Buytenhek --- arch/arm/mach-ep93xx/core.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) Index: linux/arch/arm/mach-ep93xx/core.c =================================================================== --- linux.orig/arch/arm/mach-ep93xx/core.c +++ linux/arch/arm/mach-ep93xx/core.c @@ -94,19 +94,32 @@ void __init ep93xx_map_io(void) * track of lost jiffies. */ static unsigned int last_jiffy_time; +static unsigned int next_jiffy_time; +static unsigned int accumulator; -#define TIMER4_TICKS_PER_JIFFY ((CLOCK_TICK_RATE + (HZ/2)) / HZ) +#define TIMER4_TICKS_PER_JIFFY (983040 / HZ) +#define TIMER4_TICKS_MOD_JIFFY (983040 % HZ) + +static int after_eq(unsigned long a, unsigned long b) +{ + return ((signed long)(a - b)) >= 0; +} static int ep93xx_timer_interrupt(int irq, void *dev_id) { write_seqlock(&xtime_lock); __raw_writel(1, EP93XX_TIMER1_CLEAR); - while ((signed long) - (__raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time) - >= TIMER4_TICKS_PER_JIFFY) { - last_jiffy_time += TIMER4_TICKS_PER_JIFFY; + while (after_eq(__raw_readl(EP93XX_TIMER4_VALUE_LOW), next_jiffy_time)) { timer_tick(); + + last_jiffy_time = next_jiffy_time; + next_jiffy_time += TIMER4_TICKS_PER_JIFFY; + accumulator += TIMER4_TICKS_MOD_JIFFY; + if (accumulator >= HZ) { + next_jiffy_time++; + accumulator -= HZ; + } } write_sequnlock(&xtime_lock); patches/latency-tracer-optimize-a-bit.patch0000664000077200007720000000124610655544572020327 0ustar mingomingo--- kernel/latency_trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -92,7 +92,7 @@ static inline int DEBUG_WARN_ON(int cond #ifdef CONFIG_CRITICAL_IRQSOFF_TIMING # ifdef CONFIG_CRITICAL_PREEMPT_TIMING static DEFINE_PER_CPU(int, trace_cpu_idle); -# define irqs_off_preempt_count() (!__get_cpu_var(trace_cpu_idle) && preempt_count()) +# define irqs_off_preempt_count() (preempt_count() && !__get_cpu_var(trace_cpu_idle)) # else # define irqs_off_preempt_count() 0 # endif 
patches/replace-bugon-by-warn-on.patch0000664000077200007720000000102310655544571017264 0ustar mingomingo--- arch/i386/mm/highmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux/arch/i386/mm/highmem.c =================================================================== --- linux.orig/arch/i386/mm/highmem.c +++ linux/arch/i386/mm/highmem.c @@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page pagefault_disable(); idx = type + KM_TYPE_NR*smp_processor_id(); - BUG_ON(!pte_none(*(kmap_pte-idx))); + WARN_ON_ONCE(!pte_none(*(kmap_pte-idx))); if (!PageHighMem(page)) return page_address(page); patches/trace-cpuidle.patch0000664000077200007720000001350610655544572015305 0ustar mingomingoFrom linux-rt-users-owner@vger.kernel.org Sat Jul 14 04:08:19 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.2 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from vger.kernel.org (vger.kernel.org [209.132.176.167]) by mail.tglx.de (Postfix) with ESMTP id BB2AB65C292; Sat, 14 Jul 2007 04:08:19 +0200 (CEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1758741AbXGNCIT (ORCPT + 1 other); Fri, 13 Jul 2007 22:08:19 -0400 Received: (majordomo@vger.kernel.org) by vger.kernel.org id S1760025AbXGNCIT (ORCPT ); Fri, 13 Jul 2007 22:08:19 -0400 Received: from rwcrmhc11.comcast.net ([216.148.227.151]:38099 "EHLO rwcrmhc11.comcast.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1758741AbXGNCIR (ORCPT ); Fri, 13 Jul 2007 22:08:17 -0400 Received: from sx.thebigcorporation.com ([69.181.45.228]) by comcast.net (rwcrmhc11) with ESMTP id <20070714020816m1100999cee>; Sat, 14 Jul 2007 02:08:17 +0000 Received: from sx.thebigcorporation.com (localhost.localdomain [127.0.0.1]) by sx.thebigcorporation.com (8.14.1/8.13.8) with ESMTP id l6E28G5M018639; Fri, 13 Jul 2007 19:08:16 -0700 Received: (from sven@localhost) by sx.thebigcorporation.com (8.14.1/8.14.1/Submit) id l6E28FCT018638; Fri, 13 Jul 2007 19:08:15 -0700 X-Authentication-Warning: sx.thebigcorporation.com: sven set sender to sven@thebigcorporation.com using -f Subject: Re: [PATCH -rt 6/6] Compile fix for PREEMPT_TIMING on and IRQSOFF_TIMING off From: Sven-Thorsten Dietrich To: Kevin Hilman Cc: tglx@linutronix.de, mingo@elte.hu, linux-rt-users@vger.kernel.org, linux-kernel@vger.kernel.org In-Reply-To: <46980935.3060509@mvista.com> References: <20070713175214.336577416@mvista.com> <20070713175229.239602308@mvista.com> <46980935.3060509@mvista.com> Content-Type: text/plain Organization: The Big Corporation Date: Fri, 13 Jul 2007 19:08:14 -0700 Message-Id: <1184378894.16207.14.camel@sx.thebigcorporation.com> Mime-Version: 1.0 X-Mailer: Evolution 2.10.2 (2.10.2-3.fc7) Sender: linux-rt-users-owner@vger.kernel.org Precedence: bulk X-Mailing-List: linux-rt-users@vger.kernel.org X-Filter-To: .Kernel.rt-users X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit On Fri, 2007-07-13 at 16:22 -0700, Kevin Hilman wrote: > [Minor update to avoid a compiler warning in the case of DEBUG_KERNEL=n] > The resent patch (v2) had white space damage - Here is a reconstituted version that applies for me on 2.6.22-rt3 Acked-by: Sven-Thorsten Dietrich >From linux-rt-users-owner@vger.kernel.org Fri Jul 13 16:22:34 2007 Return-Path: Received: from sx.thebigcorporation.com ([unix socket]) by sx.thebigcorporation.com (Cyrus v2.3.8-Fedora-RPM-2.3.8-3.fc7) with LMTPA; Fri, 13 Jul 2007 16:22:34 -0700 
X-Sieve: CMU Sieve 2.3 Received: from vger.kernel.org (vger.kernel.org [209.132.176.167]) by sx.thebigcorporation.com (8.14.1/8.13.8) with ESMTP id l6DNMXP6017382 for ; Fri, 13 Jul 2007 16:22:33 -0700 Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1759195AbXGMXW1 (ORCPT ); Fri, 13 Jul 2007 19:22:27 -0400 Received: (majordomo@vger.kernel.org) by vger.kernel.org id S1760623AbXGMXW1 (ORCPT ); Fri, 13 Jul 2007 19:22:27 -0400 Received: from h155.mvista.com ([63.81.120.158]:7301 "EHLO gateway-1237.mvista.com" rhost-flags-OK-FAIL-OK-OK) by vger.kernel.org with ESMTP id S1759161AbXGMXW1 (ORCPT ); Fri, 13 Jul 2007 19:22:27 -0400 Received: from [127.0.0.1] (asshur.mvista.com [10.0.0.11]) by hermes.mvista.com (Postfix) with ESMTP id 69F871DE39; Fri, 13 Jul 2007 16:22:25 -0700 (PDT) Message-ID: <46980935.3060509@mvista.com> Date: Fri, 13 Jul 2007 16:22:29 -0700 From: Kevin Hilman User-Agent: Thunderbird 1.5.0.12 (X11/20070604) MIME-Version: 1.0 To: Kevin Hilman Cc: tglx@linutronix.de, mingo@elte.hu, linux-rt-users@vger.kernel.org, linux-kernel@vger.kernel.org Subject: Re: [PATCH -rt 6/6] Compile fix for PREEMPT_TIMING on and IRQSOFF_TIMING off References: <20070713175214.336577416@mvista.com> <20070713175229.239602308@mvista.com> In-Reply-To: <20070713175229.239602308@mvista.com> Content-Type: text/plain; charset=ISO-8859-1 Sender: linux-rt-users-owner@vger.kernel.org Precedence: bulk X-Mailing-List: linux-rt-users@vger.kernel.org X-Evolution-Source: imap://sven@sx.thebigcorporation.com/ Content-Transfer-Encoding: 8bit [Minor update to avoid a compiler warning in the case of DEBUG_KERNEL=n] Compile fix for PREEMPT_TIMING on and IRQSOFF_TIMING off The per-cpu trace_cpu_idle variable is used when timing *either* IRQs-off or preempt sections. Signed-off-by: Kevin Hilman --- kernel/latency_trace.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -89,9 +89,13 @@ static inline int DEBUG_WARN_ON(int cond } #endif +#if defined(CONFIG_CRITICAL_IRQSOFF_TIMING) || \ + (defined(CONFIG_CRITICAL_PREEMPT_TIMING) && defined(CONFIG_TRACE_IRQFLAGS)) + static DEFINE_PER_CPU(int, trace_cpu_idle); +#endif + #ifdef CONFIG_CRITICAL_IRQSOFF_TIMING # ifdef CONFIG_CRITICAL_PREEMPT_TIMING - static DEFINE_PER_CPU(int, trace_cpu_idle); # define irqs_off_preempt_count() (preempt_count() && !__get_cpu_var(trace_cpu_idle)) # else # define irqs_off_preempt_count() 0 patches/ppc-read-persistent-clock.patch0000664000077200007720000001013210655544572017536 0ustar mingomingoFrom sshtylyov@ru.mvista.com Thu May 17 20:11:33 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from imap.sh.mvista.com (unknown [63.81.120.155]) by mail.tglx.de (Postfix) with ESMTP id 069DD65C065 for ; Thu, 17 May 2007 20:11:33 +0200 (CEST) Received: from wasted.dev.rtsoft.ru (unknown [10.150.0.9]) by imap.sh.mvista.com (Postfix) with ESMTP id 928713EC9; Thu, 17 May 2007 11:11:28 -0700 (PDT) From: Sergei Shtylyov (by way of Sergei Shtylyov ) Organization: MontaVista Software Inc. 
Subject: [PATCH 2.6.21-rt2] PowerPC: implement read_persistent_clock() Date: Thu, 17 May 2007 22:13:01 +0400 User-Agent: KMail/1.5 To: tglx@linutronix.de, mingo@elte.hu MIME-Version: 1.0 Content-Disposition: inline Cc: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org, johnstul@us.ibm.com Content-Type: text/plain; charset="iso-8859-1" Message-Id: <200705172213.01877.sshtylyov@ru.mvista.com> X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Here's the read_persistent_clock() implementation for PowerPC. I'm deliberately renaming get_boot_time() despite it's not static as it doesn't get called from anywhere else. Signed-off-by: Sergei Shtylyov --- Have almost forgotten about this one... :-) This patch hasn't received a good testing though -- at least it doesn't break without RTC... ;-) arch/powerpc/kernel/time.c | 62 ++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 34 deletions(-) Index: linux/arch/powerpc/kernel/time.c =================================================================== --- linux.orig/arch/powerpc/kernel/time.c +++ linux/arch/powerpc/kernel/time.c @@ -655,31 +655,46 @@ void __init generic_calibrate_decr(void) #endif } -unsigned long get_boot_time(void) +unsigned long read_persistent_clock(void) { - struct rtc_time tm; + unsigned long time = 0; + static int first = 1; + + if (first && ppc_md.time_init) { + timezone_offset = ppc_md.time_init(); + + /* If platform provided a timezone (pmac), we correct the time */ + if (timezone_offset) { + sys_tz.tz_minuteswest = -timezone_offset / 60; + sys_tz.tz_dsttime = 0; + } + } if (ppc_md.get_boot_time) - return ppc_md.get_boot_time(); - if (!ppc_md.get_rtc_time) - return 0; - ppc_md.get_rtc_time(&tm); - return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec); + time = ppc_md.get_boot_time(); + else if (ppc_md.get_rtc_time) { + struct rtc_time tm; + + ppc_md.get_rtc_time(&tm); + time = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec); + } + time -= timezone_offset; + + if (first) { + last_rtc_update = time; + first = 0; + } + return time; } /* This function is only called on the boot processor */ void __init time_init(void) { - unsigned long flags; - unsigned long tm = 0; struct div_result res; u64 scale, x; unsigned shift; - if (ppc_md.time_init != NULL) - timezone_offset = ppc_md.time_init(); - if (__USE_RTC()) { /* 601 processor: dec counts down by 128 every 128ns */ ppc_tb_freq = 1000000000; @@ -754,27 +769,6 @@ void __init time_init(void) /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ boot_tb = get_tb(); - tm = get_boot_time(); - - write_seqlock_irqsave(&xtime_lock, flags); - - /* If platform provided a timezone (pmac), we correct the time */ - if (timezone_offset) { - sys_tz.tz_minuteswest = -timezone_offset / 60; - sys_tz.tz_dsttime = 0; - tm -= timezone_offset; - } - - xtime.tv_sec = tm; - xtime.tv_nsec = 0; - - time_freq = 0; - - last_rtc_update = xtime.tv_sec; - set_normalized_timespec(&wall_to_monotonic, - -xtime.tv_sec, -xtime.tv_nsec); - write_sequnlock_irqrestore(&xtime_lock, flags); - /* Not exact, but the timer interrupt takes care of this */ set_dec(tb_ticks_per_jiffy); } patches/tasklet-busy-loop-hack.patch0000664000077200007720000000327110655544574017066 0ustar mingomingo--- include/linux/interrupt.h | 6 ++---- kernel/softirq.c | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/include/linux/interrupt.h 
=================================================================== --- linux-rt-rebase.q.orig/include/linux/interrupt.h +++ linux-rt-rebase.q/include/linux/interrupt.h @@ -375,10 +375,8 @@ static inline void tasklet_unlock(struct clear_bit(TASKLET_STATE_RUN, &(t)->state); } -static inline void tasklet_unlock_wait(struct tasklet_struct *t) -{ - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } -} +extern void tasklet_unlock_wait(struct tasklet_struct *t); + #else # define tasklet_trylock(t) 1 # define tasklet_tryunlock(t) 1 Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -643,6 +644,25 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL); } +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) + +void tasklet_unlock_wait(struct tasklet_struct *t) +{ + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { + /* + * Hack for now to avoid this busy-loop: + */ +#ifdef CONFIG_PREEMPT_RT + msleep(1); +#else + barrier(); +#endif + } +} +EXPORT_SYMBOL(tasklet_unlock_wait); + +#endif + static int ksoftirqd(void * __data) { struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2 }; patches/ppc-mark-notrace-mainline.patch0000664000077200007720000000104210655544572017511 0ustar mingomingo--- arch/powerpc/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux/arch/powerpc/kernel/irq.c =================================================================== --- linux.orig/arch/powerpc/kernel/irq.c +++ linux/arch/powerpc/kernel/irq.c @@ -113,7 +113,7 @@ static inline void set_soft_enabled(unsi : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } -void local_irq_restore(unsigned long en) +void notrace local_irq_restore(unsigned long en) { /* * get_paca()->soft_enabled = en; patches/percpu-locked-netfilter.patch0000664000077200007720000001034510655544574017313 0ustar mingomingo net/core/flow.c | 22 ++++++++++++++-------- net/ipv4/netfilter/arp_tables.c | 4 ++-- net/ipv4/netfilter/ip_tables.c | 2 +- 3 files changed, 17 insertions(+), 11 deletions(-) --- Index: linux-rt-rebase.q/net/core/flow.c =================================================================== --- linux-rt-rebase.q.orig/net/core/flow.c +++ linux-rt-rebase.q/net/core/flow.c @@ -40,9 +40,10 @@ atomic_t flow_cache_genid = ATOMIC_INIT( static u32 flow_hash_shift; #define flow_hash_size (1 << flow_hash_shift) -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; -#define flow_table(cpu) (per_cpu(flow_tables, cpu)) +static DEFINE_PER_CPU_LOCKED(struct flow_cache_entry **, flow_tables); + +#define flow_table(cpu) (per_cpu_var_locked(flow_tables, cpu)) static struct kmem_cache *flow_cachep __read_mostly; @@ -172,24 +173,24 @@ static int flow_key_compare(struct flowi void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir, flow_resolve_t resolver) { - struct flow_cache_entry *fle, **head = NULL /* shut up GCC */; + struct flow_cache_entry **table, *fle, **head = NULL /* shut up GCC */; unsigned int hash; int cpu; local_bh_disable(); - cpu = smp_processor_id(); + table = get_cpu_var_locked(flow_tables, &cpu); fle = NULL; /* Packet really early in init? Making flow_cache_init a * pre-smp initcall would solve this. 
--RR */ - if (!flow_table(cpu)) + if (!table) goto nocache; if (flow_hash_rnd_recalc(cpu)) flow_new_hash_rnd(cpu); hash = flow_hash_code(key, cpu); - head = &flow_table(cpu)[hash]; + head = &table[hash]; for (fle = *head; fle; fle = fle->next) { if (fle->family == family && fle->dir == dir && @@ -199,6 +200,7 @@ void *flow_cache_lookup(struct flowi *ke if (ret) atomic_inc(fle->object_ref); + put_cpu_var_locked(flow_tables, cpu); local_bh_enable(); return ret; @@ -224,6 +226,8 @@ void *flow_cache_lookup(struct flowi *ke } nocache: + put_cpu_var_locked(flow_tables, cpu); + { int err; void *obj; @@ -253,14 +257,15 @@ nocache: static void flow_cache_flush_tasklet(unsigned long data) { struct flow_flush_info *info = (void *)data; + struct flow_cache_entry **table; int i; int cpu; - cpu = smp_processor_id(); + table = get_cpu_var_locked(flow_tables, &cpu); for (i = 0; i < flow_hash_size; i++) { struct flow_cache_entry *fle; - fle = flow_table(cpu)[i]; + fle = table[i]; for (; fle; fle = fle->next) { unsigned genid = atomic_read(&flow_cache_genid); @@ -271,6 +276,7 @@ static void flow_cache_flush_tasklet(uns atomic_dec(fle->object_ref); } } + put_cpu_var_locked(flow_tables, cpu); if (atomic_dec_and_test(&info->cpuleft)) complete(&info->completion); Index: linux-rt-rebase.q/net/ipv4/netfilter/arp_tables.c =================================================================== --- linux-rt-rebase.q.orig/net/ipv4/netfilter/arp_tables.c +++ linux-rt-rebase.q/net/ipv4/netfilter/arp_tables.c @@ -241,7 +241,7 @@ unsigned int arpt_do_table(struct sk_buf read_lock_bh(&table->lock); private = table->private; - table_base = (void *)private->entries[smp_processor_id()]; + table_base = (void *)private->entries[raw_smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); back = get_entry(table_base, private->underflow[hook]); @@ -951,7 +951,7 @@ static int do_add_counters(void __user * i = 0; /* Choose the copy that is on our node */ - loc_cpu_entry = private->entries[smp_processor_id()]; + loc_cpu_entry = private->entries[raw_smp_processor_id()]; ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, add_counter_to_entry, Index: linux-rt-rebase.q/net/ipv4/netfilter/ip_tables.c =================================================================== --- linux-rt-rebase.q.orig/net/ipv4/netfilter/ip_tables.c +++ linux-rt-rebase.q/net/ipv4/netfilter/ip_tables.c @@ -346,7 +346,7 @@ ipt_do_table(struct sk_buff **pskb, read_lock_bh(&table->lock); IP_NF_ASSERT(table->valid_hooks & (1 << hook)); private = table->private; - table_base = (void *)private->entries[smp_processor_id()]; + table_base = (void *)private->entries[raw_smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); /* For return from builtin chain */ patches/preempt-realtime-warn-and-bug-on-fix.patch0000664000077200007720000000165610655544575021524 0ustar mingomingo To fix the following compile error by enclosing it in ifndef __ASSEMBLY__/endif. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - include/asm-generic/bug.h include/asm-generic/bug.h: Assembler messages: include/asm-generic/bug.h:7: Error: Unrecognized opcode: `extern' - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Signed-off-by: Tsutomu Owa -- owa --- include/asm-generic/bug.h | 2 ++ 1 file changed, 2 insertions(+) Index: linux-rt-rebase.q/include/asm-generic/bug.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-generic/bug.h +++ linux-rt-rebase.q/include/asm-generic/bug.h @@ -3,7 +3,9 @@ #include +#ifndef __ASSEMBLY__ extern void __WARN_ON(const char *func, const char *file, const int line); +#endif /* __ASSEMBLY__ */ #ifdef CONFIG_BUG patches/preempt-irqs-i386-idle-poll-loop-fix.patch0000664000077200007720000000102110655544575021305 0ustar mingomingo--- arch/i386/kernel/process.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/i386/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/process.c +++ linux-rt-rebase.q/arch/i386/kernel/process.c @@ -136,7 +136,9 @@ EXPORT_SYMBOL(default_idle); */ static void poll_idle (void) { - cpu_relax(); + do { + cpu_relax(); + } while (!need_resched() && !need_resched_delayed()); } #ifdef CONFIG_HOTPLUG_CPU patches/latency-tracing-exclude-printk.patch0000664000077200007720000000173310655544572020603 0ustar mingomingo kernel/printk.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) Index: linux/kernel/printk.c =================================================================== --- linux.orig/kernel/printk.c +++ linux/kernel/printk.c @@ -328,8 +328,23 @@ static void __call_console_drivers(unsig for (con = console_drivers; con; con = con->next) { if ((con->flags & CON_ENABLED) && con->write && (cpu_online(smp_processor_id()) || - (con->flags & CON_ANYTIME))) + (con->flags & CON_ANYTIME))) { + /* + * Disable tracing of printk details - it just + * clobbers the trace output with lots of + * repetitive lines (especially if console is + * on a serial line): + */ +#ifdef CONFIG_EVENT_TRACE + int trace_save = trace_enabled; + + trace_enabled = 0; + con->write(con, &LOG_BUF(start), end - start); + trace_enabled = trace_save; +#else con->write(con, &LOG_BUF(start), end - start); +#endif + } } touch_critical_timing(); } patches/Add-dev-rmem-device-driver-for-real-time-JVM-testing.patch0000664000077200007720000001332710655544576024267 0ustar mingomingoAdd /dev/rmem device driver for real-time JVM testing From: Theodore Ts'o This kernel modules is needed for use by the TCK conformance test which tests the JVM's RTSJ implementation. Unfortunately, RTSJ requires that Java programs have direct access to physical memory, and /dev/mem does not allow mmap to work to anything beyond I/O mapped memory regions on the x86 platform. Since this is a spectacularly bad idea (so much for write once, debug everywehere) and could potentially destablize the kernel, set the TAINT_USER flag if available. 
Signed-off-by: "Theodore Ts'o" --- drivers/char/Kconfig | 11 ++++ drivers/char/Makefile | 1 drivers/char/rmem.c | 134 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 146 insertions(+) Index: linux-rt-rebase.q/drivers/char/Kconfig =================================================================== --- linux-rt-rebase.q.orig/drivers/char/Kconfig +++ linux-rt-rebase.q/drivers/char/Kconfig @@ -1096,6 +1096,17 @@ config TELCLOCK /sys/devices/platform/telco_clock, with a number of files for controlling the behavior of this hardware. +config RMEM + tristate "Access to physical memory via /dev/rmem" + default m + help + The /dev/mem device only allows mmap() memory available to + I/O mapped memory; it does not allow access to "real" + physical memory. The /dev/rmem device is a hack which does + allow access to physical memory. We use this instead of + patching /dev/mem because we don't expect this functionality + to ever be accepted into mainline. + config DEVPORT bool depends on !M68K Index: linux-rt-rebase.q/drivers/char/Makefile =================================================================== --- linux-rt-rebase.q.orig/drivers/char/Makefile +++ linux-rt-rebase.q/drivers/char/Makefile @@ -98,6 +98,7 @@ obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o obj-$(CONFIG_GPIO_TB0219) += tb0219.o obj-$(CONFIG_TELCLOCK) += tlclk.o +obj-$(CONFIG_RMEM) += rmem.o obj-$(CONFIG_MWAVE) += mwave/ obj-$(CONFIG_AGP) += agp/ Index: linux-rt-rebase.q/drivers/char/rmem.c =================================================================== --- /dev/null +++ linux-rt-rebase.q/drivers/char/rmem.c @@ -0,0 +1,134 @@ +/* + * Rmem - REALLY simple memory mapping demonstration. + * + * Copyright (C) 2005 by Theodore Ts'o + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int rmem_major = 0; +module_param(rmem_major, int, 0444); + +static struct class *rmem_class; + +MODULE_AUTHOR("Theodore Ts'o"); +MODULE_LICENSE("GPL"); + +struct page *rmem_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + struct page *pageptr; + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long physaddr = address - vma->vm_start + offset; + unsigned long pageframe = physaddr >> PAGE_SHIFT; + + if (!pfn_valid(pageframe)) + return NOPAGE_SIGBUS; + pageptr = pfn_to_page(pageframe); + get_page(pageptr); + if (type) + *type = VM_FAULT_MINOR; + return pageptr; +} + +static struct vm_operations_struct rmem_nopage_vm_ops = { + .nopage = rmem_vma_nopage, +}; + +static int rmem_nopage_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + + if (offset >= __pa(high_memory) || (filp->f_flags & O_SYNC)) + vma->vm_flags |= VM_IO; + vma->vm_flags |= VM_RESERVED; + vma->vm_ops = &rmem_nopage_vm_ops; +#ifdef TAINT_USER + add_taint(TAINT_USER); +#endif + return 0; +} + +static struct file_operations rmem_nopage_ops = { + .owner = THIS_MODULE, + .mmap = rmem_nopage_mmap, +}; + +static struct cdev rmem_cdev = { + .kobj = {.name = "rmem", }, + .owner = THIS_MODULE, +}; + +static int __init rmem_init(void) +{ + int result; + dev_t dev = MKDEV(rmem_major, 0); + + /* Figure out our device number. */ + if (rmem_major) + result = register_chrdev_region(dev, 1, "rmem"); + else { + result = alloc_chrdev_region(&dev, 0, 1, "rmem"); + rmem_major = MAJOR(dev); + } + if (result < 0) { + printk(KERN_WARNING "rmem: unable to get major %d\n", rmem_major); + return result; + } + if (rmem_major == 0) + rmem_major = result; + + cdev_init(&rmem_cdev, &rmem_nopage_ops); + result = cdev_add(&rmem_cdev, dev, 1); + if (result) { + printk (KERN_NOTICE "Error %d adding /dev/rmem", result); + kobject_put(&rmem_cdev.kobj); + unregister_chrdev_region(dev, 1); + return 1; + } + + rmem_class = class_create(THIS_MODULE, "rmem"); + class_device_create(rmem_class, NULL, dev, NULL, "rmem"); + + return 0; +} + + +static void __exit rmem_cleanup(void) +{ + cdev_del(&rmem_cdev); + unregister_chrdev_region(MKDEV(rmem_major, 0), 1); + class_destroy(rmem_class); +} + + +module_init(rmem_init); +module_exit(rmem_cleanup); patches/ppc-highres-dyntick.patch0000664000077200007720000000541110655544572016434 0ustar mingomingoFrom sshtylyov@ru.mvista.com Thu May 17 19:45:16 2007 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.1.7-deb (2006-10-05) on debian X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=AWL autolearn=unavailable version=3.1.7-deb Received: from imap.sh.mvista.com (unknown [63.81.120.155]) by mail.tglx.de (Postfix) with ESMTP id E0E7965C065 for ; Thu, 17 May 2007 19:45:16 +0200 (CEST) Received: from wasted.dev.rtsoft.ru (unknown [10.150.0.9]) by imap.sh.mvista.com (Postfix) with ESMTP id 323023EC9; Thu, 17 May 2007 10:45:13 -0700 (PDT) From: Sergei Shtylyov Organization: MontaVista Software Inc. 
To: tglx@linutronix.de, mingo@elte.hu Subject: [PATCH 2.6.21-rt2] PowerPC: enable HRT and dynticks support Date: Thu, 17 May 2007 21:46:46 +0400 User-Agent: KMail/1.5 Cc: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org MIME-Version: 1.0 Content-Disposition: inline Content-Type: text/plain; charset="iso-8859-1" Message-Id: <200705172146.46769.sshtylyov@ru.mvista.com> X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit Enable HRT and dynamic ticks support for PowerPC. Signed-off-by: Sergei Shtylyov --- This patch has been reworked against the 2.6.21 clockevents framework. It has only been tested on the Book E 32-bit CPU this time, so re-testing on "classic" PowerPC CPUs is needed (there have been issues as of 2.6.18-rt7 but those should now be fixed)... arch/powerpc/Kconfig | 1 + arch/powerpc/kernel/idle.c | 6 ++++++ 2 files changed, 7 insertions(+) Index: linux/arch/powerpc/Kconfig =================================================================== --- linux.orig/arch/powerpc/Kconfig +++ linux/arch/powerpc/Kconfig @@ -171,6 +171,7 @@ config GENERIC_CLOCKEVENTS NOTE: This is not compatible with the deterministic time accounting option on PPC64. +source kernel/time/Kconfig source kernel/Kconfig.preempt source "fs/Kconfig.binfmt" Index: linux/arch/powerpc/kernel/idle.c =================================================================== --- linux.orig/arch/powerpc/kernel/idle.c +++ linux/arch/powerpc/kernel/idle.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -59,6 +60,8 @@ void cpu_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while (1) { + tick_nohz_stop_sched_tick(); + while (!need_resched() && !cpu_should_die()) { ppc64_runlatch_off(); @@ -92,6 +95,9 @@ void cpu_idle(void) ppc64_runlatch_on(); if (cpu_should_die()) cpu_die(); + + tick_nohz_restart_sched_tick(); + preempt_enable_no_resched(); schedule(); preempt_disable(); patches/rt-mutex-drop-generic-TIF_NEED_RESCHED_DELAYED.patch0000664000077200007720000000145010655544574022421 0ustar mingomingoNo need for a generic TIF_NEED_RESCHED_DELAYED , since all the architectures patches should be applied by now. --- include/linux/preempt.h | 9 --------- 1 file changed, 9 deletions(-) Index: linux-rt-rebase.q/include/linux/preempt.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/preempt.h +++ linux-rt-rebase.q/include/linux/preempt.h @@ -68,15 +68,6 @@ do { \ preempt_schedule(); \ } while (0) - -/* - * If the architecture doens't have TIF_NEED_RESCHED_DELAYED - * help it out and define it back to TIF_NEED_RESCHED - */ -#ifndef TIF_NEED_RESCHED_DELAYED -# define TIF_NEED_RESCHED_DELAYED TIF_NEED_RESCHED -#endif - #define preempt_check_resched_delayed() \ do { \ if (unlikely(test_thread_flag(TIF_NEED_RESCHED_DELAYED))) \ patches/rtmutex-debug.h-cleanup.patch0000664000077200007720000000275610655544571017237 0ustar mingomingoSubject: [patch] lock debugging: clean up rtmutex-debug.h From: Ingo Molnar style cleanups. 
Signed-off-by: Ingo Molnar --- kernel/rtmutex-debug.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) Index: linux/kernel/rtmutex-debug.h =================================================================== --- linux.orig/kernel/rtmutex-debug.h +++ linux/kernel/rtmutex-debug.h @@ -17,17 +17,17 @@ extern void debug_rt_mutex_free_waiter(s extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); extern void debug_rt_mutex_lock(struct rt_mutex *lock); extern void debug_rt_mutex_unlock(struct rt_mutex *lock); -extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, - struct task_struct *powner); +extern void +debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner); extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter, struct rt_mutex *lock); extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter); -# define debug_rt_mutex_reset_waiter(w) \ +# define debug_rt_mutex_reset_waiter(w) \ do { (w)->deadlock_lock = NULL; } while (0) -static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, - int detect) +static inline int +debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, int detect) { - return (waiter != NULL); + return waiter != NULL; } patches/lockdep-lock_set_subclass.patch0000664000077200007720000000711310655544572017702 0ustar mingomingoSubject: [patch] lockdep: lock_set_subclass - reset a held lock's subclass From: Peter Zijlstra this can be used to reset a held lock's subclass, for arbitrary-depth iterated data structures such as trees or lists which have per-node locks. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 4 ++ kernel/lockdep.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) Index: linux/include/linux/lockdep.h =================================================================== --- linux.orig/include/linux/lockdep.h +++ linux/include/linux/lockdep.h @@ -288,6 +288,9 @@ extern void lock_acquire(struct lockdep_ extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); +extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, + unsigned long ip); + # define INIT_LOCKDEP .lockdep_recursion = 0, #define lockdep_depth(tsk) (debug_locks ? 
(tsk)->lockdep_depth : 0) @@ -304,6 +307,7 @@ static inline void lockdep_on(void) # define lock_acquire(l, s, t, r, c, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) Index: linux/kernel/lockdep.c =================================================================== --- linux.orig/kernel/lockdep.c +++ linux/kernel/lockdep.c @@ -2537,6 +2537,55 @@ static int check_unlock(struct task_stru return 1; } +static int +__lock_set_subclass(struct lockdep_map *lock, + unsigned int subclass, unsigned long ip) +{ + struct task_struct *curr = current; + struct held_lock *hlock, *prev_hlock; + struct lock_class *class; + unsigned int depth; + int i; + + depth = curr->lockdep_depth; + if (DEBUG_LOCKS_WARN_ON(!depth)) + return 0; + + prev_hlock = NULL; + for (i = depth-1; i >= 0; i--) { + hlock = curr->held_locks + i; + /* + * We must not cross into another context: + */ + if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) + break; + if (hlock->instance == lock) + goto found_it; + prev_hlock = hlock; + } + return print_unlock_inbalance_bug(curr, lock, ip); + +found_it: + class = register_lock_class(lock, subclass, 0); + hlock->class = class; + + curr->lockdep_depth = i; + curr->curr_chain_key = hlock->prev_chain_key; + + for (; i < depth; i++) { + hlock = curr->held_locks + i; + if (!__lock_acquire(hlock->instance, + hlock->class->subclass, hlock->trylock, + hlock->read, hlock->check, hlock->hardirqs_off, + hlock->acquire_ip)) + return 0; + } + + if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) + return 0; + return 1; +} + /* * Remove the lock to the list of currently held locks in a * potentially non-nested (out of order) manner. 
This is a @@ -2695,6 +2744,26 @@ static notrace void check_flags(unsigned #endif } +void +lock_set_subclass(struct lockdep_map *lock, + unsigned int subclass, unsigned long ip) +{ + unsigned long flags; + + if (unlikely(current->lockdep_recursion)) + return; + + raw_local_irq_save(flags); + current->lockdep_recursion = 1; + check_flags(flags); + if (__lock_set_subclass(lock, subclass, ip)) + check_chain_key(current); + current->lockdep_recursion = 0; + raw_local_irq_restore(flags); +} + +EXPORT_SYMBOL_GPL(lock_set_subclass); + /* * We are not always called with irqs disabled - do that here, * and also avoid lockdep recursion: patches/preempt-irqs-timer.patch0000664000077200007720000001646210655544573016337 0ustar mingomingo--- include/linux/timer.h | 4 + kernel/timer.c | 127 +++++++++++++++++++++++++++++++++++++------------- 2 files changed, 98 insertions(+), 33 deletions(-) Index: linux-rt-rebase.q/include/linux/timer.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/timer.h +++ linux-rt-rebase.q/include/linux/timer.h @@ -146,10 +146,12 @@ static inline void add_timer(struct time __mod_timer(timer, timer->expires); } -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_SOFTIRQS) + extern int timer_pending_sync(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); extern int del_timer_sync(struct timer_list *timer); #else +# define timer_pending_sync(t) timer_pending(t) # define try_to_del_timer_sync(t) del_timer(t) # define del_timer_sync(t) del_timer(t) #endif Index: linux-rt-rebase.q/kernel/timer.c =================================================================== --- linux-rt-rebase.q.orig/kernel/timer.c +++ linux-rt-rebase.q/kernel/timer.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -68,6 +69,7 @@ typedef struct tvec_root_s { struct tvec_t_base_s { spinlock_t lock; struct timer_list *running_timer; + wait_queue_head_t wait_for_running_timer; unsigned long timer_jiffies; tvec_root_t tv1; tvec_t tv2; @@ -248,9 +250,7 @@ EXPORT_SYMBOL_GPL(round_jiffies_relative static inline void set_running_timer(tvec_base_t *base, struct timer_list *timer) { -#ifdef CONFIG_SMP base->running_timer = timer; -#endif } static void internal_add_timer(tvec_base_t *base, struct timer_list *timer) @@ -394,7 +394,7 @@ int __mod_timer(struct timer_list *timer { tvec_base_t *base, *new_base; unsigned long flags; - int ret = 0; + int ret = 0, cpu; timer_stats_timer_set_start_info(timer); BUG_ON(!timer->function); @@ -406,7 +406,8 @@ int __mod_timer(struct timer_list *timer ret = 1; } - new_base = __get_cpu_var(tvec_bases); + cpu = raw_smp_processor_id(); + new_base = per_cpu(tvec_bases, cpu); if (base != new_base) { /* @@ -455,6 +456,17 @@ void add_timer_on(struct timer_list *tim spin_unlock_irqrestore(&base->lock, flags); } +/* + * Wait for a running timer + */ +void wait_for_running_timer(struct timer_list *timer) +{ + tvec_base_t *base = timer->base; + + if (base->running_timer == timer) + wait_event(base->wait_for_running_timer, + base->running_timer != timer); +} /** * mod_timer - modify a timer's timeout @@ -526,7 +538,35 @@ int del_timer(struct timer_list *timer) EXPORT_SYMBOL(del_timer); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_SOFTIRQS) +/* + * This function checks whether a timer is active and not running on any + * CPU. Upon successful (ret >= 0) exit the timer is not queued and the + * handler is not running on any CPU. 
+ * + * It must not be called from interrupt contexts. + */ +int timer_pending_sync(struct timer_list *timer) +{ + tvec_base_t *base; + unsigned long flags; + int ret = -1; + + base = lock_timer_base(timer, &flags); + + if (base->running_timer == timer) + goto out; + + ret = 0; + if (timer_pending(timer)) + ret = 1; +out: + spin_unlock_irqrestore(&base->lock, flags); + + return ret; +} + + /** * try_to_del_timer_sync - Try to deactivate a timer * @timer: timer do del @@ -583,7 +623,7 @@ int del_timer_sync(struct timer_list *ti int ret = try_to_del_timer_sync(timer); if (ret >= 0) return ret; - cpu_relax(); + wait_for_running_timer(timer); } } @@ -629,6 +669,20 @@ static inline void __run_timers(tvec_bas struct list_head *head = &work_list; int index = base->timer_jiffies & TVR_MASK; + if (softirq_need_resched()) { + spin_unlock_irq(&base->lock); + wake_up(&base->wait_for_running_timer); + cond_resched_softirq_context(); + cpu_relax(); + spin_lock_irq(&base->lock); + /* + * We can simply continue after preemption, nobody + * else can touch timer_jiffies so 'index' is still + * valid. Any new jiffy will be taken care of in + * subsequent loops: + */ + } + /* * Cascade timers: */ @@ -656,18 +710,17 @@ static inline void __run_timers(tvec_bas int preempt_count = preempt_count(); fn(data); if (preempt_count != preempt_count()) { - printk(KERN_WARNING "huh, entered %p " - "with preempt_count %08x, exited" - " with %08x?\n", - fn, preempt_count, - preempt_count()); - BUG(); + print_symbol("BUG: unbalanced timer-handler preempt count in %s!\n", (unsigned long) fn); + printk("entered with %08x, exited with %08x.\n", preempt_count, preempt_count()); + preempt_count() = preempt_count; } } + set_running_timer(base, NULL); + cond_resched_softirq_context(); spin_lock_irq(&base->lock); } } - set_running_timer(base, NULL); + wake_up(&base->wait_for_running_timer); spin_unlock_irq(&base->lock); } @@ -830,10 +883,10 @@ void update_process_times(int user_tick) account_user_time(p, jiffies_to_cputime(1)); else account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1)); + scheduler_tick(); run_local_timers(); if (rcu_pending(cpu)) rcu_check_callbacks(cpu, user_tick); - scheduler_tick(); run_posix_cpu_timers(p); } @@ -879,35 +932,45 @@ static inline void calc_load(unsigned lo } /* - * This function runs timers and the timer-tq in bottom half context. + * Called by the local, per-CPU timer interrupt on SMP. */ -static void run_timer_softirq(struct softirq_action *h) +void run_local_timers(void) { - tvec_base_t *base = __get_cpu_var(tvec_bases); - - hrtimer_run_queues(); - - if (time_after_eq(jiffies, base->timer_jiffies)) - __run_timers(base); + raise_softirq(TIMER_SOFTIRQ); + softlockup_tick(); } /* - * Called by the local, per-CPU timer interrupt on SMP. + * Time of day handling: */ -void run_local_timers(void) +static inline void update_times(void) { - raise_softirq(TIMER_SOFTIRQ); - softlockup_tick(); + static unsigned long last_tick = INITIAL_JIFFIES; + unsigned long ticks, flags; + + write_seqlock_irqsave(&xtime_lock, flags); + ticks = jiffies - last_tick; + if (ticks) { + last_tick += ticks; + update_wall_time(); + calc_load(ticks); + } + write_sequnlock_irqrestore(&xtime_lock, flags); } + /* - * Called by the timer interrupt. xtime_lock must already be taken - * by the timer IRQ! + * This function runs timers and the timer-tq in bottom half context. 
*/ -static inline void update_times(unsigned long ticks) +static void run_timer_softirq(struct softirq_action *h) { - update_wall_time(); - calc_load(ticks); + tvec_base_t *base = __get_cpu_var(tvec_bases); + + update_times(); + hrtimer_run_queues(); + + if (time_after_eq(jiffies, base->timer_jiffies)) + __run_timers(base); } /* @@ -919,7 +982,6 @@ static inline void update_times(unsigned void do_timer(unsigned long ticks) { jiffies_64 += ticks; - update_times(ticks); } #ifdef __ARCH_WANT_SYS_ALARM @@ -1251,6 +1313,7 @@ static int __devinit init_timers_cpu(int spin_lock_init(&base->lock); lockdep_set_class(&base->lock, base_lock_keys + cpu); + init_waitqueue_head(&base->wait_for_running_timer); for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); patches/preempt-irqs-ppc.patch0000664000077200007720000001202510655544573015770 0ustar mingomingo--- arch/powerpc/kernel/entry_32.S | 6 +++--- arch/powerpc/kernel/irq.c | 2 -- arch/powerpc/kernel/ppc_ksyms.c | 1 - arch/powerpc/platforms/iseries/setup.c | 6 ++++-- arch/powerpc/platforms/pseries/setup.c | 6 ++++-- include/asm-powerpc/thread_info.h | 5 +++++ 6 files changed, 16 insertions(+), 10 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/kernel/entry_32.S =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/entry_32.S +++ linux-rt-rebase.q/arch/powerpc/kernel/entry_32.S @@ -641,7 +641,7 @@ user_exc_return: /* r10 contains MSR_KE /* Check current_thread_info()->flags */ rlwinm r9,r1,0,0,(31-THREAD_SHIFT) lwz r9,TI_FLAGS(r9) - andi. r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED) + andi. r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) bne do_work restore_user: @@ -863,7 +863,7 @@ global_dbcr0: #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ do_work: /* r10 contains MSR_KERNEL here */ - andi. r0,r9,_TIF_NEED_RESCHED + andi. r0,r9,(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) beq do_user_signal do_resched: /* r10 contains MSR_KERNEL here */ @@ -877,7 +877,7 @@ recheck: MTMSRD(r10) /* disable interrupts */ rlwinm r9,r1,0,0,(31-THREAD_SHIFT) lwz r9,TI_FLAGS(r9) - andi. r0,r9,_TIF_NEED_RESCHED + andi. r0,r9,(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED) bne- do_resched andi. 
r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK beq restore_user Index: linux-rt-rebase.q/arch/powerpc/kernel/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/irq.c +++ linux-rt-rebase.q/arch/powerpc/kernel/irq.c @@ -93,8 +93,6 @@ extern atomic_t ipi_sent; #endif #ifdef CONFIG_PPC64 -EXPORT_SYMBOL(irq_desc); - int distribute_irqs = 1; static inline unsigned long get_hard_enabled(void) Index: linux-rt-rebase.q/arch/powerpc/kernel/ppc_ksyms.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/ppc_ksyms.c +++ linux-rt-rebase.q/arch/powerpc/kernel/ppc_ksyms.c @@ -170,7 +170,6 @@ EXPORT_SYMBOL(screen_info); #ifdef CONFIG_PPC32 EXPORT_SYMBOL(timer_interrupt); -EXPORT_SYMBOL(irq_desc); EXPORT_SYMBOL(tb_ticks_per_jiffy); EXPORT_SYMBOL(console_drivers); EXPORT_SYMBOL(cacheable_memcpy); Index: linux-rt-rebase.q/arch/powerpc/platforms/iseries/setup.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/iseries/setup.c +++ linux-rt-rebase.q/arch/powerpc/platforms/iseries/setup.c @@ -562,12 +562,14 @@ static void yield_shared_processor(void) static void iseries_shared_idle(void) { while (1) { - while (!need_resched() && !hvlpevent_is_pending()) { + while (!need_resched() && !need_resched_delayed() + && !hvlpevent_is_pending()) { local_irq_disable(); ppc64_runlatch_off(); /* Recheck with irqs off */ - if (!need_resched() && !hvlpevent_is_pending()) + if (!need_resched() && !need_resched_delayed() + && !hvlpevent_is_pending()) yield_shared_processor(); HMT_medium(); Index: linux-rt-rebase.q/arch/powerpc/platforms/pseries/setup.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/platforms/pseries/setup.c +++ linux-rt-rebase.q/arch/powerpc/platforms/pseries/setup.c @@ -418,7 +418,8 @@ static void pseries_dedicated_idle_sleep set_thread_flag(TIF_POLLING_NRFLAG); while (get_tb() < start_snooze) { - if (need_resched() || cpu_is_offline(cpu)) + if (need_resched() || need_resched_delayed() || + cpu_is_offline(cpu)) goto out; ppc64_runlatch_off(); HMT_low(); @@ -429,7 +430,8 @@ static void pseries_dedicated_idle_sleep clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb(); local_irq_disable(); - if (need_resched() || cpu_is_offline(cpu)) + if (need_resched() || need_resched_delayed() || + cpu_is_offline(cpu)) goto out; } Index: linux-rt-rebase.q/include/asm-powerpc/thread_info.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/thread_info.h +++ linux-rt-rebase.q/include/asm-powerpc/thread_info.h @@ -124,6 +124,9 @@ static inline struct thread_info *curren #define TIF_FREEZE 14 /* Freezing for suspend */ #define TIF_RUNLATCH 15 /* Is the runlatch enabled? 
*/ #define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */ +#define TIF_NEED_RESCHED_DELAYED \ + 17 /* reschedule on return to userspace */ + /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< --- include/linux/pagemap.h | 62 ++++++++++++++++++++++++++++++++++++++++++++---- mm/filemap.c | 17 ++----------- 2 files changed, 60 insertions(+), 19 deletions(-) Index: linux-rt-rebase.q/include/linux/pagemap.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/pagemap.h +++ linux-rt-rebase.q/include/linux/pagemap.h @@ -15,6 +15,9 @@ #include #include /* for in_interrupt() */ #include +#include +#include +#include /* * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page @@ -65,6 +68,26 @@ static inline void mapping_set_gfp_mask( #define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, int cold); +/* + * In order to wait for pages to become available there must be + * waitqueues associated with pages. By using a hash table of + * waitqueues where the bucket discipline is to maintain all + * waiters on the same queue and wake all when any of the pages + * become available, and for the woken contexts to check to be + * sure the appropriate page became available, this saves space + * at a cost of "thundering herd" phenomena during rare hash + * collisions. + */ +static inline wait_queue_head_t *page_waitqueue(struct page *page) +{ + const struct zone *zone = page_zone(page); + + return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; +} + +extern int __sleep_on_page(void *); + +#ifndef CONFIG_PREEMPT_RT static inline void lock_page_ref(struct page *page) { bit_spin_lock(PG_nonewrefs, &page->flags); @@ -81,29 +104,58 @@ static inline void wait_on_page_ref(stru while (unlikely(test_bit(PG_nonewrefs, &page->flags))) cpu_relax(); } +#else // CONFIG_PREEMPT_RT +static inline void wait_on_page_ref(struct page *page) +{ + might_sleep(); + if (unlikely(PageNoNewRefs(page))) { + DEFINE_WAIT_BIT(wait, &page->flags, PG_nonewrefs); + __wait_on_bit(page_waitqueue(page), &wait, __sleep_on_page, + TASK_UNINTERRUPTIBLE); + } +} + +static inline void lock_page_ref(struct page *page) +{ + while (test_and_set_bit(PG_nonewrefs, &page->flags)) + wait_on_page_ref(page); + __acquire(bitlock); + smp_wmb(); +} + +static inline void unlock_page_ref(struct page *page) +{ + VM_BUG_ON(!PageNoNewRefs(page)); + smp_mb__before_clear_bit(); + ClearPageNoNewRefs(page); + smp_mb__after_clear_bit(); + __wake_up_bit(page_waitqueue(page), &page->flags, PG_nonewrefs); + __release(bitlock); +} +#endif // CONFIG_PREEMPT_RT #define lock_page_ref_irq(page) \ do { \ - local_irq_disable(); \ + local_irq_disable_nort(); \ lock_page_ref(page); \ } while (0) #define unlock_page_ref_irq(page) \ do { \ unlock_page_ref(page); \ - local_irq_enable(); \ + local_irq_enable_nort(); \ } while (0) #define lock_page_ref_irqsave(page, flags) \ do { \ - local_irq_save(flags); \ + local_irq_save_nort(flags); \ lock_page_ref(page); \ } while (0) #define unlock_page_ref_irqrestore(page, flags) \ do { \ unlock_page_ref(page); \ - local_irq_restore(flags); \ + local_irq_restore_nort(flags); \ } while (0) /* @@ -155,7 +207,7 @@ static inline int page_cache_get_specula { VM_BUG_ON(in_interrupt()); -#ifndef CONFIG_SMP +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT) # ifdef CONFIG_PREEMPT VM_BUG_ON(!in_atomic()); # endif Index: linux-rt-rebase.q/mm/filemap.c =================================================================== 
--- linux-rt-rebase.q.orig/mm/filemap.c +++ linux-rt-rebase.q/mm/filemap.c @@ -490,21 +490,10 @@ static int __sleep_on_page_lock(void *wo return 0; } -/* - * In order to wait for pages to become available there must be - * waitqueues associated with pages. By using a hash table of - * waitqueues where the bucket discipline is to maintain all - * waiters on the same queue and wake all when any of the pages - * become available, and for the woken contexts to check to be - * sure the appropriate page became available, this saves space - * at a cost of "thundering herd" phenomena during rare hash - * collisions. - */ -static wait_queue_head_t *page_waitqueue(struct page *page) +int __sleep_on_page(void *word) { - const struct zone *zone = page_zone(page); - - return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; + schedule(); + return 0; } static inline void wake_up_page(struct page *page, int bit) patches/ich-force-hpet-ich5-fix-a-bug-with-suspend-resume.patch0000664000077200007720000000237410655544570023711 0ustar mingomingoFrom: Venki Pallipadi A bugfix in ich5 hpet force detect which caused resumes to fail. Thanks to Udo A Steinberg for reporting the problem. Signed-off-by: Venkatesh Pallipadi Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Andi Kleen Cc: john stultz Cc: Greg KH Signed-off-by: Andrew Morton --- arch/i386/kernel/quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux/arch/i386/kernel/quirks.c =================================================================== --- linux.orig/arch/i386/kernel/quirks.c +++ linux/arch/i386/kernel/quirks.c @@ -201,7 +201,6 @@ static void old_ich_force_enable_hpet(st force_hpet_address = 0xFED00000 | (val << 12); printk(KERN_DEBUG "HPET at base address 0x%lx\n", force_hpet_address); - cached_dev = dev; return; } @@ -223,6 +222,7 @@ static void old_ich_force_enable_hpet(st force_hpet_address = 0xFED00000 | (val << 12); printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", force_hpet_address); + cached_dev = dev; force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME; return; } patches/latency-tracing-i386-paravirt-fastcall.patch0000664000077200007720000007313010655544571021752 0ustar mingomingoSubject: [patch] paravirt: mark assembly dependencies as fastcall From: Ingo Molnar the 'fastcall removal' changes to paravirt.c were over-eager: they removed fastcall annotations from functions that are (or might be) implemented in assembly. So if someone changes the compiler model, such as -pg which disables regparm, the kernel breaks in nasty ways. so this patch adds back fastcall annotations. This serves as documentation for assembly calling-convention dependencies as well. 
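For reference only (an illustration added here, not part of the patch): on i386 the fastcall annotation is essentially the regparm(3) register-argument attribute, so annotating a hook pins its calling convention even if the rest of the kernel is built with a different model (for example a -pg build that disables regparm, as noted above). A minimal sketch, assuming the usual i386 definition from linkage.h and using native_irq_disable() from the patch below as the example:

    /* illustration only -- roughly what the i386 linkage.h provides */
    #define fastcall __attribute__((regparm(3)))

    /*
     * An annotated hook keeps receiving its arguments in registers
     * (%eax/%edx/%ecx), which is the kind of assembly calling-convention
     * dependency this changelog refers to.
     */
    static fastcall void native_irq_disable(void)
    {
            asm volatile("cli" : : : "memory");
    }
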
Signed-off-by: Ingo Molnar --- arch/i386/kernel/paravirt.c | 6 - arch/i386/kernel/smp.c | 7 + include/asm-i386/desc.h | 18 ++-- include/asm-i386/io.h | 2 include/asm-i386/irqflags.h | 12 +- include/asm-i386/msr.h | 13 +-- include/asm-i386/page.h | 21 ++--- include/asm-i386/paravirt.h | 156 +++++++++++++++++++------------------- include/asm-i386/pgtable-2level.h | 10 +- include/asm-i386/pgtable-3level.h | 18 ++-- include/asm-i386/pgtable.h | 2 include/asm-i386/processor.h | 8 - include/asm-i386/system.h | 22 ++--- include/asm-i386/time.h | 4 include/asm-i386/tlbflush.h | 4 15 files changed, 154 insertions(+), 149 deletions(-) Index: linux/arch/i386/kernel/paravirt.c =================================================================== --- linux.orig/arch/i386/kernel/paravirt.c +++ linux/arch/i386/kernel/paravirt.c @@ -208,7 +208,7 @@ void init_IRQ(void) paravirt_ops.init_IRQ(); } -static void native_flush_tlb(void) +static fastcall void native_flush_tlb(void) { __native_flush_tlb(); } @@ -217,12 +217,12 @@ static void native_flush_tlb(void) * Global pages have to be flushed a bit differently. Not a real * performance problem because this does not happen often. */ -static void native_flush_tlb_global(void) +static fastcall void native_flush_tlb_global(void) { __native_flush_tlb_global(); } -static void native_flush_tlb_single(unsigned long addr) +static fastcall void native_flush_tlb_single(unsigned long addr) { __native_flush_tlb_single(addr); } Index: linux/arch/i386/kernel/smp.c =================================================================== --- linux.orig/arch/i386/kernel/smp.c +++ linux/arch/i386/kernel/smp.c @@ -344,8 +344,9 @@ out: put_cpu_no_resched(); } -void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, - unsigned long va) +void fastcall +native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, + unsigned long va) { cpumask_t cpumask = *cpumaskp; @@ -470,6 +471,7 @@ void flush_tlb_all(void) */ static void native_smp_send_reschedule(int cpu) { + trace_special(cpu, 0, 0); WARN_ON(cpu_is_offline(cpu)); send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); } @@ -639,6 +641,7 @@ static void native_smp_send_stop(void) */ fastcall void smp_reschedule_interrupt(struct pt_regs *regs) { + trace_special(regs->eip, 0, 0); ack_APIC_irq(); } Index: linux/include/asm-i386/desc.h =================================================================== --- linux.orig/include/asm-i386/desc.h +++ linux/include/asm-i386/desc.h @@ -78,14 +78,14 @@ static inline void pack_gate(__u32 *a, _ #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) #endif -static inline void write_dt_entry(struct desc_struct *dt, +static inline fastcall void write_dt_entry(struct desc_struct *dt, int entry, u32 entry_low, u32 entry_high) { dt[entry].a = entry_low; dt[entry].b = entry_high; } -static inline void native_set_ldt(const void *addr, unsigned int entries) +static fastcall inline void native_set_ldt(const void *addr, unsigned int entries) { if (likely(entries == 0)) __asm__ __volatile__("lldt %w0"::"q" (0)); @@ -102,39 +102,39 @@ static inline void native_set_ldt(const } -static inline void native_load_tr_desc(void) +static fastcall inline void native_load_tr_desc(void) { asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); } -static inline void native_load_gdt(const struct Xgt_desc_struct *dtr) +static fastcall inline void native_load_gdt(const struct Xgt_desc_struct *dtr) { asm volatile("lgdt %0"::"m" (*dtr)); } -static inline void native_load_idt(const struct 
Xgt_desc_struct *dtr) +static fastcall inline void native_load_idt(const struct Xgt_desc_struct *dtr) { asm volatile("lidt %0"::"m" (*dtr)); } -static inline void native_store_gdt(struct Xgt_desc_struct *dtr) +static fastcall inline void native_store_gdt(struct Xgt_desc_struct *dtr) { asm ("sgdt %0":"=m" (*dtr)); } -static inline void native_store_idt(struct Xgt_desc_struct *dtr) +static fastcall inline void native_store_idt(struct Xgt_desc_struct *dtr) { asm ("sidt %0":"=m" (*dtr)); } -static inline unsigned long native_store_tr(void) +static fastcall inline unsigned long native_store_tr(void) { unsigned long tr; asm ("str %0":"=r" (tr)); return tr; } -static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) +static fastcall inline void native_load_tls(struct thread_struct *t, unsigned int cpu) { unsigned int i; struct desc_struct *gdt = get_cpu_gdt_table(cpu); Index: linux/include/asm-i386/io.h =================================================================== --- linux.orig/include/asm-i386/io.h +++ linux/include/asm-i386/io.h @@ -251,7 +251,7 @@ static inline void flush_write_buffers(v #endif /* __KERNEL__ */ -static inline void native_io_delay(void) +static fastcall inline void native_io_delay(void) { asm volatile("outb %%al,$0x80" : : : "memory"); } Index: linux/include/asm-i386/irqflags.h =================================================================== --- linux.orig/include/asm-i386/irqflags.h +++ linux/include/asm-i386/irqflags.h @@ -12,36 +12,36 @@ #include #ifndef __ASSEMBLY__ -static inline unsigned long native_save_fl(void) +static fastcall inline unsigned long native_save_fl(void) { unsigned long f; asm volatile("pushfl ; popl %0":"=g" (f): /* no input */); return f; } -static inline void native_restore_fl(unsigned long f) +static fastcall inline void native_restore_fl(unsigned long f) { asm volatile("pushl %0 ; popfl": /* no output */ :"g" (f) :"memory", "cc"); } -static inline void native_irq_disable(void) +static fastcall inline void native_irq_disable(void) { asm volatile("cli": : :"memory"); } -static inline void native_irq_enable(void) +static fastcall inline void native_irq_enable(void) { asm volatile("sti": : :"memory"); } -static inline void native_safe_halt(void) +static fastcall inline void native_safe_halt(void) { asm volatile("sti; hlt": : :"memory"); } -static inline void native_halt(void) +static fastcall inline void native_halt(void) { asm volatile("hlt": : :"memory"); } Index: linux/include/asm-i386/msr.h =================================================================== --- linux.orig/include/asm-i386/msr.h +++ linux/include/asm-i386/msr.h @@ -1,6 +1,7 @@ #ifndef __ASM_MSR_H #define __ASM_MSR_H +#include #include #ifdef __KERNEL__ @@ -8,7 +9,7 @@ #include -static inline unsigned long long native_read_msr(unsigned int msr) +static fastcall inline unsigned long long native_read_msr(unsigned int msr) { unsigned long long val; @@ -16,7 +17,7 @@ static inline unsigned long long native_ return val; } -static inline unsigned long long native_read_msr_safe(unsigned int msr, +static fastcall inline unsigned long long native_read_msr_safe(unsigned int msr, int *err) { unsigned long long val; @@ -36,12 +37,12 @@ static inline unsigned long long native_ return val; } -static inline void native_write_msr(unsigned int msr, unsigned long long val) +static fastcall inline void native_write_msr(unsigned int msr, unsigned long long val) { asm volatile("wrmsr" : : "c" (msr), "A"(val)); } -static inline int native_write_msr_safe(unsigned int msr, 
+static fastcall inline int native_write_msr_safe(unsigned int msr, unsigned long long val) { int err; @@ -60,14 +61,14 @@ static inline int native_write_msr_safe( return err; } -static inline unsigned long long native_read_tsc(void) +static fastcall inline unsigned long long native_read_tsc(void) { unsigned long long val; asm volatile("rdtsc" : "=A" (val)); return val; } -static inline unsigned long long native_read_pmc(void) +static fastcall inline unsigned long long native_read_pmc(void) { unsigned long long val; asm volatile("rdpmc" : "=A" (val)); Index: linux/include/asm-i386/page.h =================================================================== --- linux.orig/include/asm-i386/page.h +++ linux/include/asm-i386/page.h @@ -11,6 +11,7 @@ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ +#include #ifdef CONFIG_X86_USE_3DNOW @@ -49,32 +50,32 @@ typedef struct { unsigned long long pmd; typedef struct { unsigned long long pgd; } pgd_t; typedef struct { unsigned long long pgprot; } pgprot_t; -static inline unsigned long long native_pgd_val(pgd_t pgd) +static fastcall inline unsigned long long native_pgd_val(pgd_t pgd) { return pgd.pgd; } -static inline unsigned long long native_pmd_val(pmd_t pmd) +static fastcall inline unsigned long long native_pmd_val(pmd_t pmd) { return pmd.pmd; } -static inline unsigned long long native_pte_val(pte_t pte) +static fastcall inline unsigned long long native_pte_val(pte_t pte) { return pte.pte_low | ((unsigned long long)pte.pte_high << 32); } -static inline pgd_t native_make_pgd(unsigned long long val) +static fastcall inline pgd_t native_make_pgd(unsigned long long val) { return (pgd_t) { val }; } -static inline pmd_t native_make_pmd(unsigned long long val) +static fastcall inline pmd_t native_make_pmd(unsigned long long val) { return (pmd_t) { val }; } -static inline pte_t native_make_pte(unsigned long long val) +static fastcall inline pte_t native_make_pte(unsigned long long val) { return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ; } @@ -92,22 +93,22 @@ typedef struct { unsigned long pgd; } pg typedef struct { unsigned long pgprot; } pgprot_t; #define boot_pte_t pte_t /* or would you rather have a typedef */ -static inline unsigned long native_pgd_val(pgd_t pgd) +static fastcall inline unsigned long native_pgd_val(pgd_t pgd) { return pgd.pgd; } -static inline unsigned long native_pte_val(pte_t pte) +static fastcall inline unsigned long native_pte_val(pte_t pte) { return pte.pte_low; } -static inline pgd_t native_make_pgd(unsigned long val) +static fastcall inline pgd_t native_make_pgd(unsigned long val) { return (pgd_t) { val }; } -static inline pte_t native_make_pte(unsigned long val) +static fastcall inline pte_t native_make_pte(unsigned long val) { return (pte_t) { .pte_low = val }; } Index: linux/include/asm-i386/paravirt.h =================================================================== --- linux.orig/include/asm-i386/paravirt.h +++ linux/include/asm-i386/paravirt.h @@ -70,31 +70,31 @@ struct paravirt_ops void (*banner)(void); /* Set and set time of day */ - unsigned long (*get_wallclock)(void); - int (*set_wallclock)(unsigned long); + unsigned long (fastcall *get_wallclock)(void); + int (fastcall *set_wallclock)(unsigned long); /* cpuid emulation, mostly so that caps bits can be disabled */ - void (*cpuid)(unsigned int *eax, unsigned int *ebx, + void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx); /* hooks for various privileged instructions */ - unsigned long (*get_debugreg)(int regno); - void 
(*set_debugreg)(int regno, unsigned long value); + unsigned long (fastcall *get_debugreg)(int regno); + void (fastcall *set_debugreg)(int regno, unsigned long value); - void (*clts)(void); + void (fastcall *clts)(void); - unsigned long (*read_cr0)(void); - void (*write_cr0)(unsigned long); + unsigned long (fastcall *read_cr0)(void); + void (fastcall *write_cr0)(unsigned long); - unsigned long (*read_cr2)(void); - void (*write_cr2)(unsigned long); + unsigned long (fastcall *read_cr2)(void); + void (fastcall *write_cr2)(unsigned long); - unsigned long (*read_cr3)(void); - void (*write_cr3)(unsigned long); + unsigned long (fastcall *read_cr3)(void); + void (fastcall *write_cr3)(unsigned long); - unsigned long (*read_cr4_safe)(void); - unsigned long (*read_cr4)(void); - void (*write_cr4)(unsigned long); + unsigned long (fastcall *read_cr4_safe)(void); + unsigned long (fastcall *read_cr4)(void); + void (fastcall *write_cr4)(unsigned long); /* * Get/set interrupt state. save_fl and restore_fl are only @@ -102,44 +102,44 @@ struct paravirt_ops * returned from save_fl are undefined, and may be ignored by * restore_fl. */ - unsigned long (*save_fl)(void); - void (*restore_fl)(unsigned long); - void (*irq_disable)(void); - void (*irq_enable)(void); - void (*safe_halt)(void); - void (*halt)(void); + unsigned long (fastcall *save_fl)(void); + void (fastcall *restore_fl)(unsigned long); + void (fastcall *irq_disable)(void); + void (fastcall *irq_enable)(void); + void (fastcall *safe_halt)(void); + void (fastcall *halt)(void); - void (*wbinvd)(void); + void (fastcall *wbinvd)(void); /* MSR, PMC and TSR operations. err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ - u64 (*read_msr)(unsigned int msr, int *err); - int (*write_msr)(unsigned int msr, u64 val); + u64 (fastcall *read_msr)(unsigned int msr, int *err); + int (fastcall *write_msr)(unsigned int msr, u64 val); - u64 (*read_tsc)(void); - u64 (*read_pmc)(void); + u64 (fastcall *read_tsc)(void); + u64 (fastcall *read_pmc)(void); unsigned long long (*sched_clock)(void); unsigned long (*get_cpu_khz)(void); /* Segment descriptor handling */ - void (*load_tr_desc)(void); - void (*load_gdt)(const struct Xgt_desc_struct *); - void (*load_idt)(const struct Xgt_desc_struct *); - void (*store_gdt)(struct Xgt_desc_struct *); - void (*store_idt)(struct Xgt_desc_struct *); - void (*set_ldt)(const void *desc, unsigned entries); - unsigned long (*store_tr)(void); - void (*load_tls)(struct thread_struct *t, unsigned int cpu); - void (*write_ldt_entry)(struct desc_struct *, + void (fastcall *load_tr_desc)(void); + void (fastcall *load_gdt)(const struct Xgt_desc_struct *); + void (fastcall *load_idt)(const struct Xgt_desc_struct *); + void (fastcall *store_gdt)(struct Xgt_desc_struct *); + void (fastcall *store_idt)(struct Xgt_desc_struct *); + void (fastcall *set_ldt)(const void *desc, unsigned entries); + unsigned long (fastcall *store_tr)(void); + void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu); + void (fastcall *write_ldt_entry)(struct desc_struct *, int entrynum, u32 low, u32 high); - void (*write_gdt_entry)(struct desc_struct *, + void (fastcall *write_gdt_entry)(struct desc_struct *, int entrynum, u32 low, u32 high); - void (*write_idt_entry)(struct desc_struct *, + void (fastcall *write_idt_entry)(struct desc_struct *, int entrynum, u32 low, u32 high); - void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t); + void (fastcall *load_esp0)(struct tss_struct *tss, struct thread_struct *t); - void (*set_iopl_mask)(unsigned mask); - void 
(*io_delay)(void); + void (fastcall *set_iopl_mask)(unsigned mask); + void (fastcall *io_delay)(void); /* * Hooks for intercepting the creation/use/destruction of an @@ -156,9 +156,9 @@ struct paravirt_ops * Direct APIC operations, principally for VMI. Ideally * these shouldn't be in this interface. */ - void (*apic_write)(unsigned long reg, unsigned long v); - void (*apic_write_atomic)(unsigned long reg, unsigned long v); - unsigned long (*apic_read)(unsigned long reg); + void (fastcall *apic_write)(unsigned long reg, unsigned long v); + void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v); + unsigned long (fastcall *apic_read)(unsigned long reg); void (*setup_boot_clock)(void); void (*setup_secondary_clock)(void); @@ -168,56 +168,56 @@ struct paravirt_ops #endif /* TLB operations */ - void (*flush_tlb_user)(void); - void (*flush_tlb_kernel)(void); - void (*flush_tlb_single)(unsigned long addr); - void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, + void (fastcall *flush_tlb_user)(void); + void (fastcall *flush_tlb_kernel)(void); + void (fastcall *flush_tlb_single)(unsigned long addr); + void (fastcall *flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, unsigned long va); /* Hooks for allocating/releasing pagetable pages */ - void (*alloc_pt)(struct mm_struct *mm, u32 pfn); - void (*alloc_pd)(u32 pfn); - void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); - void (*release_pt)(u32 pfn); - void (*release_pd)(u32 pfn); + void (fastcall *alloc_pt)(struct mm_struct *mm, u32 pfn); + void (fastcall *alloc_pd)(u32 pfn); + void (fastcall *alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); + void (fastcall *release_pt)(u32 pfn); + void (fastcall *release_pd)(u32 pfn); /* Pagetable manipulation functions */ - void (*set_pte)(pte_t *ptep, pte_t pteval); - void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, + void (fastcall *set_pte)(pte_t *ptep, pte_t pteval); + void (fastcall *set_pte_at)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval); - void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); - void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); - void (*pte_update_defer)(struct mm_struct *mm, + void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval); + void (fastcall *pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + void (fastcall *pte_update_defer)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); #ifdef CONFIG_HIGHPTE - void *(*kmap_atomic_pte)(struct page *page, enum km_type type); + void *(fastcall *kmap_atomic_pte)(struct page *page, enum km_type type); #endif #ifdef CONFIG_X86_PAE - void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); - void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); - void (*set_pud)(pud_t *pudp, pud_t pudval); - void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); - void (*pmd_clear)(pmd_t *pmdp); - - unsigned long long (*pte_val)(pte_t); - unsigned long long (*pmd_val)(pmd_t); - unsigned long long (*pgd_val)(pgd_t); - - pte_t (*make_pte)(unsigned long long pte); - pmd_t (*make_pmd)(unsigned long long pmd); - pgd_t (*make_pgd)(unsigned long long pgd); + void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval); + void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); + void (fastcall *set_pud)(pud_t *pudp, pud_t pudval); + void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + void (fastcall 
*pmd_clear)(pmd_t *pmdp); + + unsigned long long (fastcall *pte_val)(pte_t); + unsigned long long (fastcall *pmd_val)(pmd_t); + unsigned long long (fastcall *pgd_val)(pgd_t); + + pte_t (fastcall *make_pte)(unsigned long long pte); + pmd_t (fastcall *make_pmd)(unsigned long long pmd); + pgd_t (fastcall *make_pgd)(unsigned long long pgd); #else - unsigned long (*pte_val)(pte_t); - unsigned long (*pgd_val)(pgd_t); + unsigned long (fastcall *pte_val)(pte_t); + unsigned long (fastcall *pgd_val)(pgd_t); - pte_t (*make_pte)(unsigned long pte); - pgd_t (*make_pgd)(unsigned long pgd); + pte_t (fastcall *make_pte)(unsigned long pte); + pgd_t (fastcall *make_pgd)(unsigned long pgd); #endif /* Set deferred update mode, used for batching operations. */ - void (*set_lazy_mode)(enum paravirt_lazy_mode mode); + void (fastcall *set_lazy_mode)(enum paravirt_lazy_mode mode); /* These two are jmp to, not actually called. */ void (*irq_enable_sysexit)(void); @@ -415,12 +415,12 @@ static inline void load_esp0(struct tss_ } #define ARCH_SETUP paravirt_ops.arch_setup(); -static inline unsigned long get_wallclock(void) +static fastcall inline unsigned long get_wallclock(void) { return PVOP_CALL0(unsigned long, get_wallclock); } -static inline int set_wallclock(unsigned long nowtime) +static fastcall inline int set_wallclock(unsigned long nowtime) { return PVOP_CALL1(int, set_wallclock, nowtime); } Index: linux/include/asm-i386/pgtable-2level.h =================================================================== --- linux.orig/include/asm-i386/pgtable-2level.h +++ linux/include/asm-i386/pgtable-2level.h @@ -11,16 +11,16 @@ * within a page table are directly modified. Thus, the following * hook is made available. */ -static inline void native_set_pte(pte_t *ptep , pte_t pte) +static fastcall inline void native_set_pte(pte_t *ptep , pte_t pte) { *ptep = pte; } -static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, +static fastcall inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep , pte_t pte) { native_set_pte(ptep, pte); } -static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) +static fastcall inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { *pmdp = pmd; } @@ -36,13 +36,13 @@ static inline void native_set_pmd(pmd_t #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) +static fastcall inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) { *xp = __pte(0); } #ifdef CONFIG_SMP -static inline pte_t native_ptep_get_and_clear(pte_t *xp) +static fastcall inline pte_t native_ptep_get_and_clear(pte_t *xp) { return __pte(xchg(&xp->pte_low, 0)); } Index: linux/include/asm-i386/pgtable-3level.h =================================================================== --- linux.orig/include/asm-i386/pgtable-3level.h +++ linux/include/asm-i386/pgtable-3level.h @@ -33,13 +33,13 @@ static inline int pte_exec_kernel(pte_t * not possible, use pte_get_and_clear to obtain the old pte * value and then use set_pte to update it. 
-ben */ -static inline void native_set_pte(pte_t *ptep, pte_t pte) +static fastcall inline void native_set_pte(pte_t *ptep, pte_t pte) { ptep->pte_high = pte.pte_high; smp_wmb(); ptep->pte_low = pte.pte_low; } -static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, +static fastcall inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep , pte_t pte) { native_set_pte(ptep, pte); @@ -51,7 +51,7 @@ static inline void native_set_pte_at(str * we are justified in merely clearing the PTE present bit, followed * by a set. The ordering here is important. */ -static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, +static fastcall inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { ptep->pte_low = 0; @@ -61,15 +61,15 @@ static inline void native_set_pte_presen ptep->pte_low = pte.pte_low; } -static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) +static fastcall inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) { set_64bit((unsigned long long *)(ptep),native_pte_val(pte)); } -static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) +static fastcall inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd)); } -static inline void native_set_pud(pud_t *pudp, pud_t pud) +static fastcall inline void native_set_pud(pud_t *pudp, pud_t pud) { *pudp = pud; } @@ -79,14 +79,14 @@ static inline void native_set_pud(pud_t * entry, so clear the bottom half first and enforce ordering with a compiler * barrier. */ -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +static fastcall inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { ptep->pte_low = 0; smp_wmb(); ptep->pte_high = 0; } -static inline void native_pmd_clear(pmd_t *pmd) +static fastcall inline void native_pmd_clear(pmd_t *pmd) { u32 *tmp = (u32 *)pmd; *tmp = 0; @@ -125,7 +125,7 @@ static inline void pud_clear (pud_t * pu pmd_index(address)) #ifdef CONFIG_SMP -static inline pte_t native_ptep_get_and_clear(pte_t *ptep) +static fastcall inline pte_t native_ptep_get_and_clear(pte_t *ptep) { pte_t res; Index: linux/include/asm-i386/pgtable.h =================================================================== --- linux.orig/include/asm-i386/pgtable.h +++ linux/include/asm-i386/pgtable.h @@ -261,7 +261,7 @@ static inline pte_t pte_mkhuge(pte_t pte #endif /* local pte updates need not use xchg for locking */ -static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) +static fastcall inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) { pte_t res = *ptep; Index: linux/include/asm-i386/processor.h =================================================================== --- linux.orig/include/asm-i386/processor.h +++ linux/include/asm-i386/processor.h @@ -492,7 +492,7 @@ static inline void rep_nop(void) #define cpu_relax() rep_nop() -static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread) +static inline void fastcall native_load_esp0(struct tss_struct *tss, struct thread_struct *thread) { tss->x86_tss.esp0 = thread->esp0; /* This can only happen when SEP is enabled, no need to test "SEP"arately */ @@ -503,7 +503,7 @@ static inline void native_load_esp0(stru } -static inline unsigned long native_get_debugreg(int regno) +static inline unsigned long fastcall native_get_debugreg(int regno) { unsigned long val = 0; /* Damn you, gcc! 
*/ @@ -526,7 +526,7 @@ static inline unsigned long native_get_d return val; } -static inline void native_set_debugreg(int regno, unsigned long value) +static inline void fastcall native_set_debugreg(int regno, unsigned long value) { switch (regno) { case 0: @@ -555,7 +555,7 @@ static inline void native_set_debugreg(i /* * Set IOPL bits in EFLAGS from given mask */ -static inline void native_set_iopl_mask(unsigned mask) +static inline void fastcall native_set_iopl_mask(unsigned mask) { unsigned int reg; __asm__ __volatile__ ("pushfl;" Index: linux/include/asm-i386/system.h =================================================================== --- linux.orig/include/asm-i386/system.h +++ linux/include/asm-i386/system.h @@ -89,55 +89,55 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" asm volatile("mov %%" #seg ",%0":"=rm" (value)) -static inline void native_clts(void) +static fastcall inline void native_clts(void) { asm volatile ("clts"); } -static inline unsigned long native_read_cr0(void) +static fastcall inline unsigned long native_read_cr0(void) { unsigned long val; asm volatile("movl %%cr0,%0\n\t" :"=r" (val)); return val; } -static inline void native_write_cr0(unsigned long val) +static fastcall inline void native_write_cr0(unsigned long val) { asm volatile("movl %0,%%cr0": :"r" (val)); } -static inline unsigned long native_read_cr2(void) +static fastcall inline unsigned long native_read_cr2(void) { unsigned long val; asm volatile("movl %%cr2,%0\n\t" :"=r" (val)); return val; } -static inline void native_write_cr2(unsigned long val) +static fastcall inline void native_write_cr2(unsigned long val) { asm volatile("movl %0,%%cr2": :"r" (val)); } -static inline unsigned long native_read_cr3(void) +static fastcall inline unsigned long native_read_cr3(void) { unsigned long val; asm volatile("movl %%cr3,%0\n\t" :"=r" (val)); return val; } -static inline void native_write_cr3(unsigned long val) +static fastcall inline void native_write_cr3(unsigned long val) { asm volatile("movl %0,%%cr3": :"r" (val)); } -static inline unsigned long native_read_cr4(void) +static fastcall inline unsigned long native_read_cr4(void) { unsigned long val; asm volatile("movl %%cr4,%0\n\t" :"=r" (val)); return val; } -static inline unsigned long native_read_cr4_safe(void) +static fastcall inline unsigned long native_read_cr4_safe(void) { unsigned long val; /* This could fault if %cr4 does not exist */ @@ -150,12 +150,12 @@ static inline unsigned long native_read_ return val; } -static inline void native_write_cr4(unsigned long val) +static fastcall inline void native_write_cr4(unsigned long val) { asm volatile("movl %0,%%cr4": :"r" (val)); } -static inline void native_wbinvd(void) +static fastcall inline void native_wbinvd(void) { asm volatile("wbinvd": : :"memory"); } Index: linux/include/asm-i386/time.h =================================================================== --- linux.orig/include/asm-i386/time.h +++ linux/include/asm-i386/time.h @@ -4,7 +4,7 @@ #include #include "mach_time.h" -static inline unsigned long native_get_wallclock(void) +static fastcall inline unsigned long native_get_wallclock(void) { unsigned long retval; @@ -16,7 +16,7 @@ static inline unsigned long native_get_w return retval; } -static inline int native_set_wallclock(unsigned long nowtime) +static fastcall inline int native_set_wallclock(unsigned long nowtime) { int retval; Index: linux/include/asm-i386/tlbflush.h =================================================================== --- linux.orig/include/asm-i386/tlbflush.h +++ 
linux/include/asm-i386/tlbflush.h @@ -116,7 +116,7 @@ static inline void flush_tlb_range(struc __flush_tlb(); } -static inline void native_flush_tlb_others(const cpumask_t *cpumask, +static fastcall inline void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, unsigned long va) { } @@ -140,7 +140,7 @@ static inline void flush_tlb_range(struc flush_tlb_mm(vma->vm_mm); } -void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, +void fastcall native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, unsigned long va); #define TLBSTATE_OK 1 patches/rt-mutex-arm-fix.patch0000664000077200007720000000173210655544573015711 0ustar mingomingo--- arch/arm/kernel/semaphore.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/arch/arm/kernel/semaphore.c =================================================================== --- linux-rt-rebase.q.orig/arch/arm/kernel/semaphore.c +++ linux-rt-rebase.q/arch/arm/kernel/semaphore.c @@ -154,7 +154,7 @@ EXPORT_SYMBOL(__compat_down_interruptibl * single "cmpxchg" without failure cases, * but then it wouldn't work on a 386. */ -fastcall int __attribute_used__ __compat_down_trylock(struct compat_semaphore * sem) +fastcall int __attribute_used__ __sched __compat_down_trylock(struct compat_semaphore * sem) { int sleepers; unsigned long flags; @@ -176,7 +176,7 @@ fastcall int __attribute_used__ __compat EXPORT_SYMBOL(__compat_down_trylock); -fastcall int compat_sem_is_locked(struct compat_semaphore *sem) +fastcall int __sched compat_sem_is_locked(struct compat_semaphore *sem) { return (int) atomic_read(&sem->count) < 0; } patches/preempt-realtime-powerpc-b2.patch0000664000077200007720000000510010655544574020007 0ustar mingomingo To convert the spinlocks into the raw onces to fix the following warnings/errors. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Badness at arch/powerpc/kernel/entry_64.S:651 Call Trace: [C0000000006133E0] [C00000000000FAAC] show_stack+0x68/0x1b0 (unreliable) [C000000000613480] [C0000000001EF004] .repor000001EF004] .report_bug+0x94/0xe8 [C000000000613510] [C0000000003EAD58] .program_check_exception+0x170/0x5a8 [C00000000000487C] program_check_common+0xfc/0x100 --- arch/powerpc/kernel/irq.c | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/mm/hash_native_64.c | 2 +- include/asm-powerpc/rtas.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) Index: linux-rt-rebase.q/arch/powerpc/kernel/irq.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/irq.c +++ linux-rt-rebase.q/arch/powerpc/kernel/irq.c @@ -403,7 +403,7 @@ EXPORT_SYMBOL(do_softirq); #ifdef CONFIG_PPC_MERGE static LIST_HEAD(irq_hosts); -static DEFINE_SPINLOCK(irq_big_lock); +static DEFINE_RAW_SPINLOCK(irq_big_lock); static DEFINE_PER_CPU(unsigned int, irq_radix_reader); static unsigned int irq_radix_writer; struct irq_map_entry irq_map[NR_IRQS]; Index: linux-rt-rebase.q/arch/powerpc/kernel/rtas.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/kernel/rtas.c +++ linux-rt-rebase.q/arch/powerpc/kernel/rtas.c @@ -36,7 +36,7 @@ #include struct rtas_t rtas = { - .lock = SPIN_LOCK_UNLOCKED + .lock = RAW_SPIN_LOCK_UNLOCKED(lock) }; EXPORT_SYMBOL(rtas); Index: linux-rt-rebase.q/arch/powerpc/mm/hash_native_64.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/mm/hash_native_64.c +++ linux-rt-rebase.q/arch/powerpc/mm/hash_native_64.c @@ -36,7 +36,7 @@ #define HPTE_LOCK_BIT 3 -static DEFINE_SPINLOCK(native_tlbie_lock); +static DEFINE_RAW_SPINLOCK(native_tlbie_lock); static inline void __tlbie(unsigned long va, unsigned int psize) { Index: linux-rt-rebase.q/include/asm-powerpc/rtas.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-powerpc/rtas.h +++ linux-rt-rebase.q/include/asm-powerpc/rtas.h @@ -58,7 +58,7 @@ struct rtas_t { unsigned long entry; /* physical address pointer */ unsigned long base; /* physical address pointer */ unsigned long size; - spinlock_t lock; + raw_spinlock_t lock; struct rtas_args args; struct device_node *dev; /* virtual address pointer */ }; patches/highmem-revert-mainline.patch0000664000077200007720000000123010655544576017274 0ustar mingomingo--- mm/highmem.c | 9 --------- 1 file changed, 9 deletions(-) Index: linux-rt-rebase.q/mm/highmem.c =================================================================== --- linux-rt-rebase.q.orig/mm/highmem.c +++ linux-rt-rebase.q/mm/highmem.c @@ -104,15 +104,6 @@ static void flush_all_zero_pkmaps(void) flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); } -/* Flush all unused kmap mappings in order to remove stray - mappings. */ -void kmap_flush_unused(void) -{ - spin_lock(&kmap_lock); - flush_all_zero_pkmaps(); - spin_unlock(&kmap_lock); -} - static inline unsigned long map_new_virtual(struct page *page) { unsigned long vaddr; patches/select-error-leak-fix.patch0000664000077200007720000000264710655544576016700 0ustar mingomingoAs it is currently written, sys_select checks its return code to convert ERESTARTNOHAND to EINTR. 
However, the check is within an if (tvp) clause, and so if select is called from userspace with a NULL timeval, then it is possible for the ERESTARTNOHAND errno to leak into userspace, which is incorrect. This patch moves that check outside of the conditional, and prevents the errno leak. Thanks & Regards Neil Signed-Off-By: Neil Horman fs/select.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) Index: linux-rt-rebase.q/fs/select.c =================================================================== --- linux-rt-rebase.q.orig/fs/select.c +++ linux-rt-rebase.q/fs/select.c @@ -414,20 +414,12 @@ asmlinkage long sys_select(int n, fd_set rtv.tv_sec = timeout; if (timeval_compare(&rtv, &tv) >= 0) rtv = tv; - if (copy_to_user(tvp, &rtv, sizeof(rtv))) { -sticky: - /* - * If an application puts its timeval in read-only - * memory, we don't want the Linux-specific update to - * the timeval to cause a fault after the select has - * completed successfully. However, because we're not - * updating the timeval, we can't restart the system - * call. - */ - if (ret == -ERESTARTNOHAND) - ret = -EINTR; - } + if (copy_to_user(tvp, &rtv, sizeof(rtv))) + return -EFAULT; } +sticky: + if (ret == -ERESTARTNOHAND) + ret = -EINTR; return ret; } patches/ppc-rename-xmon-mcount.patch0000664000077200007720000000460310655544571017072 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Mon May 14 17:19:36 2007 Date: Mon, 14 May 2007 17:19:36 +0900 From: Tsutomu OWA To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [patch 4/5] powerpc 2.6.21-rt1: rename mcount variable in xmon to xmon_mcount Rename variable name "mcount" in xmon to xmon_mcount, since it conflicts with mcount() function used by latency trace function. Signed-off-by: Tsutomu OWA -- owa --- From tsutomu.owa@toshiba.co.jp Mon May 14 17:19:36 2007 Date: Mon, 14 May 2007 17:19:36 +0900 From: Tsutomu OWA To: linuxppc-dev@ozlabs.org, linux-kernel@vger.kernel.org Cc: mingo@elte.hu, tglx@linutronix.de Subject: Re: [patch 4/5] powerpc 2.6.21-rt1: rename mcount variable in xmon to xmon_mcount Rename variable name "mcount" in xmon to xmon_mcount, since it conflicts with mcount() function used by latency trace function. Signed-off-by: Tsutomu OWA -- owa --- arch/powerpc/xmon/xmon.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) Index: linux/arch/powerpc/xmon/xmon.c =================================================================== --- linux.orig/arch/powerpc/xmon/xmon.c +++ linux/arch/powerpc/xmon/xmon.c @@ -2129,7 +2129,7 @@ print_address(unsigned long addr) static unsigned long mdest; /* destination address */ static unsigned long msrc; /* source address */ static unsigned long mval; /* byte value to set memory to */ -static unsigned long mcount; /* # bytes to affect */ +static unsigned long xmon_mcount; /* # bytes to affect */ static unsigned long mdiffs; /* max # differences to print */ void @@ -2141,19 +2141,20 @@ memops(int cmd) scanhex((void *)(cmd == 's'? 
&mval: &msrc)); if( termch != '\n' ) termch = 0; - scanhex((void *)&mcount); + scanhex((void *)&xmon_mcount); switch( cmd ){ case 'm': - memmove((void *)mdest, (void *)msrc, mcount); + memmove((void *)mdest, (void *)msrc, xmon_mcount); break; case 's': - memset((void *)mdest, mval, mcount); + memset((void *)mdest, mval, xmon_mcount); break; case 'd': if( termch != '\n' ) termch = 0; scanhex((void *)&mdiffs); - memdiffs((unsigned char *)mdest, (unsigned char *)msrc, mcount, mdiffs); + memdiffs((unsigned char *)mdest, (unsigned char *)msrc, + xmon_mcount, mdiffs); break; } } patches/2.6.21-rc6-lockless7-lockless-pagecache-lookups.patch0000664000077200007720000001470410655544576023120 0ustar mingomingoFrom: Nick Piggin Subject: [patch 7/9] mm: lockless pagecache lookups Combine page_cache_get_speculative with lockless radix tree lookups to introduce lockless page cache lookups (ie. no mapping->tree_lock on the read-side). The only atomicity changes this introduces is that the gang pagecache lookup functions now behave as if they are implemented with multiple find_get_page calls, rather than operating on a snapshot of the pages. In practice, this atomicity guarantee is not used anyway, and it is difficult to see how it could be. Gang pagecache lookups are designed to replace individual lookups, so these semantics are natural. Signed-off-by: Nick Piggin --- mm/filemap.c | 174 ++++++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 132 insertions(+), 42 deletions(-) Index: linux-rt-rebase.q/mm/filemap.c =================================================================== --- linux-rt-rebase.q.orig/mm/filemap.c +++ linux-rt-rebase.q/mm/filemap.c @@ -595,15 +595,35 @@ void fastcall __lock_page_nosync(struct * Is there a pagecache struct page at the given (mapping, offset) tuple? * If yes, increment its refcount and return it; if no, return NULL. */ -struct page * find_get_page(struct address_space *mapping, unsigned long offset) +struct page *find_get_page(struct address_space *mapping, unsigned long offset) { + void **pagep; struct page *page; - read_lock_irq(&mapping->tree_lock); - page = radix_tree_lookup(&mapping->page_tree, offset); - if (page) - page_cache_get(page); - read_unlock_irq(&mapping->tree_lock); + rcu_read_lock(); +repeat: + page = NULL; + pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); + if (pagep) { + page = radix_tree_deref_slot(pagep); + if (unlikely(!page || page == RADIX_TREE_RETRY)) + goto repeat; + + if (!page_cache_get_speculative(page)) + goto repeat; + + /* + * Has the page moved? + * This is part of the lockless pagecache protocol. See + * include/linux/pagemap.h for details. + */ + if (unlikely(page != *pagep)) { + page_cache_release(page); + goto repeat; + } + } + rcu_read_unlock(); + return page; } EXPORT_SYMBOL(find_get_page); @@ -624,25 +644,16 @@ struct page *find_lock_page(struct addre struct page *page; repeat: - read_lock_irq(&mapping->tree_lock); - page = radix_tree_lookup(&mapping->page_tree, offset); + page = find_get_page(mapping, offset); if (page) { - page_cache_get(page); - if (TestSetPageLocked(page)) { - read_unlock_irq(&mapping->tree_lock); - __lock_page(page); - - /* Has the page been truncated while we slept? */ - if (unlikely(page->mapping != mapping)) { - unlock_page(page); - page_cache_release(page); - goto repeat; - } - goto out; + lock_page(page); + /* Has the page been truncated? 
*/ + if (unlikely(page->mapping != mapping)) { + unlock_page(page); + page_cache_release(page); + goto repeat; } } - read_unlock_irq(&mapping->tree_lock); -out: return page; } EXPORT_SYMBOL(find_lock_page); @@ -713,13 +724,39 @@ unsigned find_get_pages(struct address_s { unsigned int i; unsigned int ret; + unsigned int nr_found; + + rcu_read_lock(); +restart: + nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, + (void ***)pages, start, nr_pages); + ret = 0; + for (i = 0; i < nr_found; i++) { + struct page *page; +repeat: + page = radix_tree_deref_slot((void **)pages[i]); + if (unlikely(!page)) + continue; + /* + * this can only trigger if nr_found == 1, making livelock + * a non issue. + */ + if (unlikely(page == RADIX_TREE_RETRY)) + goto restart; - read_lock_irq(&mapping->tree_lock); - ret = radix_tree_gang_lookup(&mapping->page_tree, - (void **)pages, start, nr_pages); - for (i = 0; i < ret; i++) - page_cache_get(pages[i]); - read_unlock_irq(&mapping->tree_lock); + if (!page_cache_get_speculative(page)) + goto repeat; + + /* Has the page moved? */ + if (unlikely(page != *((void **)pages[i]))) { + page_cache_release(page); + goto repeat; + } + + pages[ret] = page; + ret++; + } + rcu_read_unlock(); return ret; } @@ -740,19 +777,44 @@ unsigned find_get_pages_contig(struct ad { unsigned int i; unsigned int ret; + unsigned int nr_found; + + rcu_read_lock(); +restart: + nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, + (void ***)pages, index, nr_pages); + ret = 0; + for (i = 0; i < nr_found; i++) { + struct page *page; +repeat: + page = radix_tree_deref_slot((void **)pages[i]); + if (unlikely(!page)) + continue; + /* + * this can only trigger if nr_found == 1, making livelock + * a non issue. + */ + if (unlikely(page == RADIX_TREE_RETRY)) + goto restart; - read_lock_irq(&mapping->tree_lock); - ret = radix_tree_gang_lookup(&mapping->page_tree, - (void **)pages, index, nr_pages); - for (i = 0; i < ret; i++) { - if (pages[i]->mapping == NULL || pages[i]->index != index) + if (page->mapping == NULL || page->index != index) break; - page_cache_get(pages[i]); + if (!page_cache_get_speculative(page)) + goto repeat; + + /* Has the page moved? */ + if (unlikely(page != *((void **)pages[i]))) { + page_cache_release(page); + goto repeat; + } + + pages[ret] = page; + ret++; index++; } - read_unlock_irq(&mapping->tree_lock); - return i; + rcu_read_unlock(); + return ret; } EXPORT_SYMBOL(find_get_pages_contig); @@ -772,15 +834,43 @@ unsigned find_get_pages_tag(struct addre { unsigned int i; unsigned int ret; + unsigned int nr_found; + + rcu_read_lock(); +restart: + nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree, + (void ***)pages, *index, nr_pages, tag); + ret = 0; + for (i = 0; i < nr_found; i++) { + struct page *page; +repeat: + page = radix_tree_deref_slot((void **)pages[i]); + if (unlikely(!page)) + continue; + /* + * this can only trigger if nr_found == 1, making livelock + * a non issue. + */ + if (unlikely(page == RADIX_TREE_RETRY)) + goto restart; + + if (!page_cache_get_speculative(page)) + goto repeat; + + /* Has the page moved? 
*/ + if (unlikely(page != *((void **)pages[i]))) { + page_cache_release(page); + goto repeat; + } + + pages[ret] = page; + ret++; + } + rcu_read_unlock(); - read_lock_irq(&mapping->tree_lock); - ret = radix_tree_gang_lookup_tag(&mapping->page_tree, - (void **)pages, *index, nr_pages, tag); - for (i = 0; i < ret; i++) - page_cache_get(pages[i]); if (ret) *index = pages[ret - 1]->index + 1; - read_unlock_irq(&mapping->tree_lock); + return ret; } EXPORT_SYMBOL(find_get_pages_tag); patches/preempt-irqs-i386.patch0000664000077200007720000001343710655544573015707 0ustar mingomingo--- arch/i386/kernel/i8259.c | 10 ++++++---- arch/i386/kernel/io_apic.c | 23 +++++++---------------- arch/i386/mach-default/setup.c | 4 ++-- arch/i386/mach-visws/visws_apic.c | 2 ++ arch/i386/mach-voyager/setup.c | 4 ++-- 5 files changed, 19 insertions(+), 24 deletions(-) Index: linux-rt-rebase.q/arch/i386/kernel/i8259.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/i8259.c +++ linux-rt-rebase.q/arch/i386/kernel/i8259.c @@ -170,6 +170,8 @@ static void mask_and_ack_8259A(unsigned */ if (cached_irq_mask & irqmask) goto spurious_8259A_irq; + if (irq & 8) + outb(0x60+(irq&7),PIC_SLAVE_CMD); /* 'Specific EOI' to slave */ cached_irq_mask |= irqmask; handle_real_irq: @@ -297,10 +299,10 @@ void init_8259A(int auto_eoi) outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ outb_p(0x20 + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */ outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */ - if (auto_eoi) /* master does Auto EOI */ - outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); - else /* master expects normal EOI */ + if (!auto_eoi) /* master expects normal EOI */ outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); + else /* master does Auto EOI */ + outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ outb_p(0x20 + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */ @@ -350,7 +352,7 @@ static irqreturn_t math_error_irq(int cp * New motherboards sometimes make IRQ 13 be a PCI interrupt, * so allow interrupt sharing. 
*/ -static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL }; +static struct irqaction fpu_irq = { math_error_irq, IRQF_NODELAY, CPU_MASK_NONE, "fpu", NULL, NULL }; void __init init_ISA_irqs (void) { Index: linux-rt-rebase.q/arch/i386/kernel/io_apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/io_apic.c +++ linux-rt-rebase.q/arch/i386/kernel/io_apic.c @@ -261,18 +261,6 @@ static void __unmask_IO_APIC_irq (unsign __modify_IO_APIC_irq(irq, 0, 0x00010000); } -/* mask = 1, trigger = 0 */ -static void __mask_and_edge_IO_APIC_irq (unsigned int irq) -{ - __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000); -} - -/* mask = 0, trigger = 1 */ -static void __unmask_and_level_IO_APIC_irq (unsigned int irq) -{ - __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000); -} - static void mask_IO_APIC_irq (unsigned int irq) { unsigned long flags; @@ -1259,9 +1247,10 @@ static void ioapic_register_intr(int irq trigger == IOAPIC_LEVEL) set_irq_chip_and_handler_name(irq, &ioapic_chip, handle_fasteoi_irq, "fasteoi"); - else + else { set_irq_chip_and_handler_name(irq, &ioapic_chip, handle_edge_irq, "edge"); + } set_intr_gate(vector, interrupt[irq]); } @@ -1496,7 +1485,7 @@ void __init print_IO_APIC(void) return; } -#if 0 +#if 1 static void print_APIC_bitfield (int base) { @@ -1989,8 +1978,10 @@ static void ack_ioapic_quirk_irq(unsigne if (!(v & (1 << (i & 0x1f)))) { atomic_inc(&irq_mis_count); spin_lock(&ioapic_lock); - __mask_and_edge_IO_APIC_irq(irq); - __unmask_and_level_IO_APIC_irq(irq); + /* mask = 1, trigger = 0 */ + __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000); + /* mask = 0, trigger = 1 */ + __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000); spin_unlock(&ioapic_lock); } } Index: linux-rt-rebase.q/arch/i386/mach-default/setup.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mach-default/setup.c +++ linux-rt-rebase.q/arch/i386/mach-default/setup.c @@ -35,7 +35,7 @@ void __init pre_intr_init_hook(void) /* * IRQ2 is cascade interrupt to second interrupt controller */ -static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; +static struct irqaction irq2 = { no_action, IRQF_NODELAY, CPU_MASK_NONE, "cascade", NULL, NULL}; /** * intr_init_hook - post gate setup interrupt initialisation @@ -81,7 +81,7 @@ void __init trap_init_hook(void) static struct irqaction irq0 = { .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_NODELAY, .mask = CPU_MASK_NONE, .name = "timer" }; Index: linux-rt-rebase.q/arch/i386/mach-visws/visws_apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mach-visws/visws_apic.c +++ linux-rt-rebase.q/arch/i386/mach-visws/visws_apic.c @@ -257,11 +257,13 @@ out_unlock: static struct irqaction master_action = { .handler = piix4_master_intr, .name = "PIIX4-8259", + .flags = IRQF_NODELAY, }; static struct irqaction cascade_action = { .handler = no_action, .name = "cascade", + .flags = IRQF_NODELAY, }; Index: linux-rt-rebase.q/arch/i386/mach-voyager/setup.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/mach-voyager/setup.c +++ linux-rt-rebase.q/arch/i386/mach-voyager/setup.c @@ -18,7 +18,7 @@ void __init pre_intr_init_hook(void) /* * IRQ2 is cascade interrupt to second interrupt controller */ -static 
struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; +static struct irqaction irq2 = { no_action, IRQF_NODELAY, CPU_MASK_NONE, "cascade", NULL, NULL}; void __init intr_init_hook(void) { @@ -42,7 +42,7 @@ void __init trap_init_hook(void) static struct irqaction irq0 = { .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_NODELAY, .mask = CPU_MASK_NONE, .name = "timer" }; patches/irq-mask-fix.patch0000664000077200007720000000753410655544577015105 0ustar mingomingoSubject: genirq: fix simple and fasteoi irq handlers From: Jarek Poplawski After the "genirq: do not mask interrupts by default" patch interrupts should be disabled not immediately upon request, but after they happen. But, handle_simple_irq() and handle_fasteoi_irq() can skip this once or more if an irq is just serviced (IRQ_INPROGRESS), possibly disrupting a driver's work. The main reason of problems here, pointing the broken patch and making the first patch which can fix this was done by Marcin Slusarz. Additional test patches of Thomas Gleixner and Ingo Molnar tested by Marcin Slusarz helped to narrow possible reasons even more. Thanks. PS: this patch fixes only one evident error here, but there could be more places affected by above-mentioned change in irq handling. PS 2: After rethinking, IMHO, there are two most probable scenarios here: 1. After hw resend there could be a conflict between retriggered edge type irq and the next level type one: e.g. if this level type irq (io_apic is enabled then) is triggered while retriggered irq is serviced (IRQ_INPROGRESS) there is goto out with eoi, and probably the next such levels are triggered and looping, so probably kind of flood in io_apic until this retriggered edge service has ended. 2. There is something wrong with ioapic_retrigger_irq (less probable because this should be probably seen with 'normal' edge retriggers, but on the other hand, they could be less common). So, if there is #1, this fixed patch should work. But, since level types don't need this retriggers too much I think this "don't mask interrupts by default" idea should be rethinked: is there enough gain to risk such hard to diagnose errors? So, IMHO, there should be at least possibility to turn this off for level types in config (it should be a visible option, so people could find & try this before writing for help or changing a network card). 
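As an illustration of the intended behaviour, here is a condensed sketch (not the kernel code itself; the hypothetical sketch_irq_desc folds the irq number into the descriptor and collapses both handlers into one): an irq that arrives while a previous one is still being serviced, or while the line is disabled, is now masked and left pending rather than silently skipped, and the line is unmasked again once the handler has finished, as the real hunks below do for handle_simple_irq()/handle_fasteoi_irq().

	struct sketch_irq_desc {
		spinlock_t		lock;
		unsigned int		irq;
		unsigned int		status;
		struct irq_chip		*chip;
		struct irqaction	*action;
	};

	static void sketch_handle_irq(struct sketch_irq_desc *desc)
	{
		spin_lock(&desc->lock);
		if (!desc->action ||
		    (desc->status & (IRQ_INPROGRESS | IRQ_DISABLED))) {
			/* busy or disabled: mask and remember, do not drop it */
			desc->status |= IRQ_PENDING;
			if (desc->chip->mask)
				desc->chip->mask(desc->irq);
			spin_unlock(&desc->lock);
			return;
		}
		desc->status |= IRQ_INPROGRESS;
		spin_unlock(&desc->lock);

		desc->action->handler(desc->irq, desc->action->dev_id);

		spin_lock(&desc->lock);
		desc->status &= ~IRQ_INPROGRESS;
		/* re-enable the line unless it was disabled meanwhile */
		if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
			desc->chip->unmask(desc->irq);
		spin_unlock(&desc->lock);
	}
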
Signed-off-by: Jarek Poplawski Signed-off-by: Ingo Molnar --- kernel/irq/chip.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) Index: linux-rt-rebase.q/kernel/irq/chip.c =================================================================== --- linux-rt-rebase.q.orig/kernel/irq/chip.c +++ linux-rt-rebase.q/kernel/irq/chip.c @@ -297,12 +297,11 @@ handle_simple_irq(unsigned int irq, stru spin_lock(&desc->lock); - if (unlikely(desc->status & IRQ_INPROGRESS)) - goto out_unlock; kstat_cpu(cpu).irqs[irq]++; action = desc->action; - if (unlikely(!action || (desc->status & IRQ_DISABLED))) { + if (unlikely(!action || (desc->status & (IRQ_INPROGRESS | + IRQ_DISABLED)))) { if (desc->chip->mask) desc->chip->mask(irq); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); @@ -325,6 +324,8 @@ handle_simple_irq(unsigned int irq, stru spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; + if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) + desc->chip->unmask(irq); out_unlock: spin_unlock(&desc->lock); } @@ -404,18 +405,16 @@ handle_fasteoi_irq(unsigned int irq, str spin_lock(&desc->lock); - if (unlikely(desc->status & IRQ_INPROGRESS)) - goto out; - desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); kstat_cpu(cpu).irqs[irq]++; /* - * If its disabled or no action available + * If it's running, disabled or no action available * then mask it and get out of here: */ action = desc->action; - if (unlikely(!action || (desc->status & IRQ_DISABLED))) { + if (unlikely(!action || (desc->status & (IRQ_INPROGRESS | + IRQ_DISABLED)))) { desc->status |= IRQ_PENDING; if (desc->chip->mask) desc->chip->mask(irq); @@ -442,6 +441,8 @@ handle_fasteoi_irq(unsigned int irq, str spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; + if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) + desc->chip->unmask(irq); out: desc->chip->eoi(irq); spin_unlock(&desc->lock); patches/preempt-realtime-powerpc-tlb-batching.patch0000664000077200007720000000377210655544574022057 0ustar mingomingoFrom tsutomu.owa@toshiba.co.jp Tue May 15 15:27:26 2007 Date: Tue, 15 May 2007 15:27:26 +0900 From: Tsutomu OWA To: Arnd Bergmann Cc: linuxppc-dev@ozlabs.org, Thomas Gleixner , mingo@elte.hu, linux-kernel@vger.kernel.org Subject: Re: [patch 4/4] powerpc 2.6.21-rt1: reduce scheduling latency by changing tlb flush size At Mon, 14 May 2007 16:40:02 +0200, Arnd Bergmann wrote: > > +#if defined(CONFIG_PPC_CELLEB) && defined(CONFIG_PREEMPT_RT) > > +/* Since tlb flush takes long time on Celleb, reduce it to 1 when Celleb && RT */ > > +#define PPC64_TLB_BATCH_NR 1 > With this code, you get silent side-effects of enabling PPC_CELLEB > along with another platform. > Maybe instead you should change the hpte_need_flush() to always flush > when running on the celleb platform and PREEMPT_RT is enabled. OK, how about this one? thanks a lot! Since flushing tlb needs expensive hypervisor call(s) on celleb, always flush it on RT to reduce scheduling latency. 
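Compared with the earlier compile-time PPC64_TLB_BATCH_NR=1 override discussed above, the runtime machine_is(celleb) test keeps a kernel that merely enables CONFIG_PPC_CELLEB from penalizing the other platforms it supports. A condensed sketch of the resulting tail of hpte_need_flush() (illustrative only; the helper names come from the hunk below, the surrounding context is elided):

	static void sketch_maybe_flush_batch(struct ppc64_tlb_batch *batch, int i)
	{
	#ifdef CONFIG_PREEMPT_RT
		/*
		 * tlbie is a costly hypervisor call on Celleb, so on -rt the
		 * batch is drained as soon as an entry is queued instead of
		 * letting up to PPC64_TLB_BATCH_NR entries accumulate.
		 */
		if (machine_is(celleb)) {
			flush_tlb_pending();
			return;
		}
	#endif
		if (i >= PPC64_TLB_BATCH_NR)
			__flush_tlb_pending(batch);
	}
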
Signed-off-by: Tsutomu OWA -- owa --- arch/powerpc/mm/tlb_64.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) Index: linux-rt-rebase.q/arch/powerpc/mm/tlb_64.c =================================================================== --- linux-rt-rebase.q.orig/arch/powerpc/mm/tlb_64.c +++ linux-rt-rebase.q/arch/powerpc/mm/tlb_64.c @@ -30,6 +30,7 @@ #include #include #include +#include DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); @@ -203,6 +204,18 @@ void hpte_need_flush(struct mm_struct *m batch->pte[i] = rpte; batch->vaddr[i] = vaddr; batch->index = ++i; + +#ifdef CONFIG_PREEMPT_RT + /* + * Since flushing tlb needs expensive hypervisor call(s) on celleb, + * always flush it on RT to reduce scheduling latency. + */ + if (machine_is(celleb)) { + flush_tlb_pending(); + return; + } +#endif /* CONFIG_PREEMPT_RT */ + if (i >= PPC64_TLB_BATCH_NR) __flush_tlb_pending(batch); } patches/fix-softirq-checks-for-non-rt-preempt-hardirq.patch0000664000077200007720000000173510655544576023403 0ustar mingomingo--- include/linux/bottom_half.h | 2 +- kernel/softirq.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) Index: linux-rt-rebase.q/include/linux/bottom_half.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/bottom_half.h +++ linux-rt-rebase.q/include/linux/bottom_half.h @@ -1,7 +1,7 @@ #ifndef _LINUX_BH_H #define _LINUX_BH_H -#ifdef CONFIG_PREEMPT_RT +#ifdef CONFIG_PREEMPT_HARDIRQS # define local_bh_disable() do { } while (0) # define __local_bh_disable(ip) do { } while (0) # define _local_bh_enable() do { } while (0) Index: linux-rt-rebase.q/kernel/softirq.c =================================================================== --- linux-rt-rebase.q.orig/kernel/softirq.c +++ linux-rt-rebase.q/kernel/softirq.c @@ -139,7 +139,7 @@ static void trigger_softirqs(void) } } -#ifndef CONFIG_PREEMPT_RT +#ifndef CONFIG_PREEMPT_HARDIRQS /* * This one is for softirq.c-internal use, patches/serial-locking-rt-cleanup.patch0000664000077200007720000000217410655544574017540 0ustar mingomingo drivers/serial/8250.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) Index: linux-rt-rebase.q/drivers/serial/8250.c =================================================================== --- linux-rt-rebase.q.orig/drivers/serial/8250.c +++ linux-rt-rebase.q/drivers/serial/8250.c @@ -2456,14 +2456,10 @@ serial8250_console_write(struct console touch_nmi_watchdog(); - local_irq_save(flags); - if (up->port.sysrq) { - /* serial8250_handle_port() already took the lock */ - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&up->port.lock); - } else - spin_lock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); /* * First save the IER then disable the interrupts @@ -2485,8 +2481,7 @@ serial8250_console_write(struct console serial_out(up, UART_IER, ier); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int __init serial8250_console_setup(struct console *co, char *options) patches/mips-gtod_clocksource.patch0000664000077200007720000000210310655544574017054 0ustar mingomingo arch/mips/kernel/time.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) Index: linux-rt-rebase.q/arch/mips/kernel/time.c =================================================================== --- linux-rt-rebase.q.orig/arch/mips/kernel/time.c +++ 
linux-rt-rebase.q/arch/mips/kernel/time.c @@ -287,6 +287,29 @@ void (*mips_timer_ack)(void); /* last time when xtime and rtc are sync'ed up */ static long last_rtc_update; +unsigned long read_persistent_clock(void) +{ + unsigned long sec; + sec = rtc_mips_get_time(); + return sec; +} + +void sync_persistent_clock(struct timespec ts) +{ + if (ntp_synced() && + xtime.tv_sec > last_rtc_update + 660 && + (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && + (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { + if (rtc_mips_set_mmss(xtime.tv_sec) == 0) { + last_rtc_update = xtime.tv_sec; + } + else { + /* do it again in 60 s */ + last_rtc_update = xtime.tv_sec - 600; + } + } +} + /* * local_timer_interrupt() does profiling and process accounting * on a per-CPU basis. patches/rt-mutex-trivial-route-cast-fix.patch0000664000077200007720000000100210655544573020656 0ustar mingomingo--- net/ipv4/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-rt-rebase.q/net/ipv4/route.c =================================================================== --- linux-rt-rebase.q.orig/net/ipv4/route.c +++ linux-rt-rebase.q/net/ipv4/route.c @@ -237,7 +237,7 @@ static spinlock_t *rt_hash_locks; spin_lock_init(&rt_hash_locks[i]); \ } #else -# define rt_hash_lock_addr(slot) NULL +# define rt_hash_lock_addr(slot) ((spinlock_t *)NULL) # define rt_hash_lock_init() #endif patches/ich-force-hpet-ich7-or-later-quirk-to-force-detect-enable.patch0000664000077200007720000001104610655544570025266 0ustar mingomingoFrom: Venki Pallipadi Force detect and/or enable HPET on ICH chipsets. This patch just handles the detection part and following patches use this information. Adds a function to repeat the force enabling during resume time. Using HPET this way, instead of PIT increases the time CPUs can reside in C-state when system is totally idle. On my test system with Core 2 Duo, average C-state residency goes up from ~20mS to ~80mS. Signed-off-by: Venkatesh Pallipadi Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Andi Kleen Cc: john stultz Cc: Greg KH Signed-off-by: Andrew Morton --- arch/i386/kernel/quirks.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++ include/asm-i386/hpet.h | 1 2 files changed, 102 insertions(+) Index: linux/arch/i386/kernel/quirks.c =================================================================== --- linux.orig/arch/i386/kernel/quirks.c +++ linux/arch/i386/kernel/quirks.c @@ -4,6 +4,8 @@ #include #include +#include + #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) @@ -47,3 +49,102 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IN DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); #endif + +#if defined(CONFIG_HPET_TIMER) +unsigned long force_hpet_address; + +static void __iomem *rcba_base; + +void ich_force_hpet_resume(void) +{ + u32 val; + + if (!force_hpet_address) + return; + + if (rcba_base == NULL) + BUG(); + + /* read the Function Disable register, dword mode only */ + val = readl(rcba_base + 0x3404); + if (!(val & 0x80)) { + /* HPET disabled in HPTC. 
Trying to enable */ + writel(val | 0x80, rcba_base + 0x3404); + } + + val = readl(rcba_base + 0x3404); + if (!(val & 0x80)) + BUG(); + else + printk(KERN_DEBUG "Force enabled HPET at resume\n"); + + return; +} + +static void ich_force_enable_hpet(struct pci_dev *dev) +{ + u32 val, rcba; + int err = 0; + + if (hpet_address || force_hpet_address) + return; + + pci_read_config_dword(dev, 0xF0, &rcba); + rcba &= 0xFFFFC000; + if (rcba == 0) { + printk(KERN_DEBUG "RCBA disabled. Cannot force enable HPET\n"); + return; + } + + /* use bits 31:14, 16 kB aligned */ + rcba_base = ioremap_nocache(rcba, 0x4000); + if (rcba_base == NULL) { + printk(KERN_DEBUG "ioremap failed. Cannot force enable HPET\n"); + return; + } + + /* read the Function Disable register, dword mode only */ + val = readl(rcba_base + 0x3404); + + if (val & 0x80) { + /* HPET is enabled in HPTC. Just not reported by BIOS */ + val = val & 0x3; + force_hpet_address = 0xFED00000 | (val << 12); + printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", + force_hpet_address); + iounmap(rcba_base); + return; + } + + /* HPET disabled in HPTC. Trying to enable */ + writel(val | 0x80, rcba_base + 0x3404); + + val = readl(rcba_base + 0x3404); + if (!(val & 0x80)) { + err = 1; + } else { + val = val & 0x3; + force_hpet_address = 0xFED00000 | (val << 12); + } + + if (err) { + force_hpet_address = 0; + iounmap(rcba_base); + printk(KERN_DEBUG "Failed to force enable HPET\n"); + } else { + printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", + force_hpet_address); + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, + ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, + ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, + ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, + ich_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, + ich_force_enable_hpet); +#endif Index: linux/include/asm-i386/hpet.h =================================================================== --- linux.orig/include/asm-i386/hpet.h +++ linux/include/asm-i386/hpet.h @@ -67,6 +67,7 @@ extern unsigned long hpet_address; extern int is_hpet_enabled(void); extern int hpet_enable(void); extern unsigned long hpet_readl(unsigned long a); +extern void ich_force_hpet_resume(void); #ifdef CONFIG_HPET_EMULATE_RTC patches/jbd_assertions_smp_only.patch0000664000077200007720000000375410655544574017523 0ustar mingomingo fs/jbd/transaction.c | 6 +++--- include/linux/jbd.h | 9 +++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) Index: linux-rt-rebase.q/fs/jbd/transaction.c =================================================================== --- linux-rt-rebase.q.orig/fs/jbd/transaction.c +++ linux-rt-rebase.q/fs/jbd/transaction.c @@ -1506,7 +1506,7 @@ static void __journal_temp_unlink_buffer transaction_t *transaction; struct buffer_head *bh = jh2bh(jh); - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); + J_ASSERT_JH_SMP(jh, jbd_is_locked_bh_state(bh)); transaction = jh->b_transaction; if (transaction) assert_spin_locked(&transaction->t_journal->j_list_lock); @@ -1949,7 +1949,7 @@ void __journal_file_buffer(struct journa int was_dirty = 0; struct buffer_head *bh = jh2bh(jh); - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); + J_ASSERT_JH_SMP(jh, jbd_is_locked_bh_state(bh)); assert_spin_locked(&transaction->t_journal->j_list_lock); J_ASSERT_JH(jh, jh->b_jlist < 
BJ_Types); @@ -2038,7 +2038,7 @@ void __journal_refile_buffer(struct jour int was_dirty; struct buffer_head *bh = jh2bh(jh); - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); + J_ASSERT_JH_SMP(jh, jbd_is_locked_bh_state(bh)); if (jh->b_transaction) assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock); Index: linux-rt-rebase.q/include/linux/jbd.h =================================================================== --- linux-rt-rebase.q.orig/include/linux/jbd.h +++ linux-rt-rebase.q/include/linux/jbd.h @@ -276,6 +276,15 @@ void buffer_assertion_failure(struct buf #define J_ASSERT(assert) do { } while (0) #endif /* JBD_ASSERTIONS */ +/* + * For assertions that are only valid on SMP (e.g. spin_is_locked()): + */ +#ifdef CONFIG_SMP +# define J_ASSERT_JH_SMP(jh, expr) J_ASSERT_JH(jh, expr) +#else +# define J_ASSERT_JH_SMP(jh, assert) do { } while (0) +#endif + #if defined(JBD_PARANOID_IOFAIL) #define J_EXPECT(expr, why...) J_ASSERT(expr) #define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr) patches/x86_64-remove-unused-code.patch0000664000077200007720000004445510655544570017233 0ustar mingomingoSubject: x86_64: remove now unused code Remove the unused code after the switch to clock events. Signed-off-by: Thomas Gleixner Signed-off-by: Chris Wright Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 108 ---------- arch/x86_64/kernel/hpet.c | 444 --------------------------------------------- arch/x86_64/kernel/time.c | 42 ---- include/asm-x86_64/apic.h | 6 include/asm-x86_64/proto.h | 7 5 files changed, 1 insertion(+), 606 deletions(-) Index: linux/arch/x86_64/kernel/apic.c =================================================================== --- linux.orig/arch/x86_64/kernel/apic.c +++ linux/arch/x86_64/kernel/apic.c @@ -42,9 +42,7 @@ int apic_mapped; int apic_verbosity; -int apic_runs_main_timer; int apic_calibrate_pmtmr __initdata; - int disable_apic_timer __initdata; /* Local APIC timer works in C2? */ @@ -130,15 +128,6 @@ static void lapic_timer_broadcast(cpumas #endif } -/* - * cpu_mask that denotes the CPUs that needs timer interrupt coming in as - * IPIs in place of local APIC timers - */ -static cpumask_t timer_interrupt_broadcast_ipi_mask; - -/* Using APIC to generate smp_local_timer_interrupt? */ -int using_apic_timer __read_mostly = 0; - static void apic_pm_activate(void); void apic_wait_icr_idle(void) @@ -974,84 +963,6 @@ void __cpuinit setup_secondary_APIC_cloc setup_APIC_timer(); } -void disable_APIC_timer(void) -{ - if (using_apic_timer) { - unsigned long v; - - v = apic_read(APIC_LVTT); - /* - * When an illegal vector value (0-15) is written to an LVT - * entry and delivery mode is Fixed, the APIC may signal an - * illegal vector error, with out regard to whether the mask - * bit is set or whether an interrupt is actually seen on input. - * - * Boot sequence might call this function when the LVTT has - * '0' vector value. So make sure vector field is set to - * valid value. 
- */ - v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); - apic_write(APIC_LVTT, v); - } -} - -void enable_APIC_timer(void) -{ - int cpu = smp_processor_id(); - - if (using_apic_timer && - !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) { - unsigned long v; - - v = apic_read(APIC_LVTT); - apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED); - } -} - -void switch_APIC_timer_to_ipi(void *cpumask) -{ - cpumask_t mask = *(cpumask_t *)cpumask; - int cpu = smp_processor_id(); - - if (cpu_isset(cpu, mask) && - !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) { - disable_APIC_timer(); - cpu_set(cpu, timer_interrupt_broadcast_ipi_mask); - } -} -EXPORT_SYMBOL(switch_APIC_timer_to_ipi); - -void smp_send_timer_broadcast_ipi(void) -{ - int cpu = smp_processor_id(); - cpumask_t mask; - - cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask); - - if (cpu_isset(cpu, mask)) { - cpu_clear(cpu, mask); - add_pda(apic_timer_irqs, 1); - smp_local_timer_interrupt(); - } - - if (!cpus_empty(mask)) { - send_IPI_mask(mask, LOCAL_TIMER_VECTOR); - } -} - -void switch_ipi_to_APIC_timer(void *cpumask) -{ - cpumask_t mask = *(cpumask_t *)cpumask; - int cpu = smp_processor_id(); - - if (cpu_isset(cpu, mask) && - cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) { - cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask); - enable_APIC_timer(); - } -} -EXPORT_SYMBOL(switch_ipi_to_APIC_timer); - int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; @@ -1199,7 +1110,6 @@ asmlinkage void smp_spurious_interrupt(v v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1)); if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) ack_APIC_irq(); - irq_exit(); } @@ -1298,21 +1208,7 @@ static __init int setup_noapictimer(char disable_apic_timer = 1; return 1; } - -static __init int setup_apicmaintimer(char *str) -{ - apic_runs_main_timer = 1; - - return 1; -} -__setup("apicmaintimer", setup_apicmaintimer); - -static __init int setup_noapicmaintimer(char *str) -{ - apic_runs_main_timer = -1; - return 1; -} -__setup("noapicmaintimer", setup_noapicmaintimer); +__setup("noapictimer", setup_noapictimer); static __init int setup_apicpmtimer(char *s) { @@ -1322,5 +1218,3 @@ static __init int setup_apicpmtimer(char } __setup("apicpmtimer", setup_apicpmtimer); -__setup("noapictimer", setup_noapictimer); - Index: linux/arch/x86_64/kernel/hpet.c =================================================================== --- linux.orig/arch/x86_64/kernel/hpet.c +++ /dev/null @@ -1,444 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define HPET_MASK 0xFFFFFFFF -#define HPET_SHIFT 22 - -/* FSEC = 10^-15 NSEC = 10^-9 */ -#define FSEC_PER_NSEC 1000000 - -int nohpet __initdata; - -unsigned long hpet_address; -unsigned long hpet_period; /* fsecs / HPET clock */ -unsigned long hpet_tick; /* HPET clocks / interrupt */ - -int hpet_use_timer; /* Use counter of hpet for time keeping, - * otherwise PIT - */ - -#ifdef CONFIG_HPET -static __init int late_hpet_init(void) -{ - struct hpet_data hd; - unsigned int ntimer; - - if (!hpet_address) - return 0; - - memset(&hd, 0, sizeof(hd)); - - ntimer = hpet_readl(HPET_ID); - ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT; - ntimer++; - - /* - * Register with driver. - * Timer0 and Timer1 is used by platform. 
- */ - hd.hd_phys_address = hpet_address; - hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE); - hd.hd_nirqs = ntimer; - hd.hd_flags = HPET_DATA_PLATFORM; - hpet_reserve_timer(&hd, 0); -#ifdef CONFIG_HPET_EMULATE_RTC - hpet_reserve_timer(&hd, 1); -#endif - hd.hd_irq[0] = HPET_LEGACY_8254; - hd.hd_irq[1] = HPET_LEGACY_RTC; - if (ntimer > 2) { - struct hpet *hpet; - struct hpet_timer *timer; - int i; - - hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE); - timer = &hpet->hpet_timers[2]; - for (i = 2; i < ntimer; timer++, i++) - hd.hd_irq[i] = (timer->hpet_config & - Tn_INT_ROUTE_CNF_MASK) >> - Tn_INT_ROUTE_CNF_SHIFT; - - } - - hpet_alloc(&hd); - return 0; -} -fs_initcall(late_hpet_init); -#endif - -int hpet_timer_stop_set_go(unsigned long tick) -{ - unsigned int cfg; - -/* - * Stop the timers and reset the main counter. - */ - - cfg = hpet_readl(HPET_CFG); - cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY); - hpet_writel(cfg, HPET_CFG); - hpet_writel(0, HPET_COUNTER); - hpet_writel(0, HPET_COUNTER + 4); - -/* - * Set up timer 0, as periodic with first interrupt to happen at hpet_tick, - * and period also hpet_tick. - */ - if (hpet_use_timer) { - hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | - HPET_TN_32BIT, HPET_T0_CFG); - hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */ - hpet_writel(hpet_tick, HPET_T0_CMP); /* period */ - cfg |= HPET_CFG_LEGACY; - } -/* - * Go! - */ - - cfg |= HPET_CFG_ENABLE; - hpet_writel(cfg, HPET_CFG); - - return 0; -} - -static cycle_t read_hpet(void) -{ - return (cycle_t)hpet_readl(HPET_COUNTER); -} - -static cycle_t __vsyscall_fn vread_hpet(void) -{ - return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); -} - -struct clocksource clocksource_hpet = { - .name = "hpet", - .rating = 250, - .read = read_hpet, - .mask = (cycle_t)HPET_MASK, - .mult = 0, /* set below */ - .shift = HPET_SHIFT, - .flags = CLOCK_SOURCE_IS_CONTINUOUS, - .vread = vread_hpet, -}; - -int __init hpet_arch_init(void) -{ - unsigned int id; - u64 tmp; - - if (!hpet_address) - return -1; - set_fixmap_nocache(FIX_HPET_BASE, hpet_address); - __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE); - -/* - * Read the period, compute tick and quotient. - */ - - id = hpet_readl(HPET_ID); - - if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER)) - return -1; - - hpet_period = hpet_readl(HPET_PERIOD); - if (hpet_period < 100000 || hpet_period > 100000000) - return -1; - - hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period; - - hpet_use_timer = (id & HPET_ID_LEGSUP); - - /* - * hpet period is in femto seconds per cycle - * so we need to convert this to ns/cyc units - * aproximated by mult/2^shift - * - * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift - * fsec/cyc * 1ns/1000000fsec * 2^shift = mult - * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult - * (fsec/cyc << shift)/1000000 = mult - * (hpet_period << shift)/FSEC_PER_NSEC = mult - */ - tmp = (u64)hpet_period << HPET_SHIFT; - do_div(tmp, FSEC_PER_NSEC); - clocksource_hpet.mult = (u32)tmp; - clocksource_register(&clocksource_hpet); - - return hpet_timer_stop_set_go(hpet_tick); -} - -int hpet_reenable(void) -{ - return hpet_timer_stop_set_go(hpet_tick); -} - -#ifdef CONFIG_HPET_EMULATE_RTC -/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET - * is enabled, we support RTC interrupt functionality in software. 
- * RTC has 3 kinds of interrupts: - * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock - * is updated - * 2) Alarm Interrupt - generate an interrupt at a specific time of day - * 3) Periodic Interrupt - generate periodic interrupt, with frequencies - * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2) - * (1) and (2) above are implemented using polling at a frequency of - * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt - * overhead. (DEFAULT_RTC_INT_FREQ) - * For (3), we use interrupts at 64Hz or user specified periodic - * frequency, whichever is higher. - */ -#include - -#define DEFAULT_RTC_INT_FREQ 64 -#define RTC_NUM_INTS 1 - -static unsigned long UIE_on; -static unsigned long prev_update_sec; - -static unsigned long AIE_on; -static struct rtc_time alarm_time; - -static unsigned long PIE_on; -static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ; -static unsigned long PIE_count; - -static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ -static unsigned int hpet_t1_cmp; /* cached comparator register */ - -int is_hpet_enabled(void) -{ - return hpet_address != 0; -} - -/* - * Timer 1 for RTC, we do not use periodic interrupt feature, - * even if HPET supports periodic interrupts on Timer 1. - * The reason being, to set up a periodic interrupt in HPET, we need to - * stop the main counter. And if we do that everytime someone diables/enables - * RTC, we will have adverse effect on main kernel timer running on Timer 0. - * So, for the time being, simulate the periodic interrupt in software. - * - * hpet_rtc_timer_init() is called for the first time and during subsequent - * interuppts reinit happens through hpet_rtc_timer_reinit(). - */ -int hpet_rtc_timer_init(void) -{ - unsigned int cfg, cnt; - unsigned long flags; - - if (!is_hpet_enabled()) - return 0; - /* - * Set the counter 1 and enable the interrupts. - */ - if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) - hpet_rtc_int_freq = PIE_freq; - else - hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; - - local_irq_save(flags); - - cnt = hpet_readl(HPET_COUNTER); - cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); - hpet_writel(cnt, HPET_T1_CMP); - hpet_t1_cmp = cnt; - - cfg = hpet_readl(HPET_T1_CFG); - cfg &= ~HPET_TN_PERIODIC; - cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; - hpet_writel(cfg, HPET_T1_CFG); - - local_irq_restore(flags); - - return 1; -} - -static void hpet_rtc_timer_reinit(void) -{ - unsigned int cfg, cnt, ticks_per_int, lost_ints; - - if (unlikely(!(PIE_on | AIE_on | UIE_on))) { - cfg = hpet_readl(HPET_T1_CFG); - cfg &= ~HPET_TN_ENABLE; - hpet_writel(cfg, HPET_T1_CFG); - return; - } - - if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) - hpet_rtc_int_freq = PIE_freq; - else - hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; - - /* It is more accurate to use the comparator value than current count.*/ - ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq; - hpet_t1_cmp += ticks_per_int; - hpet_writel(hpet_t1_cmp, HPET_T1_CMP); - - /* - * If the interrupt handler was delayed too long, the write above tries - * to schedule the next interrupt in the past and the hardware would - * not interrupt until the counter had wrapped around. - * So we have to check that the comparator wasn't set to a past time. 
- */ - cnt = hpet_readl(HPET_COUNTER); - if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) { - lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1; - /* Make sure that, even with the time needed to execute - * this code, the next scheduled interrupt has been moved - * back to the future: */ - lost_ints++; - - hpet_t1_cmp += lost_ints * ticks_per_int; - hpet_writel(hpet_t1_cmp, HPET_T1_CMP); - - if (PIE_on) - PIE_count += lost_ints; - - if (printk_ratelimit()) - printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", - hpet_rtc_int_freq); - } -} - -/* - * The functions below are called from rtc driver. - * Return 0 if HPET is not being used. - * Otherwise do the necessary changes and return 1. - */ -int hpet_mask_rtc_irq_bit(unsigned long bit_mask) -{ - if (!is_hpet_enabled()) - return 0; - - if (bit_mask & RTC_UIE) - UIE_on = 0; - if (bit_mask & RTC_PIE) - PIE_on = 0; - if (bit_mask & RTC_AIE) - AIE_on = 0; - - return 1; -} - -int hpet_set_rtc_irq_bit(unsigned long bit_mask) -{ - int timer_init_reqd = 0; - - if (!is_hpet_enabled()) - return 0; - - if (!(PIE_on | AIE_on | UIE_on)) - timer_init_reqd = 1; - - if (bit_mask & RTC_UIE) { - UIE_on = 1; - } - if (bit_mask & RTC_PIE) { - PIE_on = 1; - PIE_count = 0; - } - if (bit_mask & RTC_AIE) { - AIE_on = 1; - } - - if (timer_init_reqd) - hpet_rtc_timer_init(); - - return 1; -} - -int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec) -{ - if (!is_hpet_enabled()) - return 0; - - alarm_time.tm_hour = hrs; - alarm_time.tm_min = min; - alarm_time.tm_sec = sec; - - return 1; -} - -int hpet_set_periodic_freq(unsigned long freq) -{ - if (!is_hpet_enabled()) - return 0; - - PIE_freq = freq; - PIE_count = 0; - - return 1; -} - -int hpet_rtc_dropped_irq(void) -{ - if (!is_hpet_enabled()) - return 0; - - return 1; -} - -irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) -{ - struct rtc_time curr_time; - unsigned long rtc_int_flag = 0; - int call_rtc_interrupt = 0; - - hpet_rtc_timer_reinit(); - - if (UIE_on | AIE_on) { - rtc_get_rtc_time(&curr_time); - } - if (UIE_on) { - if (curr_time.tm_sec != prev_update_sec) { - /* Set update int info, call real rtc int routine */ - call_rtc_interrupt = 1; - rtc_int_flag = RTC_UF; - prev_update_sec = curr_time.tm_sec; - } - } - if (PIE_on) { - PIE_count++; - if (PIE_count >= hpet_rtc_int_freq/PIE_freq) { - /* Set periodic int info, call real rtc int routine */ - call_rtc_interrupt = 1; - rtc_int_flag |= RTC_PF; - PIE_count = 0; - } - } - if (AIE_on) { - if ((curr_time.tm_sec == alarm_time.tm_sec) && - (curr_time.tm_min == alarm_time.tm_min) && - (curr_time.tm_hour == alarm_time.tm_hour)) { - /* Set alarm int info, call real rtc int routine */ - call_rtc_interrupt = 1; - rtc_int_flag |= RTC_AF; - } - } - if (call_rtc_interrupt) { - rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8)); - rtc_interrupt(rtc_int_flag, dev_id); - } - return IRQ_HANDLED; -} -#endif - -static int __init nohpet_setup(char *s) -{ - nohpet = 1; - return 1; -} - -__setup("nohpet", nohpet_setup); Index: linux/arch/x86_64/kernel/time.c =================================================================== --- linux.orig/arch/x86_64/kernel/time.c +++ linux/arch/x86_64/kernel/time.c @@ -150,48 +150,6 @@ int update_persistent_clock(struct times return set_rtc_mmss(now.tv_sec); } -void main_timer_handler(void) -{ -/* - * Here we are in the timer irq handler. We have irqs locally disabled (so we - * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running - * on the other CPU, so we need a lock. 
We also need to lock the vsyscall - * variables, because both do_timer() and us change them -arca+vojtech - */ - - write_seqlock(&xtime_lock); - -/* - * Do the timer stuff. - */ - - do_timer(1); -#ifndef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif - -/* - * In the SMP case we use the local APIC timer interrupt to do the profiling, - * except when we simulate SMP mode on a uniprocessor system, in that case we - * have to call the local interrupt handler. - */ - - if (!using_apic_timer) - smp_local_timer_interrupt(); - - write_sequnlock(&xtime_lock); -} - -static irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - if (apic_runs_main_timer > 1) - return IRQ_HANDLED; - main_timer_handler(); - if (using_apic_timer) - smp_send_timer_broadcast_ipi(); - return IRQ_HANDLED; -} - static irqreturn_t timer_event_interrupt(int irq, void *dev_id) { global_clock_event->event_handler(global_clock_event); Index: linux/include/asm-x86_64/apic.h =================================================================== --- linux.orig/include/asm-x86_64/apic.h +++ linux/include/asm-x86_64/apic.h @@ -79,8 +79,6 @@ extern void smp_local_timer_interrupt (v extern void setup_boot_APIC_clock (void); extern void setup_secondary_APIC_clock (void); extern int APIC_init_uniprocessor (void); -extern void disable_APIC_timer(void); -extern void enable_APIC_timer(void); extern void setup_apic_routing(void); extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector, @@ -95,10 +93,6 @@ extern int apic_is_clustered_box(void); #define K8_APIC_EXT_INT_MSG_EXT 0x7 #define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 -void smp_send_timer_broadcast_ipi(void); -void switch_APIC_timer_to_ipi(void *cpumask); -void switch_ipi_to_APIC_timer(void *cpumask); - #define ARCH_APICTIMER_STOPS_ON_C3 1 extern unsigned boot_cpu_id; Index: linux/include/asm-x86_64/proto.h =================================================================== --- linux.orig/include/asm-x86_64/proto.h +++ linux/include/asm-x86_64/proto.h @@ -51,9 +51,6 @@ extern void reserve_bootmem_generic(unsi extern void load_gs_index(unsigned gs); -extern void stop_timer_interrupt(void); -extern void main_timer_handler(void); - extern unsigned long end_pfn_map; extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp); @@ -90,14 +87,10 @@ extern int timer_over_8254; extern int gsi_irq_sharing(int gsi); -extern void smp_local_timer_interrupt(void); - extern int force_mwait; long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); -void i8254_timer_resume(void); - #define round_up(x,y) (((x) + (y) - 1) & ~((y)-1)) #define round_down(x,y) ((x) & ~((y)-1)) patches/futex-performance-hack-sysctl-fix.patch0000664000077200007720000000553710655544576021236 0ustar mingomingoFrom lethal@linux-sh.org Fri May 18 06:46:43 2007 Return-Path: Received: from smtp.ocgnet.org (smtp.ocgnet.org [64.20.243.3]) by mail.tglx.de (Postfix) with ESMTP id 0FCC865C065 for ; Fri, 18 May 2007 06:46:43 +0200 (CEST) Received: from smtp.ocgnet.org (localhost [127.0.0.1]) by smtp.ocgnet.org (Postfix) with ESMTP id 616355203FB; Thu, 17 May 2007 23:46:39 -0500 (CDT) X-Spam-Checker-Version: SpamAssassin 3.1.3-gr0 (2006-06-01) on smtp.ocgnet.org X-Spam-Level: X-Spam-Status: No, score=0.0 required=5.0 tests=none autolearn=no version=3.1.3-gr0 Received: from master.linux-sh.org (124x34x33x190.ap124.ftth.ucom.ne.jp [124.34.33.190]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (No client certificate requested) by smtp.ocgnet.org 
(Postfix) with ESMTP id E1F585203E0; Thu, 17 May 2007 23:46:38 -0500 (CDT) Received: from localhost (unknown [127.0.0.1]) by master.linux-sh.org (Postfix) with ESMTP id 4984664C7C; Fri, 18 May 2007 04:46:00 +0000 (UTC) X-Virus-Scanned: amavisd-new at linux-sh.org Received: from master.linux-sh.org ([127.0.0.1]) by localhost (master.linux-sh.org [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id BE+H5LV2TYuQ; Fri, 18 May 2007 13:46:00 +0900 (JST) Received: by master.linux-sh.org (Postfix, from userid 500) id 08A5664C7D; Fri, 18 May 2007 13:46:00 +0900 (JST) Date: Fri, 18 May 2007 13:45:59 +0900 From: Paul Mundt To: Ingo Molnar , Thomas Gleixner Cc: linux-kernel@vger.kernel.org Subject: [PATCH -rt] futex_performance_hack sysctl build fix Message-ID: <20070518044559.GB22660@linux-sh.org> Mail-Followup-To: Paul Mundt , Ingo Molnar , Thomas Gleixner , linux-kernel@vger.kernel.org MIME-Version: 1.0 Content-Type: text/plain; charset=us-ascii Content-Disposition: inline User-Agent: Mutt/1.5.13 (2006-08-11) X-Virus-Scanned: ClamAV using ClamSMTP X-Evolution-Source: imap://tglx%40linutronix.de@localhost:8993/ Content-Transfer-Encoding: 8bit -rt adds a futex_performance_hack sysctl, which is only defined if kernel/futex.c is built in. This fixes the build in the CONFIG_FUTEX=n case. Signed-off-by: Paul Mundt -- kernel/sysctl.c | 2 ++ 1 file changed, 2 insertions(+) Index: linux-rt-rebase.q/kernel/sysctl.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sysctl.c +++ linux-rt-rebase.q/kernel/sysctl.c @@ -324,6 +324,7 @@ static ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, +#ifdef CONFIG_FUTEX { .ctl_name = CTL_UNNUMBERED, .procname = "futex_performance_hack", @@ -332,6 +333,7 @@ static ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, +#endif { .ctl_name = KERN_PANIC, .procname = "prof_pid", patches/relay-fix.patch0000664000077200007720000000305410655544577014466 0ustar mingomingoSubject: relay: fix timer madness From: Ingo Molnar remove timer calls (!!!) from deep within the tracing infrastructure. This was totally bogus code that can cause lockups and worse. Poll the buffer every 2 jiffies for now. Signed-off-by: Ingo Molnar --- kernel/relay.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) Index: linux-rt-rebase.q/kernel/relay.c =================================================================== --- linux-rt-rebase.q.orig/kernel/relay.c +++ linux-rt-rebase.q/kernel/relay.c @@ -319,6 +319,10 @@ static void wakeup_readers(unsigned long { struct rchan_buf *buf = (struct rchan_buf *)data; wake_up_interruptible(&buf->read_wait); + /* + * Stupid polling for now: + */ + mod_timer(&buf->timer, jiffies + 1); } /** @@ -336,6 +340,7 @@ static void __relay_reset(struct rchan_b init_waitqueue_head(&buf->read_wait); kref_init(&buf->kref); setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); + mod_timer(&buf->timer, jiffies + 1); } else del_timer_sync(&buf->timer); @@ -605,15 +610,6 @@ size_t relay_switch_subbuf(struct rchan_ buf->subbufs_produced++; buf->dentry->d_inode->i_size += buf->chan->subbuf_size - buf->padding[old_subbuf]; - smp_mb(); - if (waitqueue_active(&buf->read_wait)) - /* - * Calling wake_up_interruptible() from here - * will deadlock if we happen to be logging - * from the scheduler (trying to re-grab - * rq->lock), so defer it. 
- */ - __mod_timer(&buf->timer, jiffies + 1); } old = buf->data; patches/paravirt-function-pointer-fix.patch0000664000077200007720000000162410655544577020504 0ustar mingomingo--- arch/i386/kernel/paravirt.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) Index: linux-rt-rebase.q/arch/i386/kernel/paravirt.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/paravirt.c +++ linux-rt-rebase.q/arch/i386/kernel/paravirt.c @@ -273,6 +273,16 @@ int paravirt_disable_iospace(void) return ret; } +#ifdef CONFIG_HIGHPTE +/* + * kmap_atomic() might be an inline or a macro: + */ +static void *kmap_atomic_func(struct page *page, enum km_type idx) +{ + return kmap_atomic(page, idx); +} +#endif + struct paravirt_ops paravirt_ops = { .name = "bare hardware", .paravirt_enabled = 0, @@ -361,7 +371,7 @@ struct paravirt_ops paravirt_ops = { .pte_update_defer = paravirt_nop, #ifdef CONFIG_HIGHPTE - .kmap_atomic_pte = kmap_atomic, + .kmap_atomic_pte = kmap_atomic_func, #endif #ifdef CONFIG_X86_PAE patches/sched-rt-balance-fix.patch0000664000077200007720000000260310655544577016445 0ustar mingomingo Hi Ingo, Thomas, this one-liner fixes a bug in balance_rt_tasks() which sometimes manifests by having a lower prio task being scheduled while a higher prio task is sitting waiting on another runqueue. This is pretty hard to reproduce on low cpu count machines, for example, I had to have sched_football run in a loop for ~38h before it failed on a dual HT Xeon box. Sébastien. --- In the CPU loop in balance_rt_tasks(), the 'next' task pointer is only ever updated if this_rq->lock has been dropped in double_lock_balance(). This sometimes lead to 'this_cpu' pulling tasks that are only garanteed to have a higher priority than the 'next' picked before the loop. Fix this to update 'next' to the last picked RT task. Signed-off-by: Sébastien Dugué --- kernel/sched.c | 8 ++++++++ 1 file changed, 8 insertions(+) Index: linux-rt-rebase.q/kernel/sched.c =================================================================== --- linux-rt-rebase.q.orig/kernel/sched.c +++ linux-rt-rebase.q/kernel/sched.c @@ -1523,6 +1523,14 @@ static void balance_rt_tasks(struct rq * * in another runqueue. (low likelyhood * but possible) */ + + /* + * Update next so that we won't pick a task + * on another cpu with a priority lower (or equal) + * than the one we just picked. + */ + next = p; + } spin_unlock(&src_rq->lock); } patches/nmi-watchdog-disable.patch0000664000077200007720000000711510655544576016551 0ustar mingomingoSubject: [patch] x86_64: do not enable the NMI watchdog by default From: Ingo Molnar do not enable the NMI watchdog by default. Now that we have lockdep i cannot remember the last time it caught a real bug, but the NMI watchdog can /cause/ problems. Furthermore, to the typical user, an NMI watchdog assert results in a total lockup anyway (if under X). In that sense, all that the NMI watchdog does is that it makes the system /less/ stable and /less/ debuggable. people can still enable it either after bootup via: echo 1 > /proc/sys/kernel/nmi or via the nmi_watchdog=1 or nmi_watchdog=2 boot options. build and boot tested on an Athlon64 box. 
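The watchdog can still be brought back with the boot options above or at run time; as a minimal illustration (not part of the patch, and assuming only the /proc/sys/kernel/nmi path quoted above), the sysctl write can also be done programmatically:

	#include <stdio.h>

	int main(void)
	{
		/* same effect as: echo 1 > /proc/sys/kernel/nmi (run as root) */
		FILE *f = fopen("/proc/sys/kernel/nmi", "w");

		if (!f) {
			perror("/proc/sys/kernel/nmi");
			return 1;
		}
		fputs("1\n", f);
		return fclose(f) ? 1 : 0;
	}
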
Signed-off-by: Ingo Molnar --- arch/x86_64/kernel/apic.c | 1 - arch/x86_64/kernel/io_apic.c | 2 -- arch/x86_64/kernel/nmi.c | 2 +- arch/x86_64/kernel/smpboot.c | 1 - include/asm-x86_64/nmi.h | 1 - 5 files changed, 1 insertion(+), 6 deletions(-) Index: linux-rt-rebase.q/arch/x86_64/kernel/apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/apic.c +++ linux-rt-rebase.q/arch/x86_64/kernel/apic.c @@ -517,7 +517,6 @@ void __cpuinit setup_local_APIC (void) oldvalue, value); } - nmi_watchdog_default(); setup_apic_nmi_watchdog(NULL); apic_pm_activate(); } Index: linux-rt-rebase.q/arch/x86_64/kernel/io_apic.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/io_apic.c +++ linux-rt-rebase.q/arch/x86_64/kernel/io_apic.c @@ -1720,7 +1720,6 @@ static inline void check_timer(void) */ unmask_IO_APIC_irq(0); if (!no_timer_check && timer_irq_works()) { - nmi_watchdog_default(); if (nmi_watchdog == NMI_IO_APIC) { disable_8259A_irq(0); setup_nmi(); @@ -1746,7 +1745,6 @@ static inline void check_timer(void) setup_ExtINT_IRQ0_pin(apic2, pin2, cfg->vector); if (timer_irq_works()) { apic_printk(APIC_VERBOSE," works.\n"); - nmi_watchdog_default(); if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); } Index: linux-rt-rebase.q/arch/x86_64/kernel/nmi.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/nmi.c +++ linux-rt-rebase.q/arch/x86_64/kernel/nmi.c @@ -54,7 +54,7 @@ static DEFINE_PER_CPU(short, wd_enabled) static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); /* Run after command line and cpu_init init, but before all other checks */ -void nmi_watchdog_default(void) +static inline void nmi_watchdog_default(void) { if (nmi_watchdog != NMI_DEFAULT) return; Index: linux-rt-rebase.q/arch/x86_64/kernel/smpboot.c =================================================================== --- linux-rt-rebase.q.orig/arch/x86_64/kernel/smpboot.c +++ linux-rt-rebase.q/arch/x86_64/kernel/smpboot.c @@ -850,7 +850,6 @@ static int __init smp_sanity_check(unsig */ void __init smp_prepare_cpus(unsigned int max_cpus) { - nmi_watchdog_default(); current_cpu_data = boot_cpu_data; current_thread_info()->cpu = 0; /* needed? */ set_cpu_sibling_map(0); Index: linux-rt-rebase.q/include/asm-x86_64/nmi.h =================================================================== --- linux-rt-rebase.q.orig/include/asm-x86_64/nmi.h +++ linux-rt-rebase.q/include/asm-x86_64/nmi.h @@ -59,7 +59,6 @@ extern void disable_timer_nmi_watchdog(v extern void enable_timer_nmi_watchdog(void); extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); -extern void nmi_watchdog_default(void); extern int setup_nmi_watchdog(char *); extern atomic_t nmi_active; patches/preempt-realtime-sched-i386.patch0000664000077200007720000000402410655544575017611 0ustar mingomingo--- arch/i386/kernel/entry.S | 14 +++++++++----- arch/i386/kernel/process.c | 4 +++- 2 files changed, 12 insertions(+), 6 deletions(-) Index: linux-rt-rebase.q/arch/i386/kernel/entry.S =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/entry.S +++ linux-rt-rebase.q/arch/i386/kernel/entry.S @@ -264,14 +264,18 @@ END(ret_from_exception) #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) + cmpl $0, kernel_preemption + jz restore_nocheck cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? 
jnz restore_nocheck need_resched: movl TI_flags(%ebp), %ecx # need_resched set ? testb $_TIF_NEED_RESCHED, %cl - jz restore_all + jz restore_nocheck testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ? - jz restore_all + jz restore_nocheck + DISABLE_INTERRUPTS(CLBR_ANY) + call preempt_schedule_irq jmp need_resched END(resume_kernel) @@ -481,11 +485,11 @@ work_pending: testl $(_TIF_NEED_RESCHED|_TIF_NEED_RESCHED_DELAYED), %ecx jz work_notifysig work_resched: - call schedule - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt + DISABLE_INTERRUPTS(CLBR_ANY) + call __schedule + # make sure we don't miss an interrupt # setting need_resched or sigpending # between sampling and the iret - TRACE_IRQS_OFF movl TI_flags(%ebp), %ecx andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? Index: linux-rt-rebase.q/arch/i386/kernel/process.c =================================================================== --- linux-rt-rebase.q.orig/arch/i386/kernel/process.c +++ linux-rt-rebase.q/arch/i386/kernel/process.c @@ -200,12 +200,14 @@ void cpu_idle(void) __get_cpu_var(irq_stat).idle_timestamp = jiffies; idle(); } + local_irq_disable(); trace_preempt_exit_idle(); tick_nohz_restart_sched_tick(); __preempt_enable_no_resched(); - schedule(); + __schedule(); preempt_disable(); trace_preempt_enter_idle(); + local_irq_enable(); } } patches/rcu-hrt-fixups.patch0000664000077200007720000001065010655544573015462 0ustar mingomingo include/linux/rcuclassic.h | 3 +++ include/linux/rcupdate.h | 1 + include/linux/rcupreempt.h | 3 +++ kernel/rcuclassic.c | 19 ++++++++++++++++--- kernel/rcupreempt.c | 22 +++++++++++++++++++++- 5 files changed, 44 insertions(+), 4 deletions(-) Index: linux/include/linux/rcuclassic.h =================================================================== --- linux.orig/include/linux/rcuclassic.h +++ linux/include/linux/rcuclassic.h @@ -144,5 +144,8 @@ extern void rcu_check_callbacks(int cpu, extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); +struct softirq_action; +extern void rcu_process_callbacks(struct softirq_action *unused); + #endif /* __KERNEL__ */ #endif /* __LINUX_RCUCLASSIC_H */ Index: linux/include/linux/rcupdate.h =================================================================== --- linux.orig/include/linux/rcupdate.h +++ linux/include/linux/rcupdate.h @@ -225,6 +225,7 @@ extern void rcu_barrier(void); /* Internal to kernel */ extern void rcu_init(void); +extern void rcu_advance_callbacks(int cpu, int user); extern void rcu_check_callbacks(int cpu, int user); #endif /* __KERNEL__ */ Index: linux/include/linux/rcupreempt.h =================================================================== --- linux.orig/include/linux/rcupreempt.h +++ linux/include/linux/rcupreempt.h @@ -49,6 +49,7 @@ extern void __rcu_read_lock(void); extern void __rcu_read_unlock(void); extern int rcu_pending(int cpu); +extern int rcu_needs_cpu(int cpu); #define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } #define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } @@ -62,5 +63,7 @@ extern void rcu_check_callbacks(int cpu, extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); +extern void rcu_process_callbacks(unsigned long unused); + #endif /* __KERNEL__ */ #endif /* __LINUX_RCUPREEMPT_H */ Index: linux/kernel/rcuclassic.c =================================================================== --- linux.orig/kernel/rcuclassic.c +++ linux/kernel/rcuclassic.c @@ -382,6 +382,8 
@@ static void rcu_offline_cpu(int cpu) static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) { + unsigned long flags; + if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { *rdp->donetail = rdp->curlist; rdp->donetail = rdp->curtail; @@ -390,12 +392,12 @@ static void __rcu_process_callbacks(stru } if (rdp->nxtlist && !rdp->curlist) { - local_irq_disable(); + local_irq_save(flags); rdp->curlist = rdp->nxtlist; rdp->curtail = rdp->nxttail; rdp->nxtlist = NULL; rdp->nxttail = &rdp->nxtlist; - local_irq_enable(); + local_irq_restore(flags); /* * start the next batch of callbacks @@ -422,7 +424,7 @@ static void __rcu_process_callbacks(stru rcu_do_batch(rdp); } -static void rcu_process_callbacks(struct softirq_action *unused) +void rcu_process_callbacks(struct softirq_action *unused) { __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); @@ -477,6 +479,17 @@ int rcu_needs_cpu(int cpu) return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); } +void rcu_advance_callbacks(int cpu, int user) +{ + if (user || + (idle_cpu(cpu) && !in_softirq() && + hardirq_count() <= (1 << HARDIRQ_SHIFT))) { + rcu_qsctr_inc(cpu); + rcu_bh_qsctr_inc(cpu); + } else if (!in_softirq()) + rcu_bh_qsctr_inc(cpu); +} + void rcu_check_callbacks(int cpu, int user) { if (user || Index: linux/kernel/rcupreempt.c =================================================================== --- linux.orig/kernel/rcupreempt.c +++ linux/kernel/rcupreempt.c @@ -259,7 +259,27 @@ void rcu_check_callbacks(int cpu, int us } } -static void rcu_process_callbacks(unsigned long data) +/* + * Needed by dynticks, to make sure all RCU processing has finished + * when we go idle: + */ +void rcu_advance_callbacks(int cpu, int user) +{ + unsigned long oldirq; + + if (rcu_ctrlblk.completed == rcu_data.completed) { + rcu_try_flip(); + if (rcu_ctrlblk.completed == rcu_data.completed) { + return; + } + } + spin_lock_irqsave(&rcu_data.lock, oldirq); + RCU_TRACE(rcupreempt_trace_check_callbacks, &rcu_data.trace); + __rcu_advance_callbacks(); + spin_unlock_irqrestore(&rcu_data.lock, oldirq); +} + +void rcu_process_callbacks(unsigned long unused) { unsigned long flags; struct rcu_head *next, *list; patches/trace-name-plus.patch0000664000077200007720000000641410655544572015561 0ustar mingomingo--- kernel/latency_trace.c | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) Index: linux/kernel/latency_trace.c =================================================================== --- linux.orig/kernel/latency_trace.c +++ linux/kernel/latency_trace.c @@ -866,29 +866,33 @@ static void notrace print_name(struct se * Special trace values: */ if (((long)eip < 100000L) && ((long)eip > -100000L)) { - seq_printf(m, "(%5ld)", eip); + seq_printf(m, "<%ld>", eip); return; } sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf); if (sym_name) - seq_puts(m, sym_name); + seq_printf(m, "%s+%#lx/%#lx", + sym_name, offset, size); else seq_printf(m, "<%08lx>", eip); } -static void notrace print_name_offset(struct seq_file *m, unsigned long eip) +static void notrace print_name_eip(struct seq_file *m, unsigned long eip) { char namebuf[KSYM_NAME_LEN+1]; unsigned long size, offset; const char *sym_name; char *modname; - sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf); - if (sym_name) - seq_printf(m, "%s+%#lx/%#lx <%08lx>", - sym_name, offset, size, eip); - else - seq_printf(m, "<%08lx>", 
eip); + if (eip) { + sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf); + if (sym_name) + seq_printf(m, "%s+%#lx/%#lx <%08lx>", + sym_name, offset, size, eip); + else + seq_printf(m, "<%08lx>", eip); + } else + seq_printf(m, "0"); } static unsigned long out_sequence = -1; @@ -1253,9 +1257,9 @@ static void * notrace l_start(struct seq seq_puts(m, " -----------------\n"); if (trace_user_triggered) { seq_puts(m, " => started at: "); - print_name_offset(m, tr->critical_start); + print_name_eip(m, tr->critical_start); seq_puts(m, "\n => ended at: "); - print_name_offset(m, tr->critical_end); + print_name_eip(m, tr->critical_end); seq_puts(m, "\n"); } seq_puts(m, "\n"); @@ -1365,9 +1369,9 @@ static int notrace l_show_fn(struct seq_ entry->preempt_count, trace_idx, entry->timestamp, abs_usecs/1000, abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000); - print_name_offset(m, entry->u.fn.eip); + print_name_eip(m, entry->u.fn.eip); seq_puts(m, " ("); - print_name_offset(m, entry->u.fn.parent_eip); + print_name_eip(m, entry->u.fn.parent_eip); seq_puts(m, ")\n"); } else { print_generic(m, entry); @@ -1392,7 +1396,7 @@ static int notrace l_show_special(struct print_generic(m, entry); print_timestamp(m, abs_usecs, rel_usecs); if (trace_verbose) - print_name_offset(m, entry->u.special.eip); + print_name_eip(m, entry->u.special.eip); else print_name(m, entry->u.special.eip); @@ -1436,7 +1440,7 @@ l_show_special_pid(struct seq_file *m, u print_generic(m, entry); print_timestamp(m, abs_usecs, rel_usecs); if (trace_verbose) - print_name_offset(m, entry->u.special.eip); + print_name_eip(m, entry->u.special.eip); else print_name(m, entry->u.special.eip); seq_printf(m, " <%.8s-%d> (%ld %ld)\n", @@ -1459,7 +1463,7 @@ l_show_special_sym(struct seq_file *m, u print_generic(m, entry); print_timestamp(m, abs_usecs, rel_usecs); if (trace_verbose) - print_name_offset(m, entry->u.special.eip); + print_name_eip(m, entry->u.special.eip); else print_name(m, entry->u.special.eip); patches/preempt-realtime-rawlocks.patch0000664000077200007720000001020710655544575017661 0ustar mingomingo--- drivers/oprofile/oprofilefs.c | 2 +- drivers/pci/access.c | 2 +- drivers/video/console/vgacon.c | 2 +- include/linux/kprobes.h | 2 +- include/linux/oprofile.h | 2 +- include/linux/percpu_counter.h | 2 +- kernel/kprobes.c | 2 +- kernel/softlockup.c | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) Index: linux-rt-rebase.q/drivers/oprofile/oprofilefs.c =================================================================== --- linux-rt-rebase.q.orig/drivers/oprofile/oprofilefs.c +++ linux-rt-rebase.q/drivers/oprofile/oprofilefs.c @@ -21,7 +21,7 @@ #define OPROFILEFS_MAGIC 0x6f70726f -DEFINE_SPINLOCK(oprofilefs_lock); +DEFINE_RAW_SPINLOCK(oprofilefs_lock); static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode) { Index: linux-rt-rebase.q/drivers/pci/access.c =================================================================== --- linux-rt-rebase.q.orig/drivers/pci/access.c +++ linux-rt-rebase.q/drivers/pci/access.c @@ -11,7 +11,7 @@ * configuration space. */ -static DEFINE_SPINLOCK(pci_lock); +static DEFINE_RAW_SPINLOCK(pci_lock); /* * Wrappers for all PCI configuration access functions. They just check Index: linux-rt-rebase.q/drivers/video/console/vgacon.c =================================================================== --- linux-rt-rebase.q.orig/drivers/video/console/vgacon.c +++ linux-rt-rebase.q/drivers/video/console/vgacon.c @@ -51,7 +51,7 @@ #include