# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	ChangeSet	1.661 -> 1.663
#	include/linux/sched.h	1.62 -> 1.63
#	kernel/sys.c	1.20 -> 1.21
#	kernel/sched.c	1.76 -> 1.78
#	kernel/timer.c	1.9 -> 1.10
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/05/24	wli@elm3b52.eng.beaverton.ibm.com	1.662
# O(1) count_active_tasks().
# --------------------------------------------
# 02/05/24	wli@elm3b52.eng.beaverton.ibm.com	1.663
# sched.c:
#   Correct load average calculation by not incrementing
#   ->nr_uninterruptible if a task is a freshly forked child or if the
#   branch activating a task during wakeup is not taken.
# --------------------------------------------
#
diff -Nru a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h	Sat May 25 20:29:51 2002
+++ b/include/linux/sched.h	Sat May 25 20:29:51 2002
@@ -80,6 +80,7 @@
 extern int nr_threads;
 extern int last_pid;
 extern unsigned long nr_running(void);
+extern unsigned long nr_uninterruptible(void);
 
 #include
 #include
diff -Nru a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c	Sat May 25 20:29:51 2002
+++ b/kernel/sched.c	Sat May 25 20:29:51 2002
@@ -133,6 +133,7 @@
 	spinlock_t lock;
 	spinlock_t frozen;
 	unsigned long nr_running, nr_switches, expired_timestamp;
+	signed long nr_uninterruptible;
 	task_t *curr, *idle;
 	prio_array_t *active, *expired, arrays[2];
 	int prev_nr_running[NR_CPUS];
@@ -240,6 +241,8 @@
 static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running--;
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible++;
 	dequeue_task(p, p->array);
 	p->array = NULL;
 }
@@ -319,11 +322,16 @@
 {
 	unsigned long flags;
 	int success = 0;
+	int uninterruptible = 0;
 	runqueue_t *rq;
 
 	rq = task_rq_lock(p, &flags);
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		uninterruptible = 1;
 	p->state = TASK_RUNNING;
 	if (!p->array) {
+		if (uninterruptible)
+			rq->nr_uninterruptible--;
 		activate_task(p, rq);
 		if (p->prio < rq->curr->prio)
 			resched_task(rq->curr);
@@ -425,6 +433,16 @@
 
 	for (i = 0; i < smp_num_cpus; i++)
 		sum += cpu_rq(cpu_logical_map(i))->nr_running;
+
+	return sum;
+}
+
+unsigned long nr_uninterruptible(void)
+{
+	unsigned long i, sum = 0;
+
+	for (i = 0; i < smp_num_cpus; i++)
+		sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible;
 
 	return sum;
 }
diff -Nru a/kernel/timer.c b/kernel/timer.c
--- a/kernel/timer.c	Sat May 25 20:29:51 2002
+++ b/kernel/timer.c	Sat May 25 20:29:51 2002
@@ -597,17 +597,7 @@
  */
static unsigned long count_active_tasks(void)
 {
-	struct task_struct *p;
-	unsigned long nr = 0;
-
-	read_lock(&tasklist_lock);
-	for_each_task(p) {
-		if ((p->state == TASK_RUNNING ||
-		     (p->state & TASK_UNINTERRUPTIBLE)))
-			nr += FIXED_1;
-	}
-	read_unlock(&tasklist_lock);
-	return nr;
+	return (nr_running() + nr_uninterruptible()) * FIXED_1;
 }
 
 /*