v2.4.6.4 -> v2.4.6.5
author	Linus Torvalds <torvalds@athlon.transmeta.com>	Tue, 5 Feb 2002 03:10:49 +0000 (19:10 -0800)
committer	Linus Torvalds <torvalds@athlon.transmeta.com>	Tue, 5 Feb 2002 03:10:49 +0000 (19:10 -0800)
  - remember to bump the version string
  - Andrea Arcangeli: softirq cleanups and fixes, and everybody is happy
  again (ie I changed some details to make me happy ;)
  - Neil Brown: raid5 stall fix, nfsd filehandle sanity check fix
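
A note on the new raising convention introduced by the softirq rework:
__cpu_raise_softirq() is reduced to the raw pending-bit set and is only
appropriate where the softirq is guaranteed to run soon anyway (hard-irq or
bh context), while the new cpu_raise_softirq()/raise_softirq() functions
additionally wake the per-CPU ksoftirqd thread. A minimal sketch of the
rule, using the softirq numbers from this patch:

	/* from hard-irq or bh context: the pending softirq will be run
	 * on the way out of the interrupt, so the raw bit-set is enough */
	__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);

	/* from process context: also wakes ksoftirqd, so the softirq
	 * runs soon even if no interrupt is about to return */
	raise_softirq(NET_TX_SOFTIRQ);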

22 files changed:
Makefile
arch/alpha/kernel/entry.S
drivers/md/raid5.c
fs/nfsd/nfsfh.c
include/asm-alpha/hardirq.h
include/asm-alpha/softirq.h
include/asm-arm/softirq.h
include/asm-i386/hardirq.h
include/asm-i386/softirq.h
include/asm-ppc/softirq.h
include/asm-sh/softirq.h
include/asm-sparc/hardirq.h
include/asm-sparc/softirq.h
include/asm-sparc64/hardirq.h
include/asm-sparc64/softirq.h
include/linux/interrupt.h
include/linux/irq_cpustat.h
include/linux/netdevice.h
kernel/ksyms.c
kernel/sched.c
kernel/softirq.c
net/core/dev.c

index 6df0bb486ad82ad2de38e3271afef3689ed002bf..41dfa84748d4628bbfecbf459d4e32e059d47f09 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 7
-EXTRAVERSION =-pre3
+EXTRAVERSION =-pre5
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
index 0f92c816ac5cb133a3529cb3ebf02f05773887da..ac819f531c710995886fb5e8b053c71b15f2eaed 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -576,17 +576,6 @@ entSys:
 .align 3
 ret_from_sys_call:
        cmovne  $26,0,$19               /* $19 = 0 => non-restartable */
-#ifdef CONFIG_SMP
-       ldl     $3,TASK_PROCESSOR($8)
-       sll     $3,L1_CACHE_SHIFT,$3
-#endif
-       lda     $4,irq_stat
-#ifdef CONFIG_SMP
-       addq    $3,$4,$4
-#endif
-       ldq     $4,0($4)                /* __softirq_pending */
-       bne     $4,handle_softirq
-ret_from_softirq:
        ldq     $0,SP_OFF($30)
        and     $0,8,$0
        beq     $0,restore_all
@@ -664,17 +653,6 @@ strace_error:
        mov     $31,$26         /* tell "ret_from_sys_call" we can restart */
        br      ret_from_sys_call
 
-       .align 3
-handle_softirq:
-       subq    $30,16,$30
-       stq     $19,0($30)      /* save syscall nr */
-       stq     $20,8($30)      /* and error indication (a3) */
-       jsr     $26,do_softirq
-       ldq     $19,0($30)
-       ldq     $20,8($30)
-       addq    $30,16,$30
-       br      ret_from_softirq
-       
        .align 3
 syscall_error:
        /*
index 412cf5dd06df43bd572bacb86d2f244c1c88d746..60bd041ebea0f14c74d85c6373a5b89096a2f2ca 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -66,10 +66,11 @@ static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
                        BUG();
                if (atomic_read(&conf->active_stripes)==0)
                        BUG();
-               if (test_bit(STRIPE_DELAYED, &sh->state))
-                       list_add_tail(&sh->lru, &conf->delayed_list);
-               else if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       list_add_tail(&sh->lru, &conf->handle_list);
+               if (test_bit(STRIPE_HANDLE, &sh->state)) {
+                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                               list_add_tail(&sh->lru, &conf->delayed_list);
+                       else
+                               list_add_tail(&sh->lru, &conf->handle_list);
                        md_wakeup_thread(conf->thread);
                } else {
                        if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
@@ -1167,10 +1168,9 @@ static void raid5_unplug_device(void *data)
 
        raid5_activate_delayed(conf);
        
-       if (conf->plugged) {
-               conf->plugged = 0;
-               md_wakeup_thread(conf->thread);
-       }       
+       conf->plugged = 0;
+       md_wakeup_thread(conf->thread);
+
        spin_unlock_irqrestore(&conf->device_lock, flags);
 }
 
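The two raid5 hunks above are the stall fix: previously a stripe flagged
both STRIPE_DELAYED and STRIPE_HANDLE was queued on delayed_list without an
md_wakeup_thread() call, and raid5_unplug_device() only issued the wakeup
while conf->plugged was still set, so a badly timed unplug could leave
delayed stripes with nothing left to wake the thread. Now the thread is
woken for every STRIPE_HANDLE stripe, delayed or not, and the unplug path
wakes it unconditionally.
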
index b8e8158641510a4e82ce6652c77550d90421f6c5..0d9a4f4eafb9e04405c480f51f815f3881a63cc0 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -837,11 +837,11 @@ fh_update(struct svc_fh *fhp)
        dentry = fhp->fh_dentry;
        if (!dentry->d_inode)
                goto out_negative;
-       if (fhp->fh_handle.fh_fileid_type != 0)
-               goto out_uptodate;
        if (fhp->fh_handle.fh_version != 1) {
                _fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle);
        } else {
+               if (fhp->fh_handle.fh_fileid_type != 0)
+                       goto out_uptodate;
                datap = fhp->fh_handle.fh_auth+
                        fhp->fh_handle.fh_size/4 -1;
                fhp->fh_handle.fh_fileid_type =
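
The nfsd hunk narrows the fh_fileid_type short-circuit to version-1
handles: in an old-style (fh_version != 1) handle that byte is part of the
old handle layout rather than a fileid type, so the previous early return
could skip _fh_update_old() entirely and leave the handle unfilled.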
index e9d08e3cedcf81ddb4076a20a9c5ad0931d798c2..c0ca14cf582d74ea471a56c02bad4606202cafd9 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -10,6 +10,7 @@ typedef struct {
        unsigned int __local_irq_count;
        unsigned int __local_bh_count;
        unsigned int __syscall_count;
+       struct task_struct * __ksoftirqd_task;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
index bb856123f76c489c83615b84e0e5dc93f488d29d..dba623bf85b9c0b17687ad9c9b1b4df4c4bd7f80 100644
--- a/include/asm-alpha/softirq.h
+++ b/include/asm-alpha/softirq.h
@@ -8,21 +8,30 @@
 extern inline void cpu_bh_disable(int cpu)
 {
        local_bh_count(cpu)++;
-       mb();
+       barrier();
 }
 
-extern inline void cpu_bh_enable(int cpu)
+extern inline void __cpu_bh_enable(int cpu)
 {
-       mb();
+       barrier();
        local_bh_count(cpu)--;
 }
 
-#define local_bh_enable()      cpu_bh_enable(smp_processor_id())
-#define __local_bh_enable      local_bh_enable
+#define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
 #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
 
+#define local_bh_enable()                                      \
+do {                                                           \
+       int cpu;                                                \
+                                                               \
+       barrier();                                              \
+       cpu = smp_processor_id();                               \
+       if (!--local_bh_count(cpu) && softirq_pending(cpu))     \
+               do_softirq();                                   \
+} while (0)
+
 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
 
-#define __cpu_raise_softirq(cpu,nr) set_bit((nr), &softirq_pending(cpu))
+#define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))
 
 #endif /* _ALPHA_SOFTIRQ_H */
index 78dd609063a1ec45cd6bb26a59d90ad3cfc2fe40..20b01947bf108129d489253666fdc6b8400c41f9 100644
--- a/include/asm-arm/softirq.h
+++ b/include/asm-arm/softirq.h
@@ -12,7 +12,6 @@
 #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
 #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
 #define __cpu_raise_softirq(cpu,nr) set_bit((nr), &softirq_pending(cpu))
-#define raise_softirq(nr)      __cpu_raise_softirq(smp_processor_id(), (nr))
 
 #define in_softirq()           (local_bh_count(smp_processor_id()) != 0)
 
index 1e9b7bd742c42270d7faaea5d7c0c688936d2bca..69921aaa68f61496ccb59d24704d77cde1853ce0 100644
--- a/include/asm-i386/hardirq.h
+++ b/include/asm-i386/hardirq.h
@@ -11,6 +11,7 @@ typedef struct {
        unsigned int __local_irq_count;
        unsigned int __local_bh_count;
        unsigned int __syscall_count;
+       struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
        unsigned int __nmi_count;       /* arch dependent */
 } ____cacheline_aligned irq_cpustat_t;
 
index 7607f0357cf170f369e3820b13cc1b9d9f588f0e..4bf8d607b906beabdbb5f225eb44e9fbf8df6f47 100644
--- a/include/asm-i386/softirq.h
+++ b/include/asm-i386/softirq.h
@@ -11,8 +11,6 @@
 
 #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
 #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
-#define __cpu_raise_softirq(cpu,nr) set_bit((nr), &softirq_pending(cpu));
-#define raise_softirq(nr) __cpu_raise_softirq(smp_processor_id(), (nr))
 
 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
 
@@ -28,6 +26,7 @@
 do {                                                                   \
        unsigned int *ptr = &local_bh_count(smp_processor_id());        \
                                                                        \
+       barrier();                                                      \
        if (!--*ptr)                                                    \
                __asm__ __volatile__ (                                  \
                        "cmpl $0, -8(%0);"                              \
@@ -46,4 +45,6 @@ do {                                                                  \
                /* no registers clobbered */ );                         \
 } while (0)
 
+#define __cpu_raise_softirq(cpu, nr) __set_bit(nr, &softirq_pending(cpu))
+
 #endif /* __ASM_SOFTIRQ_H */
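
For reference, the i386 local_bh_enable() above implements in assembly what
the alpha header now spells out in C: drop the bh count and, if it reaches
zero with softirqs pending, run them. A sketch of the equivalent C
(assuming the irq_cpustat_t layout above, where __softirq_pending sits 8
bytes before __local_bh_count, which is what the cmpl $0, -8(%0) peeks at):

	barrier();
	if (!--local_bh_count(smp_processor_id()) &&
	    softirq_pending(smp_processor_id()))
		do_softirq();	/* the asm branches to an out-of-line stub */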
index 9280f0baa923d69a9130f21e8930776b53caa2d7..ed043d48872eb365534bf87f9f2f7dfe5d5fd0d1 100644
--- a/include/asm-ppc/softirq.h
+++ b/include/asm-ppc/softirq.h
@@ -30,7 +30,6 @@ do {                                                  \
 } while (0)
 
 #define __cpu_raise_softirq(cpu, nr) set_bit((nr), &softirq_pending(cpu));
-#define raise_softirq(nr)      __cpu_raise_softirq(smp_processor_id(), (nr))
 
 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
 
index 998a8abfb2a748421d7ee725ad28b1fd637ceae8..06754f3f6fef9c94fd977eb4ce2d79e473e83253 100644
--- a/include/asm-sh/softirq.h
+++ b/include/asm-sh/softirq.h
@@ -26,7 +26,6 @@ do {                                                  \
 } while (0)
 
 #define __cpu_raise_softirq(cpu, nr) set_bit((nr), &softirq_pending(cpu));
-#define raise_softirq(nr)      __cpu_raise_softirq(smp_processor_id(), (nr))
 
 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
 
index ab7958844f447c77874f1a38987013469b7f234b..d82222c892fc670448a81387d68eed6a3500a5bf 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -23,6 +23,7 @@ typedef struct {
 #endif
        unsigned int __local_bh_count;
        unsigned int __syscall_count;
+        struct task_struct * __ksoftirqd_task;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
index f63dea3a743cd95878f9e3f80da980ab0a7b2f9b..f65861fc92798d7a050d5a4f7ee031146740cf58 100644
--- a/include/asm-sparc/softirq.h
+++ b/include/asm-sparc/softirq.h
@@ -22,11 +22,11 @@ do { if (!--local_bh_count(smp_processor_id()) && \
                __sti();                          \
      }                                           \
 } while (0)
-#define __cpu_raise_softirq(cpu, nr)   (softirq_pending(cpu) |= (1<<nr))
-#define raise_softirq(nr)                              \
+#define __do_cpu_raise_softirq(cpu, nr)        (softirq_pending(cpu) |= (1<<nr))
+#define __cpu_raise_softirq(cpu, nr)                   \
 do {   unsigned long flags;                            \
        local_irq_save(flags);                          \
-       __cpu_raise_softirq(smp_processor_id(), nr);    \
+       __do_cpu_raise_softirq(cpu, nr);                        \
        local_irq_restore(flags);                       \
 } while (0)
 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
index 6693f13f8cba4bfd862df8c0f5aa71dfecf5392b..1be21d47143da59b7f6d122eb21050d7c4fb138c 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -22,6 +22,7 @@ typedef struct {
 #endif
        unsigned int __local_bh_count;
        unsigned int __syscall_count;
+        struct task_struct * __ksoftirqd_task;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
index eeb8cfc64bd423d5904689718ed1123d320b3787..1db1b65ef396f73c08e52385b4bf5220a703aad7 100644
--- a/include/asm-sparc64/softirq.h
+++ b/include/asm-sparc64/softirq.h
@@ -19,11 +19,12 @@ do { if (!--local_bh_count(smp_processor_id()) && \
                __sti();                          \
      }                                           \
 } while (0)
-#define __cpu_raise_softirq(cpu, nr)   (softirq_pending(cpu) |= (1<<nr))
-#define raise_softirq(nr)                              \
+
+#define __do_cpu_raise_softirq(cpu, nr)        (softirq_pending(cpu) |= (1<<nr))
+#define __cpu_raise_softirq(cpu,nr)                    \
 do {   unsigned long flags;                            \
        local_irq_save(flags);                          \
-       __cpu_raise_softirq(smp_processor_id(), nr);    \
+       __do_cpu_raise_softirq(cpu, nr);                        \
        local_irq_restore(flags);                       \
 } while (0)
 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
index d8ca2a605be2ab3bee1022654abd08043a78b129..5e8c520f42bf064e07d2feb9fb769ddff6ca7da3 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -73,8 +73,9 @@ struct softirq_action
 
 asmlinkage void do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
-
 extern void softirq_init(void);
+extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
+extern void FASTCALL(raise_softirq(unsigned int nr));
 
 
 
@@ -129,7 +130,7 @@ extern struct tasklet_head tasklet_vec[NR_CPUS];
 extern struct tasklet_head tasklet_hi_vec[NR_CPUS];
 
 #define tasklet_trylock(t) (!test_and_set_bit(TASKLET_STATE_RUN, &(t)->state))
-#define tasklet_unlock(t) clear_bit(TASKLET_STATE_RUN, &(t)->state)
+#define tasklet_unlock(t) do { smp_mb__before_clear_bit(); clear_bit(TASKLET_STATE_RUN, &(t)->state); } while(0)
 #define tasklet_unlock_wait(t) while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
 
 extern void tasklet_schedule(struct tasklet_struct *t);
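
The smp_mb__before_clear_bit() added to tasklet_unlock() matters because,
with the scheduling rework below, the RUN bit is the only thing serializing
tasklet execution across CPUs. A sketch of the pairing it protects (two
CPUs assumed; names as above):

	/* CPU0, running the tasklet */
	tasklet_trylock(t);		/* test_and_set_bit(RUN) */
	t->func(t->data);		/* handler's stores */
	tasklet_unlock(t);		/* barrier, then clear_bit(RUN) */

	/* CPU1 spins in tasklet_trylock() / tasklet_unlock_wait(); once
	 * it sees RUN clear, the barrier guarantees it also sees every
	 * store t->func() made on CPU0 */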
index 109b0c04081a83df9f5ecac9c2491673c6a44119..24696e15dacce42eccc2301756688e1f01ad7471 100644
--- a/include/linux/irq_cpustat.h
+++ b/include/linux/irq_cpustat.h
@@ -30,6 +30,7 @@ extern irq_cpustat_t irq_stat[];                      /* defined in asm/hardirq.h */
 #define local_irq_count(cpu)   __IRQ_STAT((cpu), __local_irq_count)
 #define local_bh_count(cpu)    __IRQ_STAT((cpu), __local_bh_count)
 #define syscall_count(cpu)     __IRQ_STAT((cpu), __syscall_count)
+#define ksoftirqd_task(cpu)    __IRQ_STAT((cpu), __ksoftirqd_task)
   /* arch dependent irq_stat fields */
 #define nmi_count(cpu)         __IRQ_STAT((cpu), __nmi_count)          /* i386, ia64 */
 
index 830d9113645e3dd2e0bd365a264291c08e75590c..40c320ff1ee3bfeab4e27174c0327224343abbac 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -487,7 +487,7 @@ static inline void __netif_schedule(struct net_device *dev)
                local_irq_save(flags);
                dev->next_sched = softnet_data[cpu].output_queue;
                softnet_data[cpu].output_queue = dev;
-               __cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+               cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
 }
@@ -536,7 +536,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
                local_irq_save(flags);
                skb->next = softnet_data[cpu].completion_queue;
                softnet_data[cpu].completion_queue = skb;
-               __cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+               cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
 }
index 9702fda030d5b36a8d34a723874296652e87af2c..67ce3774af2567cd8b6ceae7f0e31756bfec8322 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -538,6 +538,8 @@ EXPORT_SYMBOL(tasklet_init);
 EXPORT_SYMBOL(tasklet_kill);
 EXPORT_SYMBOL(__run_task_queue);
 EXPORT_SYMBOL(do_softirq);
+EXPORT_SYMBOL(raise_softirq);
+EXPORT_SYMBOL(cpu_raise_softirq);
 EXPORT_SYMBOL(tasklet_schedule);
 EXPORT_SYMBOL(tasklet_hi_schedule);
 
index 9143185b5e2d55a3565313d92dcababc15da1fef..95e40b9e645338b800601f6a887803a5016e045d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -543,11 +543,6 @@ need_resched_back:
 
        release_kernel_lock(prev, this_cpu);
 
-       /* Do "administrative" work here while we don't hold any locks */
-       if (softirq_pending(this_cpu))
-               goto handle_softirq;
-handle_softirq_back:
-
        /*
         * 'sched_data' is protected by the fact that we can run
         * only one process per CPU.
@@ -689,14 +684,12 @@ recalculate:
        goto repeat_schedule;
 
 still_running:
+       if (!(prev->cpus_allowed & (1UL << this_cpu)))
+               goto still_running_back;
        c = goodness(prev, this_cpu, prev->active_mm);
        next = prev;
        goto still_running_back;
 
-handle_softirq:
-       do_softirq();
-       goto handle_softirq_back;
-
 move_rr_last:
        if (!prev->counter) {
                prev->counter = NICE_TO_TICKS(prev->nice);
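
Two independent changes land in sched.c: schedule() no longer runs pending
softirqs itself (that work now happens on interrupt return, in
local_bh_enable(), or in the new ksoftirqd threads), and the still_running
path now skips the goodness() preference for a process that is not allowed
on this CPU, so it cannot be re-selected there.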
index 9ffb8e3a63dff5bd5f70b3fc970eaa9e643b0d86..fd53797957eeb8cf1faf0fd75680f7c4cddd0ea0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -47,21 +47,38 @@ irq_cpustat_t irq_stat[NR_CPUS];
 
 static struct softirq_action softirq_vec[32] __cacheline_aligned;
 
+/*
+ * We cannot loop indefinitely here without risking userspace
+ * starvation, but we also don't want to introduce a worst-case
+ * 1/HZ latency for pending events, so let the scheduler balance
+ * the softirq load for us.
+ */
+static inline void wakeup_softirqd(unsigned cpu)
+{
+       struct task_struct * tsk = ksoftirqd_task(cpu);
+
+       if (tsk && tsk->state != TASK_RUNNING)
+               wake_up_process(tsk);
+}
+
 asmlinkage void do_softirq()
 {
        int cpu = smp_processor_id();
        __u32 pending;
+       long flags;
+       __u32 mask;
 
        if (in_interrupt())
                return;
 
-       local_irq_disable();
+       local_irq_save(flags);
 
        pending = softirq_pending(cpu);
 
        if (pending) {
                struct softirq_action *h;
 
+               mask = ~pending;
                local_bh_disable();
 restart:
                /* Reset the pending bitmask before enabling irqs */
@@ -81,14 +98,40 @@ restart:
                local_irq_disable();
 
                pending = softirq_pending(cpu);
-               if (pending)
+               if (pending & mask) {
+                       mask &= ~pending;
                        goto restart;
+               }
                __local_bh_enable();
+
+               if (pending)
+                       wakeup_softirqd(cpu);
        }
 
-       local_irq_enable();
+       local_irq_restore(flags);
+}
+
+inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
+{
+       __cpu_raise_softirq(cpu, nr);
+
+       /*
+        * If we're in an interrupt or bh, we're done
+        * (this also catches bh-disabled code). We will
+        * actually run the softirq once we return from
+        * the irq or bh.
+        *
+        * Otherwise we wake up ksoftirqd to make sure we
+        * schedule the softirq soon.
+        */
+       if (!(local_irq_count(cpu) | local_bh_count(cpu)))
+               wakeup_softirqd(cpu);
 }
 
+void raise_softirq(unsigned int nr)
+{
+       cpu_raise_softirq(smp_processor_id(), nr);
+}
 
 void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
 {
@@ -112,11 +155,10 @@ void tasklet_schedule(struct tasklet_struct *t)
         * If nobody is running it then add it to this CPU's
         * tasklet queue.
         */
-       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
-                                               tasklet_trylock(t)) {
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                t->next = tasklet_vec[cpu].list;
                tasklet_vec[cpu].list = t;
-               __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+               cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
                tasklet_unlock(t);
        }
        local_irq_restore(flags);
@@ -130,11 +172,10 @@ void tasklet_hi_schedule(struct tasklet_struct *t)
        cpu = smp_processor_id();
        local_irq_save(flags);
 
-       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
-                                               tasklet_trylock(t)) {
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                t->next = tasklet_hi_vec[cpu].list;
                tasklet_hi_vec[cpu].list = t;
-               __cpu_raise_softirq(cpu, HI_SOFTIRQ);
+               cpu_raise_softirq(cpu, HI_SOFTIRQ);
                tasklet_unlock(t);
        }
        local_irq_restore(flags);
@@ -148,37 +189,30 @@ static void tasklet_action(struct softirq_action *a)
        local_irq_disable();
        list = tasklet_vec[cpu].list;
        tasklet_vec[cpu].list = NULL;
+       local_irq_enable();
 
        while (list) {
                struct tasklet_struct *t = list;
 
                list = list->next;
 
-               /*
-                * A tasklet is only added to the queue while it's
-                * locked, so no other CPU can have this tasklet
-                * pending:
-                */
                if (!tasklet_trylock(t))
                        BUG();
-repeat:
-               if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-                       BUG();
                if (!atomic_read(&t->count)) {
-                       local_irq_enable();
+                       if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+                               BUG();
                        t->func(t->data);
-                       local_irq_disable();
-                       /*
-                        * One more run if the tasklet got reactivated:
-                        */
-                       if (test_bit(TASKLET_STATE_SCHED, &t->state))
-                               goto repeat;
+                       tasklet_unlock(t);
+                       continue;
                }
                tasklet_unlock(t);
-               if (test_bit(TASKLET_STATE_SCHED, &t->state))
-                       tasklet_schedule(t);
+
+               local_irq_disable();
+               t->next = tasklet_vec[cpu].list;
+               tasklet_vec[cpu].list = t;
+               cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+               local_irq_enable();
        }
-       local_irq_enable();
 }
 
 
@@ -193,6 +227,7 @@ static void tasklet_hi_action(struct softirq_action *a)
        local_irq_disable();
        list = tasklet_hi_vec[cpu].list;
        tasklet_hi_vec[cpu].list = NULL;
+       local_irq_enable();
 
        while (list) {
                struct tasklet_struct *t = list;
@@ -201,21 +236,21 @@ static void tasklet_hi_action(struct softirq_action *a)
 
                if (!tasklet_trylock(t))
                        BUG();
-repeat:
-               if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-                       BUG();
                if (!atomic_read(&t->count)) {
-                       local_irq_enable();
+                       if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+                               BUG();
                        t->func(t->data);
-                       local_irq_disable();
-                       if (test_bit(TASKLET_STATE_SCHED, &t->state))
-                               goto repeat;
+                       tasklet_unlock(t);
+                       continue;
                }
                tasklet_unlock(t);
-               if (test_bit(TASKLET_STATE_SCHED, &t->state))
-                       tasklet_hi_schedule(t);
+
+               local_irq_disable();
+               t->next = tasklet_hi_vec[cpu].list;
+               tasklet_hi_vec[cpu].list = t;
+               cpu_raise_softirq(cpu, HI_SOFTIRQ);
+               local_irq_enable();
        }
-       local_irq_enable();
 }
 
 
@@ -335,3 +370,61 @@ void __run_task_queue(task_queue *list)
                        f(data);
        }
 }
+
+static int ksoftirqd(void * __bind_cpu)
+{
+       int bind_cpu = *(int *) __bind_cpu;
+       int cpu = cpu_logical_map(bind_cpu);
+
+       daemonize();
+       current->nice = 19;
+       sigfillset(&current->blocked);
+
+       /* Migrate to the right CPU */
+       current->cpus_allowed = 1UL << cpu;
+       while (smp_processor_id() != cpu)
+               schedule();
+
+       sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);
+
+       __set_current_state(TASK_INTERRUPTIBLE);
+       mb();
+
+       ksoftirqd_task(cpu) = current;
+
+       for (;;) {
+               if (!softirq_pending(cpu))
+                       schedule();
+
+               __set_current_state(TASK_RUNNING);
+
+               while (softirq_pending(cpu)) {
+                       do_softirq();
+                       if (current->need_resched)
+                               schedule();
+               }
+
+               __set_current_state(TASK_INTERRUPTIBLE);
+       }
+}
+
+static __init int spawn_ksoftirqd(void)
+{
+       int cpu;
+
+       for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+               if (kernel_thread(ksoftirqd, (void *) &cpu,
+                                 CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
+                       printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
+               else {
+                       while (!ksoftirqd_task(cpu_logical_map(cpu))) {
+                               current->policy |= SCHED_YIELD;
+                               schedule();
+                       }
+               }
+       }
+
+       return 0;
+}
+
+__initcall(spawn_ksoftirqd);
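
Note the handshake in spawn_ksoftirqd(): kernel_thread() is passed the
address of the parent's loop counter, so the parent must not advance cpu
until the child has read it; it therefore yields until the child publishes
itself through ksoftirqd_task(). The child, in turn, sets
TASK_INTERRUPTIBLE before each softirq_pending() check, so a concurrent
wakeup_softirqd() cannot be lost between the check and the schedule().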
index 6e697864dcb57b95c7899eab46b17567cd39eb07..250de831bac2986a7ae1d6833e68e334903a3d88 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1217,6 +1217,8 @@ int netif_rx(struct sk_buff *skb)
 enqueue:
                        dev_hold(skb->dev);
                        __skb_queue_tail(&queue->input_pkt_queue,skb);
+
+                       /* Runs from irqs or BH's, no need to wake BH */
                        __cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
                        local_irq_restore(flags);
 #ifndef OFFLINE_SAMPLE
@@ -1527,6 +1529,8 @@ softnet_break:
 
        local_irq_disable();
        netdev_rx_stat[this_cpu].time_squeeze++;
+
+       /* This already runs in BH context, no need to wake up BH's */
        __cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
        local_irq_enable();