Create "wake_up_state()" macro that selectively wakes up processes only
authorLinus Torvalds <torvalds@home.transmeta.com>
Sun, 9 Feb 2003 13:32:37 +0000 (05:32 -0800)
committerLinus Torvalds <torvalds@home.transmeta.com>
Sun, 9 Feb 2003 13:32:37 +0000 (05:32 -0800)
from certain states.

This simplifies "default_wake_function()", and makes it possible for
signal handling to wake up only the processes it _should_ wake up
without races.
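
For readers skimming the diff below: the heart of the change is that try_to_wake_up() now takes a state mask and performs the wakeup only when the task's current state is covered by that mask. The following stand-alone sketch models that gating in plain user-space C; the state values and the model_wake_up_state() helper are illustrative stand-ins, not the kernel's actual definitions.

    #include <stdio.h>

    /* Illustrative stand-in state bits; the real values live in the
     * kernel's <linux/sched.h> and may differ. */
    #define TASK_RUNNING         0
    #define TASK_INTERRUPTIBLE   1
    #define TASK_UNINTERRUPTIBLE 2
    #define TASK_STOPPED         4

    struct task { unsigned int state; };

    /* Simplified model of the reworked try_to_wake_up(): the wakeup
     * happens only if the task's current state is within the
     * caller-supplied mask. */
    static int model_wake_up_state(struct task *p, unsigned int state_mask)
    {
            if (!(p->state & state_mask))
                    return 0;               /* not in a wakeable state: do nothing */
            p->state = TASK_RUNNING;        /* otherwise make the task runnable */
            return 1;
    }

    int main(void)
    {
            struct task t = { .state = TASK_STOPPED };

            /* Like wake_up_state(t, TASK_STOPPED): wakes a stopped task... */
            printf("%d\n", model_wake_up_state(&t, TASK_STOPPED));       /* 1 */

            /* ...but a mask that excludes the current state is a no-op,
             * which is what lets signal code wake only the tasks it should. */
            printf("%d\n", model_wake_up_state(&t, TASK_INTERRUPTIBLE)); /* 0 */
            return 0;
    }

In the patch itself, wake_up_process() keeps its old behaviour by passing TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, while handle_stop_signal() can pass just TASK_STOPPED and drop its racy manual state handling.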

include/linux/sched.h
kernel/sched.c
kernel/signal.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index c5ff8e452d4905ce8de673dae87ff9838c7ce194..16864532fcd935741dc0f28b4d55b8953476592f 100644
@@ -516,6 +516,7 @@ extern unsigned long itimer_ticks;
 extern unsigned long itimer_next;
 extern void do_timer(struct pt_regs *);
 
+extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
 extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
 extern void FASTCALL(sched_exit(task_t * p));
diff --git a/kernel/sched.c b/kernel/sched.c
index 68bd00e54a812f2efcc4226cef31cc522a63bdbd..3e967ec6814f0cff4191adc61e0847f75e1e47e8 100644
@@ -438,6 +438,7 @@ void kick_if_running(task_t * p)
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
+ * @state: the mask of task states that can be woken
  * @sync: do a synchronous wakeup?
  *
  * Put it on the run-queue if it's not already there. The "current"
@@ -448,7 +449,7 @@ void kick_if_running(task_t * p)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(task_t * p, int sync)
+static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 {
        unsigned long flags;
        int success = 0;
@@ -458,28 +459,30 @@ static int try_to_wake_up(task_t * p, int sync)
 repeat_lock_task:
        rq = task_rq_lock(p, &flags);
        old_state = p->state;
-       if (!p->array) {
-               /*
-                * Fast-migrate the task if it's not running or runnable
-                * currently. Do not violate hard affinity.
-                */
-               if (unlikely(sync && !task_running(rq, p) &&
-                       (task_cpu(p) != smp_processor_id()) &&
-                       (p->cpus_allowed & (1UL << smp_processor_id())))) {
-
-                       set_task_cpu(p, smp_processor_id());
-                       task_rq_unlock(rq, &flags);
-                       goto repeat_lock_task;
+       if (old_state & state) {
+               if (!p->array) {
+                       /*
+                        * Fast-migrate the task if it's not running or runnable
+                        * currently. Do not violate hard affinity.
+                        */
+                       if (unlikely(sync && !task_running(rq, p) &&
+                               (task_cpu(p) != smp_processor_id()) &&
+                               (p->cpus_allowed & (1UL << smp_processor_id())))) {
+
+                               set_task_cpu(p, smp_processor_id());
+                               task_rq_unlock(rq, &flags);
+                               goto repeat_lock_task;
+                       }
+                       if (old_state == TASK_UNINTERRUPTIBLE)
+                               rq->nr_uninterruptible--;
+                       activate_task(p, rq);
+       
+                       if (p->prio < rq->curr->prio)
+                               resched_task(rq->curr);
+                       success = 1;
                }
-               if (old_state == TASK_UNINTERRUPTIBLE)
-                       rq->nr_uninterruptible--;
-               activate_task(p, rq);
-
-               if (p->prio < rq->curr->prio)
-                       resched_task(rq->curr);
-               success = 1;
+               p->state = TASK_RUNNING;
        }
-       p->state = TASK_RUNNING;
        task_rq_unlock(rq, &flags);
 
        return success;
@@ -487,7 +490,12 @@ repeat_lock_task:
 
 int wake_up_process(task_t * p)
 {
-       return try_to_wake_up(p, 0);
+       return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+}
+
+int wake_up_state(task_t *p, unsigned int state)
+{
+       return try_to_wake_up(p, state, 0);
 }
 
 /*
@@ -1263,7 +1271,7 @@ need_resched:
 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
 {
        task_t *p = curr->task;
-       return ((p->state & mode) && try_to_wake_up(p, sync));
+       return try_to_wake_up(p, mode, sync);
 }
 
 /*
@@ -2418,7 +2426,7 @@ void __init sched_init(void)
        rq->curr = current;
        rq->idle = current;
        set_task_cpu(current, smp_processor_id());
-       wake_up_process(current);
+       wake_up_forked_process(current);
 
        init_timers();
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 1b4dfe6d8a5495609b6a52b694e0601b515cd3af..63c16ac1a9fa92824abd67b5bc88bf4089ed39e6 100644
@@ -620,13 +620,9 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                t = p;
                do {
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
+                       wake_up_state(t, TASK_STOPPED);
+                       
                        /*
-                        * This wakeup is only need if in TASK_STOPPED,
-                        * but there can be SMP races with testing for that.
-                        * In the normal SIGCONT case, all will be stopped.
-                        * A spuriously sent SIGCONT will interrupt all running
-                        * threads to check signals even if it's ignored.
-                        *
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
                         * we post the signal, in case it was the only
@@ -637,11 +633,9 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                         * siglock that we hold now and until we've queued
                         * the pending signal. 
                         */
-                       if (!(t->flags & PF_EXITING)) {
-                               if (!sigismember(&t->blocked, SIGCONT))
-                                       set_tsk_thread_flag(t, TIF_SIGPENDING);
-                               wake_up_process(t);
-                       }
+                       if (!sigismember(&t->blocked, SIGCONT))
+                               set_tsk_thread_flag(t, TIF_SIGPENDING);
+
                        t = next_thread(t);
                } while (t != p);
        }