/***
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
+ * @state: the mask of task states that can be woken
* @sync: do a synchronous wakeup?
*
* Put it on the run-queue if it's not already there. The "current"
* thread is always on the run-queue (except when the actual
* re-schedule is in progress), and as such you're allowed to do
* the simpler "current->state = TASK_RUNNING" to mark yourself
* runnable without the overhead of this.
*
* returns failure only if the task is already active.
*/
-static int try_to_wake_up(task_t * p, int sync)
+static int try_to_wake_up(task_t * p, unsigned int state, int sync)
{
unsigned long flags;
int success = 0;
long old_state;
runqueue_t *rq;

repeat_lock_task:
rq = task_rq_lock(p, &flags);
old_state = p->state;
- if (!p->array) {
- /*
- * Fast-migrate the task if it's not running or runnable
- * currently. Do not violate hard affinity.
- */
- if (unlikely(sync && !task_running(rq, p) &&
- (task_cpu(p) != smp_processor_id()) &&
- (p->cpus_allowed & (1UL << smp_processor_id())))) {
-
- set_task_cpu(p, smp_processor_id());
- task_rq_unlock(rq, &flags);
- goto repeat_lock_task;
+ if (old_state & state) {
+ if (!p->array) {
+ /*
+ * Fast-migrate the task if it's not running or runnable
+ * currently. Do not violate hard affinity.
+ */
+ if (unlikely(sync && !task_running(rq, p) &&
+ (task_cpu(p) != smp_processor_id()) &&
+ (p->cpus_allowed & (1UL << smp_processor_id())))) {
+
+ set_task_cpu(p, smp_processor_id());
+ task_rq_unlock(rq, &flags);
+ goto repeat_lock_task;
+ }
+ if (old_state == TASK_UNINTERRUPTIBLE)
+ rq->nr_uninterruptible--;
+ activate_task(p, rq);
+
+ if (p->prio < rq->curr->prio)
+ resched_task(rq->curr);
+ success = 1;
}
- if (old_state == TASK_UNINTERRUPTIBLE)
- rq->nr_uninterruptible--;
- activate_task(p, rq);
-
- if (p->prio < rq->curr->prio)
- resched_task(rq->curr);
- success = 1;
+ p->state = TASK_RUNNING;
}
- p->state = TASK_RUNNING;
task_rq_unlock(rq, &flags);
return success;
}
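
With the new state argument the wakeup becomes conditional: the task is activated only if its current state is in the caller's mask, and the test is made under task_rq_lock() instead of at each call site. Because TASK_RUNNING is 0, a running task can never match any mask, so the "returns failure only if the task is already active" rule above still holds. A minimal sketch of the gate (hypothetical values, not part of the patch):

	old_state = p->state;	/* sampled under the runqueue lock */
	if (old_state & (TASK_STOPPED | TASK_UNINTERRUPTIBLE)) {
		/* e.g. a stopped task: in the mask, gets activated */
	}
	/* a TASK_RUNNING (== 0) task matches no mask and is left alone */
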
int wake_up_process(task_t * p)
{
- return try_to_wake_up(p, 0);
+ return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+}
+
+int wake_up_state(task_t *p, unsigned int state)
+{
+ return try_to_wake_up(p, state, 0);
}
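
wake_up_process() passes the union of all wakeable states, preserving its old indiscriminate behaviour, while wake_up_state() lets a caller be selective. A hypothetical call site (child is illustrative, not from the patch):

	/* resume the child only if it is actually stopped; for a running
	 * or sleeping child this is a no-op that returns 0 */
	if (wake_up_state(child, TASK_STOPPED))
		/* child was stopped and is now back on a runqueue */;
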
int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
{
task_t *p = curr->task;
- return ((p->state & mode) && try_to_wake_up(p, sync));
+ return try_to_wake_up(p, mode, sync);
}
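
The old open-coded (p->state & mode) test read p->state without holding any lock; passing mode down as the state mask lets try_to_wake_up() make the same test under task_rq_lock(), closing that window. The wait-queue wakeup mode becomes the mask directly; for instance, a wake_up_interruptible()-style wakeup ends up doing (sketch, assuming the usual wait-queue plumbing):

	curr->func(curr, TASK_INTERRUPTIBLE, 0);	/* only interruptible sleepers match */
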
/*
 * We have to do a little magic to get the first
 * thread right in SMP mode.
 */
rq->curr = current;
rq->idle = current;
set_task_cpu(current, smp_processor_id());
- wake_up_process(current);
+ wake_up_forked_process(current);
init_timers();
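
The swap to wake_up_forked_process() is forced by the new semantics: the init thread is already TASK_RUNNING here, and since TASK_RUNNING is 0 it can never satisfy the mask test, so wake_up_process() would now be a silent no-op. Illustrative only:

	current->state = TASK_RUNNING;		/* == 0 */
	wake_up_process(current);		/* (0 & mask) == 0: does nothing now */
	wake_up_forked_process(current);	/* activates the task regardless */
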
t = p;
do {
rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
+ wake_up_state(t, TASK_STOPPED);
+
/*
- * This wakeup is only needed if in TASK_STOPPED,
- * but there can be SMP races with testing for that.
- * In the normal SIGCONT case, all will be stopped.
- * A spuriously sent SIGCONT will interrupt all running
- * threads to check signals even if it's ignored.
- *
* If there is a handler for SIGCONT, we must make
* sure that no thread returns to user mode before
* we post the signal, in case it was the only
* thread eligible to run the signal handler--then it
* must not do anything between resuming and running
* the handler.  With the TIF_SIGPENDING flag set,
* the thread will pause and acquire the siglock that
* we hold now and until we've queued the pending
* signal.
*/
- if (!(t->flags & PF_EXITING)) {
- if (!sigismember(&t->blocked, SIGCONT))
- set_tsk_thread_flag(t, TIF_SIGPENDING);
- wake_up_process(t);
- }
+ if (!sigismember(&t->blocked, SIGCONT))
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
+
t = next_thread(t);
} while (t != p);
}
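
The unconditional wake_up_process() and its PF_EXITING guard are gone: wake_up_state(t, TASK_STOPPED) folds the "is it stopped?" test and the wakeup into one operation under the runqueue lock, eliminating the SMP race the deleted comment described, and merely running threads are no longer kicked by a spurious SIGCONT. Roughly, the new call behaves like this open-coded sequence, minus the race:

	if (t->state & TASK_STOPPED)	/* test and wakeup are atomic inside */
		wake_up_process(t);	/* try_to_wake_up(), unlike here */
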