- revert the raw spinlock usage - it's too dangerous and volatile.
author     Ingo Molnar <mingo@elte.hu>
           Fri, 14 Jun 2002 01:56:44 +0000 (03:56 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Fri, 14 Jun 2002 01:56:44 +0000 (03:56 +0200)
kernel/sched.c

index 0761e349c0991f9c32b858947a92928a6b70180d..1254ad6df71ffc8686d5a206b6439fc7402550dd 100644
@@ -156,12 +156,6 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
  * task_rq_lock - lock the runqueue a given task resides on and disable
  * interrupts.  Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
- *
- * WARNING: to squeeze out a few more cycles we do not disable preemption
- * explicitly (or implicitly), we just keep interrupts disabled. This means
- * that within task_rq_lock/unlock sections you must be careful
- * about locking/unlocking spinlocks, since they could cause an unexpected
- * preemption.
  */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
@@ -170,9 +164,9 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 repeat_lock_task:
        local_irq_save(*flags);
        rq = task_rq(p);
-       _raw_spin_lock(&rq->lock);
+       spin_lock(&rq->lock);
        if (unlikely(rq != task_rq(p))) {
-               _raw_spin_unlock_irqrestore(&rq->lock, *flags);
+               spin_unlock_irqrestore(&rq->lock, *flags);
                goto repeat_lock_task;
        }
        return rq;
@@ -180,8 +174,7 @@ repeat_lock_task:
 
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
-       _raw_spin_unlock_irqrestore(&rq->lock, *flags);
-       preempt_check_resched();
+       spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -289,15 +282,8 @@ static inline void resched_task(task_t *p)
        nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
 
        if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
-               /*
-                * NOTE: smp_send_reschedule() can be called from
-                * spinlocked sections which do not have an elevated
-                * preemption count. So the code either has to avoid
-                * spinlocks, or has to put preempt_disable() and
-                * preempt_enable_no_resched() around the code.
-                */
                smp_send_reschedule(p->thread_info->cpu);
-       preempt_enable_no_resched();
+       preempt_enable();
 #else
        set_tsk_need_resched(p);
 #endif
@@ -348,10 +334,8 @@ repeat:
  */
 void kick_if_running(task_t * p)
 {
-       if (p == task_rq(p)->curr) {
+       if (p == task_rq(p)->curr)
                resched_task(p);
-               preempt_check_resched();
-       }
 }
 #endif
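
The revert works because, on a CONFIG_PREEMPT kernel of this era, the regular spin_lock()/spin_unlock() wrappers already fold the preemption bookkeeping into the lock operations (roughly preempt_disable() before _raw_spin_lock(), and preempt_enable() with its reschedule check after _raw_spin_unlock()), so the hand-rolled preempt_check_resched()/preempt_enable_no_resched() calls that the raw variants required become unnecessary. The following is a minimal userspace sketch of that bookkeeping, not kernel code: the names model_spin_lock(), model_spin_unlock(), preempt_count and need_resched_flag are illustrative stand-ins for the real kernel primitives.

#include <stdio.h>

static int preempt_count;          /* models the per-task preempt count      */
static int need_resched_flag;      /* models the TIF_NEED_RESCHED flag       */

static void raw_spin_lock(void)    { /* acquire the underlying hardware lock */ }
static void raw_spin_unlock(void)  { /* release the underlying hardware lock */ }

/* spin_lock(): disable preemption, then take the raw lock. */
static void model_spin_lock(void)
{
	preempt_count++;
	raw_spin_lock();
}

/*
 * spin_unlock(): drop the raw lock, re-enable preemption and, if the
 * count returns to zero while a reschedule is pending, reschedule.
 */
static void model_spin_unlock(void)
{
	raw_spin_unlock();
	if (--preempt_count == 0 && need_resched_flag)
		printf("preempt_schedule() would run here\n");
}

int main(void)
{
	need_resched_flag = 1;

	/*
	 * With the wrappers, the reschedule check happens automatically at
	 * unlock time, which is why task_rq_unlock() in the reverted code no
	 * longer needs an explicit preempt_check_resched().
	 */
	model_spin_lock();
	/* ... critical section, e.g. the runqueue manipulation ... */
	model_spin_unlock();

	return 0;
}

The same reasoning applies to resched_task(): once smp_send_reschedule() is no longer called from a section that deliberately bypasses the preemption count, the closing preempt_enable_no_resched() can become a plain preempt_enable().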