- instead of testing the full preempt-count/need-resched condition inline, we test the preemption count in preempt_schedule(). This reduces the icache
footprint and the overhead of preemption.
- plus optimize the irq-path preemption check a bit (a C sketch of the new path follows the hunk below).
jnz restore_all
incl TI_PRE_COUNT(%ebx)
sti
- call SYMBOL_NAME(preempt_schedule)
+ movl TI_TASK(%ebx), %ecx # ti->task
+ movl $0, (%ecx) # current->state = TASK_RUNNING
+ call SYMBOL_NAME(schedule)
jmp ret_from_intr
#endif
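
For readers tracing the assembly, here is a minimal user-space C model of
the new irq-path logic. Every name below (fake_thread_info, irq_return_path,
schedule_stub) is illustrative, not a kernel identifier, and the need-resched
test is folded into one function here although in entry.S it happens earlier
on the interrupt-return path:

#include <stdio.h>

struct fake_task {
	int state;			/* 0 stands in for TASK_RUNNING */
};

struct fake_thread_info {
	struct fake_task *task;		/* TI_TASK(%ebx) */
	int preempt_count;		/* TI_PRE_COUNT(%ebx) */
	int need_resched;		/* TIF_NEED_RESCHED */
};

static void schedule_stub(void)
{
	printf("schedule() called\n");
}

/* Mirrors the hunk above: bail out if preemption is disabled or no
 * reschedule is pending; otherwise disable preemption, mark the task
 * running and call schedule() directly. */
static void irq_return_path(struct fake_thread_info *ti)
{
	if (ti->preempt_count || !ti->need_resched)
		return;			/* jnz restore_all */
	ti->preempt_count++;		/* incl TI_PRE_COUNT(%ebx) */
	/* sti: interrupts would be re-enabled here */
	ti->task->state = 0;		/* movl $0, (%ecx) */
	schedule_stub();		/* call schedule */
}

int main(void)
{
	struct fake_task t = { 1 };
	struct fake_thread_info ti = { &t, 0, 1 };

	irq_return_path(&ti);		/* prints: schedule() called */
	return 0;
}
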
#define preempt_enable() \
do { \
--current_thread_info()->preempt_count; \
barrier(); \
- if (unlikely(!(current_thread_info()->preempt_count) && \
- test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
} while (0)
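
To see why the leaner inline test stays correct with nested disables, here
is a small user-space model (a sketch with made-up names, not kernel code):
the inline path now tests only the need-resched flag, while the count test
that used to live in the macro happens once, out of line, in the callee.

#include <stdio.h>

static int preempt_count;
static int need_resched = 1;	/* pretend a wakeup set TIF_NEED_RESCHED */

static void preempt_schedule_model(void)
{
	if (preempt_count)	/* the test moved out of the macro */
		return;
	printf("rescheduling\n");
}

#define preempt_disable_model()	do { ++preempt_count; } while (0)

#define preempt_enable_model()				\
do {							\
	--preempt_count;				\
	if (need_resched)	/* inline: flag only */	\
		preempt_schedule_model();		\
} while (0)

int main(void)
{
	preempt_disable_model();
	preempt_disable_model();	/* nested: count == 2 */
	preempt_enable_model();		/* count == 1: callee bails out */
	preempt_enable_model();		/* count == 0: prints "rescheduling" */
	return 0;
}
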
asmlinkage void preempt_schedule(void)
{
+ if (unlikely(preempt_get_count()))
+ return;
current->state = TASK_RUNNING;
schedule();
}
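
Note that this new check is also why the interrupt path above calls
schedule() directly instead of preempt_schedule(): by that point it has
already incremented the preempt count, so preempt_schedule() would see a
nonzero count and return without scheduling.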