__cond_resched();
}
-#ifdef CONFIG_PREEMPT
-
/*
* cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
- * Note: this does not assume the given lock is the _only_ lock held.
- * The kernel preemption counter gives us "free" checking that we are
- * atomic -- let's use it.
+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * operations here to prevent schedule() from being called twice (once via
+ * spin_unlock(), once by hand).
*/
static inline void cond_resched_lock(spinlock_t * lock)
{
- if (need_resched() && preempt_count() == 1) {
+ if (need_resched()) {
_raw_spin_unlock(lock);
preempt_enable_no_resched();
		__cond_resched();
		spin_lock(lock);
	}
}
-#else
-
-static inline void cond_resched_lock(spinlock_t * lock)
-{
-}
-
-#endif
-
/* Reevaluate whether the task has signals pending delivery.
This is required every time the blocked sigset_t changes.
callers must hold sig->siglock. */
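
For illustration only (not part of this patch): a hypothetical scanner that walks a
fixed-size table under a spinlock could use the reworked cond_resched_lock() to bound
scheduling latency. foo_lock, foo_table, FOO_TABLE_SIZE and scan_foo_table() are
invented for the example.

#include <linux/spinlock.h>
#include <linux/sched.h>

#define FOO_TABLE_SIZE 1024			/* hypothetical */

static int foo_table[FOO_TABLE_SIZE];		/* hypothetical table */
static spinlock_t foo_lock = SPIN_LOCK_UNLOCKED;

static void scan_foo_table(void)
{
	int i;

	spin_lock(&foo_lock);
	for (i = 0; i < FOO_TABLE_SIZE; i++) {
		/* ... examine foo_table[i] while holding foo_lock ... */

		/*
		 * If a reschedule is pending this drops foo_lock, calls
		 * schedule() once and retakes the lock; the index-based
		 * walk does not care that the lock was dropped.
		 */
		cond_resched_lock(&foo_lock);
	}
	spin_unlock(&foo_lock);
}

The point of the preempt_enable_no_resched()/__cond_resched() pairing in the helper is
that the reschedule happens exactly once, by hand, rather than a second time from inside
the unlock path, as the new comment above explains.
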
mmu_gather_t *tlb;
unsigned long end, block;
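+
+	/* this path can sleep: let debug builds warn if we are called from atomic context */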
+ might_sleep();
+
if (is_vm_hugetlb_page(vma)) {
zap_hugepage_range(vma, address, size);
return;