info.si_code = SEGV_MAPERR;
/*
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
+ * If we're in an interrupt, have no user context, or are running in an
+ * atomic region, then we must not take the fault.
*/
- if (in_interrupt() || !mm)
+ if (preempt_count() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
#define preempt_count() (current_thread_info()->preempt_count)
+#define inc_preempt_count() \
+do { \
+ preempt_count()++; \
+} while (0)
+
+#define dec_preempt_count() \
+do { \
+ preempt_count()--; \
+} while (0)
+
#ifdef CONFIG_PREEMPT
extern void preempt_schedule(void);
#define preempt_disable() \
do { \
- preempt_count()++; \
+ inc_preempt_count(); \
barrier(); \
} while (0)
#define preempt_enable_no_resched() \
do { \
- preempt_count()--; \
+ dec_preempt_count(); \
barrier(); \
} while (0)
preempt_schedule(); \
} while (0)
+#define inc_preempt_count_non_preempt() do { } while (0)
+#define dec_preempt_count_non_preempt() do { } while (0)
+
#else
#define preempt_disable() do { } while (0)
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)
+/*
+ * Sometimes we want to increment the preempt count, but we know that it is
+ * already incremented when the kernel is compiled with CONFIG_PREEMPT.
+ */
+#define inc_preempt_count_non_preempt() inc_preempt_count()
+#define dec_preempt_count_non_preempt() dec_preempt_count()
+
#endif
#endif /* __LINUX_PREEMPT_H */