extern struct hwrpb_struct *hwrpb;
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-extern spinlock_t kernel_flag;
extern spinlock_t rtc_lock;
/* these are C runtime functions with special calling conventions: */
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(flush_tlb_all);
EXPORT_SYMBOL(flush_tlb_mm);
IPI_CPU_STOP,
};
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
/* Set to a secondary's cpuid when it comes online. */
static int smp_secondary_alive __initdata = 0;
EXPORT_SYMBOL_NOVERS(__up_wakeup);
EXPORT_SYMBOL(get_wchan);
-
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(kernel_flag);
-#endif
#define MEM_SIZE (16*1024*1024)
#endif
-#ifdef CONFIG_PREEMPT
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-#endif
-
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];
#ifdef CONFIG_SMP
EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL_NOVERS(__write_lock_failed);
EXPORT_SYMBOL_NOVERS(__read_lock_failed);
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
+#include <linux/interrupt.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
* about nothing of note with C stepping upwards.
*/
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
/*
EXPORT_SYMBOL(smp_call_function_single);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(ia64_cpu_to_sapicid);
-
-#include <asm/smplock.h>
-EXPORT_SYMBOL(kernel_flag);
-
#else /* !CONFIG_SMP */
EXPORT_SYMBOL(__flush_tlb_all);
#include <asm/unistd.h>
#include <asm/mca.h>
-/*
- * The Big Kernel Lock. It's not supposed to be used for performance critical stuff
- * anymore. But we still need to align it because certain workloads are still affected by
- * it. For example, llseek() and various other filesystem related routines still use the
- * BKL.
- */
-spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
-
/*
* Structure and data for smp_call_function(). This is designed to minimise static memory
* requirements. It also looks cleaner.
-/* Ze Big Kernel Lock! */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
int smp_threads_ready; /* Not used */
int smp_num_cpus;
int global_irq_holder = NO_PROC_ID;
#endif /* CONFIG_SGI_IP27 */
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
int smp_threads_ready; /* Not used */
atomic_t smp_commenced = ATOMIC_INIT(0);
struct cpuinfo_mips cpu_data[NR_CPUS];
#ifdef CONFIG_SMP
EXPORT_SYMBOL(synchronize_irq);
-#include <asm/smplock.h>
-EXPORT_SYMBOL(kernel_flag);
-
#include <asm/system.h>
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(probe_irq_mask);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(kernel_flag);
-#endif /* CONFIG_SMP */
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL_NOVERS(DMA_MODE_READ);
struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
atomic_t ipi_recv;
atomic_t ipi_sent;
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
unsigned int prof_multiplier[NR_CPUS] = { [1 ... NR_CPUS-1] = 1 };
unsigned int prof_counter[NR_CPUS] = { [1 ... NR_CPUS-1] = 1 };
unsigned long cache_decay_ticks = HZ/100;
EXPORT_SYMBOL(disable_irq_nosync);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(kernel_flag);
#endif /* CONFIG_SMP */
EXPORT_SYMBOL(register_ioctl32_conversion);
#include <asm/machdep.h>
int smp_threads_ready = 0;
-spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
unsigned long cache_decay_ticks;
/* initialised so it doesn't end up in bss */
int smp_threads_ready=0; /* Set when the idlers are all forked. */
static atomic_t smp_commenced = ATOMIC_INIT(0);
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
volatile unsigned long phys_cpu_present_map;
volatile unsigned long cpu_online_map;
unsigned long cache_decay_ticks = 0;
}
EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_num_cpus);
int smp_threads_ready=0; /* Set when the idlers are all forked. */
static atomic_t smp_commenced = ATOMIC_INIT(0);
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
volatile unsigned long phys_cpu_present_map;
volatile unsigned long cpu_online_map;
unsigned long cache_decay_ticks = 0;
}
EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_num_cpus);
* instruction which is much better...
*/
-/* Kernel spinlock */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;
extern void dump_thread(struct pt_regs *, struct user *);
-#ifdef CONFIG_SMP
-extern spinlock_t kernel_flag;
-#endif
-
/* One thing to note is that the way the symbols of the mul/div
* support routines are named is a mess, they all start with
* a '.' which makes it a bitch to export, here is the trick:
EXPORT_SYMBOL_PRIVATE(_change_bit);
#ifdef CONFIG_SMP
-/* Kernel wide locking */
-EXPORT_SYMBOL(kernel_flag);
-
/* IRQ implementation. */
EXPORT_SYMBOL(global_irq_holder);
EXPORT_SYMBOL(synchronize_irq);
/* Please don't make this stuff initdata!!! --DaveM */
static unsigned char boot_cpu_id;
-/* Kernel spinlock */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
atomic_t sparc64_num_cpus_online = ATOMIC_INIT(0);
unsigned long cpu_online_map = 0;
atomic_t sparc64_num_cpus_possible = ATOMIC_INIT(0);
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
-#ifdef CONFIG_SMP
-extern spinlock_t kernel_flag;
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _spin_trylock (spinlock_t *lock);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
#endif
-#endif
extern unsigned long phys_base;
extern unsigned long pfn_base;
EXPORT_SYMBOL(__write_unlock);
#endif
-/* Kernel wide locking */
-EXPORT_SYMBOL(kernel_flag);
-
/* Hard IRQ locking */
#ifdef CONFIG_SMP
EXPORT_SYMBOL(synchronize_irq);
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
/*
* the following functions deal with sending IPIs between CPUs.
*
#ifdef CONFIG_SMP
EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_num_cpus);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL_NOVERS(__write_lock_failed);
#define __LINUX_SMPLOCK_H
#include <linux/config.h>
-
-#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
-
-#define lock_kernel() do { } while(0)
-#define unlock_kernel() do { } while(0)
-#define release_kernel_lock(task) do { } while(0)
-#define reacquire_kernel_lock(task) do { } while(0)
-#define kernel_locked() 1
-
-#else
-
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/sched.h>
-#include <asm/current.h>
+#include <linux/spinlock.h>
+
+#if CONFIG_SMP || CONFIG_PREEMPT
extern spinlock_t kernel_flag;
#define put_kernel_lock() spin_unlock(&kernel_flag)
/*
- * Release global kernel lock and global interrupt lock
+ * Release global kernel lock.
*/
-#define release_kernel_lock(task) \
-do { \
- if (unlikely(task->lock_depth >= 0)) \
- put_kernel_lock(); \
-} while (0)
+static inline void release_kernel_lock(struct task_struct *task)
+{
+ if (unlikely(task->lock_depth >= 0))
+ put_kernel_lock();
+}
/*
* Re-acquire the kernel lock
*/
-#define reacquire_kernel_lock(task) \
-do { \
- if (unlikely(task->lock_depth >= 0)) \
- get_kernel_lock(); \
-} while (0)
-
+static inline void reacquire_kernel_lock(struct task_struct *task)
+{
+ if (unlikely(task->lock_depth >= 0))
+ get_kernel_lock();
+}
/*
* Getting the big kernel lock.
* so we only need to worry about other
* CPU's.
*/
-static __inline__ void lock_kernel(void)
+static inline void lock_kernel(void)
{
int depth = current->lock_depth+1;
- if (!depth)
+ if (likely(!depth))
get_kernel_lock();
current->lock_depth = depth;
}
-static __inline__ void unlock_kernel(void)
+static inline void unlock_kernel(void)
{
- if (current->lock_depth < 0)
+ if (unlikely(current->lock_depth < 0))
BUG();
- if (--current->lock_depth < 0)
+ if (likely(--current->lock_depth < 0))
put_kernel_lock();
}
-#endif /* CONFIG_SMP */
+#else
+
+#define lock_kernel() do { } while(0)
+#define unlock_kernel() do { } while(0)
+#define release_kernel_lock(task) do { } while(0)
+#define reacquire_kernel_lock(task) do { } while(0)
+#define kernel_locked() 1
-#endif
+#endif /* CONFIG_SMP || CONFIG_PREEMPT */
+#endif /* __LINUX_SMPLOCK_H */
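
For illustration (not part of the patch): the inline functions above make the BKL recursive by counting depth in current->lock_depth, which starts at -1; only the outermost lock/unlock pair actually touches kernel_flag. A minimal sketch of a hypothetical legacy caller nesting it, assuming the header above is in place (both function names here are invented for the example):

/* Illustrative only -- hypothetical legacy code path, not from this patch. */
#include <linux/smp_lock.h>

static void hypothetical_inner(void)
{
        lock_kernel();          /* lock_depth 0 -> 1: already held, no spin_lock */
        /* ... touch BKL-protected state ... */
        unlock_kernel();        /* lock_depth 1 -> 0: kernel_flag stays held */
}

static void hypothetical_outer(void)
{
        lock_kernel();          /* lock_depth -1 -> 0: spin_lock(&kernel_flag) */
        hypothetical_inner();
        unlock_kernel();        /* lock_depth 0 -> -1: spin_unlock(&kernel_flag) */
}
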
#include <linux/buffer_head.h>
#include <linux/root_dev.h>
#include <linux/percpu.h>
+#include <linux/smp_lock.h>
#include <asm/checksum.h>
#if defined(CONFIG_PROC_FS)
#if CONFIG_SMP
EXPORT_SYMBOL_GPL(set_cpus_allowed);
#endif
+#if CONFIG_SMP || CONFIG_PREEMPT
+EXPORT_SYMBOL(kernel_flag);
+#endif
EXPORT_SYMBOL(jiffies);
EXPORT_SYMBOL(jiffies_64);
EXPORT_SYMBOL(xtime);
}
#if CONFIG_SMP
-
/*
* This is how migration works:
*
#endif
+#if CONFIG_SMP || CONFIG_PREEMPT
+/*
+ * The 'big kernel lock'
+ *
+ * This spinlock is taken and released recursively by lock_kernel()
+ * and unlock_kernel(). It is transparently dropped and reacquired
+ * over schedule(). It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Don't use in new code.
+ */
+spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+#endif
+
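For illustration (not part of the patch): "transparently dropped and reacquired over schedule()" means the scheduler pairs release_kernel_lock() and reacquire_kernel_lock() around the context switch, keyed off lock_depth. A minimal sketch, assuming a deliberately simplified schedule() shape (the function name and structure are hypothetical; only the two lock calls come from the header above):

/*
 * Illustrative only -- not the real scheduler. Shows where the BKL
 * would be dropped and retaken across a context switch, using the
 * inlines from include/linux/smp_lock.h.
 */
static void hypothetical_schedule(void)
{
        struct task_struct *prev = current;

        release_kernel_lock(prev);      /* no-op unless prev->lock_depth >= 0 */

        /* ... pick the next runnable task and switch to it ... */

        reacquire_kernel_lock(current); /* retake kernel_flag if this task held it */
}

Because lock_depth lives in the task_struct, the nesting count survives the switch, so the reacquire restores exactly the locking state the task had when it slept.
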
extern void init_timervecs(void);
extern void timer_bh(void);
extern void tqueue_bh(void);