git.hungrycats.org Git - linux/commitdiff
Cleanup BKL handling and move kernel_flag definition to common code
author: Christoph Hellwig <hch@sb.bsdonline.org>
Fri, 23 Aug 2002 15:56:54 +0000 (17:56 +0200)
committer: Christoph Hellwig <hch@sb.bsdonline.org>
Fri, 23 Aug 2002 15:56:54 +0000 (17:56 +0200)
26 files changed:
arch/alpha/kernel/alpha_ksyms.c
arch/alpha/kernel/smp.c
arch/arm/kernel/armksyms.c
arch/arm/kernel/setup.c
arch/i386/kernel/i386_ksyms.c
arch/i386/kernel/smp.c
arch/ia64/kernel/ia64_ksyms.c
arch/ia64/kernel/smp.c
arch/mips/kernel/smp.c
arch/mips64/kernel/smp.c
arch/parisc/kernel/parisc_ksyms.c
arch/ppc/kernel/ppc_ksyms.c
arch/ppc/kernel/smp.c
arch/ppc64/kernel/ppc_ksyms.c
arch/ppc64/kernel/smp.c
arch/s390/kernel/smp.c
arch/s390x/kernel/smp.c
arch/sparc/kernel/smp.c
arch/sparc/kernel/sparc_ksyms.c
arch/sparc64/kernel/smp.c
arch/sparc64/kernel/sparc64_ksyms.c
arch/x86_64/kernel/smp.c
arch/x86_64/kernel/x8664_ksyms.c
include/linux/smp_lock.h
kernel/ksyms.c
kernel/sched.c

index c8a66386754003347bcd684073d1a5171e5e7ded..f9797ec4868480a9be98818f1c439c177af093f8 100644 (file)
@@ -40,7 +40,6 @@
 extern struct hwrpb_struct *hwrpb;
 extern void dump_thread(struct pt_regs *, struct user *);
 extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-extern spinlock_t kernel_flag;
 extern spinlock_t rtc_lock;
 
 /* these are C runtime functions with special calling conventions: */
@@ -207,7 +206,6 @@ EXPORT_SYMBOL(up);
  */
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(synchronize_irq);
 EXPORT_SYMBOL(flush_tlb_all);
 EXPORT_SYMBOL(flush_tlb_mm);
index eb8ff9eec6d99c3f7b6a13a0cf0d987e2bc173b8..b328c280aa95723311d68516a42d0a80bd57127b 100644 (file)
@@ -67,8 +67,6 @@ enum ipi_message_type {
        IPI_CPU_STOP,
 };
 
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
 /* Set to a secondary's cpuid when it comes online.  */
 static int smp_secondary_alive __initdata = 0;
 
index 8138a71d362687406643e0587379eab27d9170c6..9d287938c27e688a3c79584bda2b4c318651d0fc 100644 (file)
@@ -273,7 +273,3 @@ EXPORT_SYMBOL_NOVERS(__down_trylock_failed);
 EXPORT_SYMBOL_NOVERS(__up_wakeup);
 
 EXPORT_SYMBOL(get_wchan);
-
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(kernel_flag);
-#endif
index f87230487d6bd14fec0cab0e082cffbd9b6a4d50..4baecfe247e5ab519e7e47382ecf8d7ddac944d4 100644 (file)
 #define MEM_SIZE       (16*1024*1024)
 #endif
 
-#ifdef CONFIG_PREEMPT
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-#endif
-
 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
 char fpe_type[8];
 
index d31b096b4da79c38766a4589be3f01128e3b5494..2879def67aa1cd9cf7991e1358a27a24cb2b2c90 100644 (file)
@@ -126,7 +126,6 @@ EXPORT_SYMBOL(mmx_copy_page);
 
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL_NOVERS(__write_lock_failed);
 EXPORT_SYMBOL_NOVERS(__read_lock_failed);
index 19000818cf2bbcab602a19fad0c9d399d3f22df4..0d463df558c3bd2595599c1a6eae16c2e4feef06 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
+#include <linux/interrupt.h>
 
 #include <asm/mtrr.h>
 #include <asm/pgalloc.h>
  *     about nothing of note with C stepping upwards.
  */
 
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
 struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
 
 /*
index d7e8085dcaadee26f72ece1f09fb80b9c2e97af9..67a3c4c86c2c5fe5c0dfe04cff9fa0e0f4c3ab19 100644 (file)
@@ -84,10 +84,6 @@ EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_call_function_single);
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(ia64_cpu_to_sapicid);
-
-#include <asm/smplock.h>
-EXPORT_SYMBOL(kernel_flag);
-
 #else /* !CONFIG_SMP */
 
 EXPORT_SYMBOL(__flush_tlb_all);
index 7bde7122fdc2026f16fe10d24b00a42b0fe7eea0..0344a6d519e824a148744ae3cc3355ec7e0dd613 100644 (file)
 #include <asm/unistd.h>
 #include <asm/mca.h>
 
-/*
- * The Big Kernel Lock.  It's not supposed to be used for performance critical stuff
- * anymore.  But we still need to align it because certain workloads are still affected by
- * it.  For example, llseek() and various other filesystem related routines still use the
- * BKL.
- */
-spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
-
 /*
  * Structure and data for smp_call_function(). This is designed to minimise static memory
  * requirements. It also looks cleaner.
index 45bff0a5ae8290554d59212cc9ba01e7df19e93e..f766ead50818f867c2427fafa5c22be7da951b0a 100644 (file)
@@ -53,7 +53,6 @@
 
 
 /* Ze Big Kernel Lock! */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 int smp_threads_ready;  /* Not used */
 int smp_num_cpus;    
 int global_irq_holder = NO_PROC_ID;
index 6d1855c8d9ae013ba032d17e14639b2d9251e21e..4494b7d96ca6910e093f5cb33aab75a663ad1603 100644 (file)
@@ -53,7 +53,6 @@ static void sendintr(int destid, unsigned char status)
 #endif /* CONFIG_SGI_IP27 */
 
 /* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 int smp_threads_ready; /* Not used */
 atomic_t smp_commenced = ATOMIC_INIT(0);
 struct cpuinfo_mips cpu_data[NR_CPUS];
index 111a2fb5a4672fbd55479f768b823f1f2b3ae856..8d40cada7361b02500d9e39203529b030c75fa0f 100644 (file)
@@ -35,9 +35,6 @@ EXPORT_SYMBOL(boot_cpu_data);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(synchronize_irq);
 
-#include <asm/smplock.h>
-EXPORT_SYMBOL(kernel_flag);
-
 #include <asm/system.h>
 EXPORT_SYMBOL(__global_sti);
 EXPORT_SYMBOL(__global_cli);
index 95b915ed328fddb68b5edd8e26e0720a251f0f53..a1fc5194a0b28157fb0f549c588acf86e086358b 100644 (file)
@@ -93,9 +93,6 @@ EXPORT_SYMBOL(enable_irq);
 EXPORT_SYMBOL(disable_irq);
 EXPORT_SYMBOL(disable_irq_nosync);
 EXPORT_SYMBOL(probe_irq_mask);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(kernel_flag);
-#endif /* CONFIG_SMP */
 
 EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
 EXPORT_SYMBOL_NOVERS(DMA_MODE_READ);
index fd4970b2f59944ae64ded70c16210ab4c78bf9ce..efc7b9724bb9f64e7d60919d2862f483ce69cf7d 100644 (file)
@@ -47,7 +47,6 @@ struct cpuinfo_PPC cpu_data[NR_CPUS];
 struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
 atomic_t ipi_recv;
 atomic_t ipi_sent;
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 unsigned int prof_multiplier[NR_CPUS] = { [1 ... NR_CPUS-1] = 1 };
 unsigned int prof_counter[NR_CPUS] = { [1 ... NR_CPUS-1] = 1 };
 unsigned long cache_decay_ticks = HZ/100;
index 953959974eac0de152497a8df9c57a1d7a0435d5..4ed07f854e62b8803cd1c627d7069352d2ebc91d 100644 (file)
@@ -74,7 +74,6 @@ EXPORT_SYMBOL(disable_irq);
 EXPORT_SYMBOL(disable_irq_nosync);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(kernel_flag);
 #endif /* CONFIG_SMP */
 
 EXPORT_SYMBOL(register_ioctl32_conversion);
index d5a531bd0d8888b33515c7cf5a5cac4feb24a4c3..3e602ba919518efdefcee7dbb56b2fb93eeb80f7 100644 (file)
@@ -51,7 +51,6 @@
 #include <asm/machdep.h>
 
 int smp_threads_ready = 0;
-spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
 unsigned long cache_decay_ticks;
 
 /* initialised so it doesnt end up in bss */
index b460bb23ed6c56de141ff478681322c4f5f0ebb8..6c41cc6c7914b9d681d09003e622f81227aeaee4 100644 (file)
@@ -54,8 +54,6 @@ cycles_t         cacheflush_time=0;
 int              smp_threads_ready=0;      /* Set when the idlers are all forked. */
 static atomic_t  smp_commenced = ATOMIC_INIT(0);
 
-spinlock_t       kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
 volatile unsigned long phys_cpu_present_map;
 volatile unsigned long cpu_online_map;
 unsigned long    cache_decay_ticks = 0;
@@ -634,7 +632,6 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 
 EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(smp_ctl_set_bit);
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 EXPORT_SYMBOL(smp_num_cpus);
index c9ff1ff21f438217d8d5fa8fe746966098de408b..eba81ceede09e9bf3e18c8d980e7f5689fb6a00d 100644 (file)
@@ -53,8 +53,6 @@ cycles_t         cacheflush_time=0;
 int              smp_threads_ready=0;      /* Set when the idlers are all forked. */
 static atomic_t  smp_commenced = ATOMIC_INIT(0);
 
-spinlock_t       kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
 volatile unsigned long phys_cpu_present_map;
 volatile unsigned long cpu_online_map;
 unsigned long    cache_decay_ticks = 0;
@@ -613,7 +611,6 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 
 EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(smp_ctl_set_bit);
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 EXPORT_SYMBOL(smp_num_cpus);
index cb994b3a252037a1d61da9b8540c4f95ac70e62f..b4ca31eecb2022d7afa05ece80c13c3adf172b8d 100644 (file)
@@ -66,9 +66,6 @@ cycles_t cacheflush_time = 0; /* XXX */
  * instruction which is much better...
  */
 
-/* Kernel spinlock */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
 /* Used to make bitops atomic */
 unsigned char bitops_spinlock = 0;
 
index 832ad57aec3195662ecbf13d3d60bb76e77d2c31..1f51d96bbd5ae4c198165cadea7b2609c2545d06 100644 (file)
@@ -77,10 +77,6 @@ extern int __divdi3(int, int);
 
 extern void dump_thread(struct pt_regs *, struct user *);
 
-#ifdef CONFIG_SMP
-extern spinlock_t kernel_flag;
-#endif
-
 /* One thing to note is that the way the symbols of the mul/div
  * support routines are named is a mess, they all start with
  * a '.' which makes it a bitch to export, here is the trick:
@@ -130,9 +126,6 @@ EXPORT_SYMBOL_PRIVATE(_clear_bit);
 EXPORT_SYMBOL_PRIVATE(_change_bit);
 
 #ifdef CONFIG_SMP
-/* Kernel wide locking */
-EXPORT_SYMBOL(kernel_flag);
-
 /* IRQ implementation. */
 EXPORT_SYMBOL(global_irq_holder);
 EXPORT_SYMBOL(synchronize_irq);
index 74dc8427eeb1347e2f593aec8e80f1987ae884f5..ebf4cb96cff6fe6757c71504a71274000915bfd8 100644 (file)
@@ -46,9 +46,6 @@ cpuinfo_sparc cpu_data[NR_CPUS];
 /* Please don't make this stuff initdata!!!  --DaveM */
 static unsigned char boot_cpu_id;
 
-/* Kernel spinlock */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
 atomic_t sparc64_num_cpus_online = ATOMIC_INIT(0);
 unsigned long cpu_online_map = 0;
 atomic_t sparc64_num_cpus_possible = ATOMIC_INIT(0);
index 41208bd8aa82391ecf1f6c6a58f020af6c764291..391c7078a944c1d30a43d6c447a2b21f9db01a35 100644 (file)
@@ -101,9 +101,7 @@ extern int __ashrdi3(int, int);
 extern void dump_thread(struct pt_regs *, struct user *);
 extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
 
-#ifdef CONFIG_SMP
-extern spinlock_t kernel_flag;
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
 extern void _do_spin_lock (spinlock_t *lock, char *str);
 extern void _do_spin_unlock (spinlock_t *lock);
 extern int _spin_trylock (spinlock_t *lock);
@@ -112,7 +110,6 @@ extern void _do_read_unlock(rwlock_t *rw, char *str);
 extern void _do_write_lock(rwlock_t *rw, char *str);
 extern void _do_write_unlock(rwlock_t *rw);
 #endif
-#endif
 
 extern unsigned long phys_base;
 extern unsigned long pfn_base;
@@ -127,9 +124,6 @@ EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 #endif
 
-/* Kernel wide locking */
-EXPORT_SYMBOL(kernel_flag);
-
 /* Hard IRQ locking */
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(synchronize_irq);
index 2078148fe8ce1b8613fd2d65475bc7965fb34709..f10fd18b44a4bb321fa157fb5fea8e7f11c633f4 100644 (file)
@@ -22,9 +22,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
 /*
  * the following functions deal with sending IPIs between CPUs.
  *
index 2bbb7d8238b58933a2ddfa444f8df3c3f1b13c3c..956ca21cbc2e971e8ac10c531c130cff57303be5 100644 (file)
@@ -109,7 +109,6 @@ EXPORT_SYMBOL(mmx_copy_page);
 
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(smp_num_cpus);
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL_NOVERS(__write_lock_failed);
index 40f5358fc856220a43988787ef643ff5f518f1ec..5a0b83a677d96c019bda456d166afa08c4f18348 100644 (file)
@@ -2,21 +2,10 @@
 #define __LINUX_SMPLOCK_H
 
 #include <linux/config.h>
-
-#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
-
-#define lock_kernel()                          do { } while(0)
-#define unlock_kernel()                                do { } while(0)
-#define release_kernel_lock(task)              do { } while(0)
-#define reacquire_kernel_lock(task)            do { } while(0)
-#define kernel_locked() 1
-
-#else
-
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/sched.h>
-#include <asm/current.h>
+#include <linux/spinlock.h>
+
+#if CONFIG_SMP || CONFIG_PREEMPT
 
 extern spinlock_t kernel_flag;
 
@@ -26,23 +15,22 @@ extern spinlock_t kernel_flag;
 #define put_kernel_lock()      spin_unlock(&kernel_flag)
 
 /*
- * Release global kernel lock and global interrupt lock
+ * Release global kernel lock.
  */
-#define release_kernel_lock(task)              \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               put_kernel_lock();              \
-} while (0)
+static inline void release_kernel_lock(struct task_struct *task)
+{
+       if (unlikely(task->lock_depth >= 0))
+               put_kernel_lock();
+}
 
 /*
  * Re-acquire the kernel lock
  */
-#define reacquire_kernel_lock(task)            \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               get_kernel_lock();              \
-} while (0)
-
+static inline void reacquire_kernel_lock(struct task_struct *task)
+{
+       if (unlikely(task->lock_depth >= 0))
+               get_kernel_lock();
+}
 
 /*
  * Getting the big kernel lock.
@@ -51,22 +39,29 @@ do {                                                \
  * so we only need to worry about other
  * CPU's.
  */
-static __inline__ void lock_kernel(void)
+static inline void lock_kernel(void)
 {
        int depth = current->lock_depth+1;
-       if (!depth)
+       if (likely(!depth))
                get_kernel_lock();
        current->lock_depth = depth;
 }
 
-static __inline__ void unlock_kernel(void)
+static inline void unlock_kernel(void)
 {
-       if (current->lock_depth < 0)
+       if (unlikely(current->lock_depth < 0))
                BUG();
-       if (--current->lock_depth < 0)
+       if (likely(--current->lock_depth < 0))
                put_kernel_lock();
 }
 
-#endif /* CONFIG_SMP */
+#else
+
+#define lock_kernel()                          do { } while(0)
+#define unlock_kernel()                                do { } while(0)
+#define release_kernel_lock(task)              do { } while(0)
+#define reacquire_kernel_lock(task)            do { } while(0)
+#define kernel_locked()                                1
 
-#endif
+#endif /* CONFIG_SMP || CONFIG_PREEMPT */
+#endif /* __LINUX_SMPLOCK_H */
index dff65833890724af4d5820a4ed2959a180067df5..84b116465785f99158faf9e47665692243326d7a 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/buffer_head.h>
 #include <linux/root_dev.h>
 #include <linux/percpu.h>
+#include <linux/smp_lock.h>
 #include <asm/checksum.h>
 
 #if defined(CONFIG_PROC_FS)
@@ -481,6 +482,9 @@ EXPORT_SYMBOL_GPL(idle_cpu);
 #if CONFIG_SMP
 EXPORT_SYMBOL_GPL(set_cpus_allowed);
 #endif
+#if CONFIG_SMP || CONFIG_PREEMPT
+EXPORT_SYMBOL(kernel_flag);
+#endif
 EXPORT_SYMBOL(jiffies);
 EXPORT_SYMBOL(jiffies_64);
 EXPORT_SYMBOL(xtime);
index 0d9e5bdef0506ca4c59cc47e4553c8b8807f4f29..4749298e45bd5bd82d47991b2fa617d81860fb07 100644 (file)
@@ -1881,7 +1881,6 @@ void __init init_idle(task_t *idle, int cpu)
 }
 
 #if CONFIG_SMP
-
 /*
  * This is how migration works:
  *
@@ -2070,6 +2069,20 @@ __init int migration_init(void)
 
 #endif
 
+#if CONFIG_SMP || CONFIG_PREEMPT
+/*
+ * The 'big kernel lock'
+ *
+ * This spinlock is taken and released recursively by lock_kernel()
+ * and unlock_kernel().  It is transparently dropped and reacquired
+ * over schedule().  It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Don't use in new code.
+ */
+spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+#endif
+
 extern void init_timervecs(void);
 extern void timer_bh(void);
 extern void tqueue_bh(void);