--- /dev/null
+#ifndef _ASM_GENERIC_PERCPU_H_
+#define _ASM_GENERIC_PERCPU_H_
+
+#define __GENERIC_PER_CPU
+#include <linux/compiler.h>
+#include <linux/threads.h>	/* for NR_CPUS */
+
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) (*RELOC_HIDE(&(var), __per_cpu_offset[(cpu)]))
+#define this_cpu(var) per_cpu(var, smp_processor_id())
+
+#endif /* _ASM_GENERIC_PERCPU_H_ */
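For reference, the accessor above is just hidden pointer arithmetic. A sketch of the open-coded equivalent for a hypothetical per-cpu variable ("jiffy_count" and the helper below are illustrative, not part of the patch; the variable would be declared with the __per_cpu_data attribute introduced in <linux/percpu.h> further down):

/* Open-coded equivalent of per_cpu(jiffy_count, cpu): take the address of
 * the prototype copy in .data.percpu and displace it by that CPU's offset.
 * RELOC_HIDE does the same arithmetic while hiding it from gcc.
 */
static unsigned long read_jiffy_count(int cpu)
{
	unsigned long *p;

	p = (unsigned long *)((char *)&jiffy_count + __per_cpu_offset[cpu]);
	return *p;
}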
--- /dev/null
+#ifndef __ARCH_I386_PERCPU__
+#define __ARCH_I386_PERCPU__
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ARCH_I386_PERCPU__ */
--- /dev/null
+#ifndef __ARCH_PPC_PERCPU__
+#define __ARCH_PPC_PERCPU__
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ARCH_PPC_PERCPU__ */
 /* This macro obfuscates arithmetic on a variable address so that gcc
    won't recognize the original var and make assumptions about it */
-#define RELOC_HIDE(var, off) \
- ({ __typeof__(&(var)) __ptr; \
- __asm__ ("" : "=g"(__ptr) : "0"((void *)&(var) + (off))); \
- *__ptr; })
+#define RELOC_HIDE(ptr, off) \
+ ({ __typeof__(ptr) __ptr; \
+ __asm__ ("" : "=g"(__ptr) : "0"((void *)(ptr) + (off))); \
+ __ptr; })
#endif /* __LINUX_COMPILER_H */
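Note that RELOC_HIDE now takes and yields a pointer instead of an lvalue; the dereference moves into per_cpu() itself. The empty asm with the matching "0" constraint simply passes the computed address through an operand, so gcc can no longer tie the result back to the original object. A standalone sketch of the technique (the names here are made up for illustration):

/* Launder base + off through an empty asm: it emits no instructions, but
 * gcc must treat the output as an arbitrary pointer rather than as base
 * plus a known offset, so it drops its aliasing and bounds assumptions.
 */
static void *hide_offset(void *base, unsigned long off)
{
	void *p;

	__asm__ ("" : "=g" (p) : "0" (base + off));
	return p;
}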
--- /dev/null
+#ifndef __LINUX_PERCPU_H
+#define __LINUX_PERCPU_H
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+#define __per_cpu_data __attribute__((section(".data.percpu")))
+#include <asm/percpu.h>
+#else
+#define __per_cpu_data
+#define per_cpu(var, cpu) var
+#define this_cpu(var) var
+#endif
+
+#endif /* __LINUX_PERCPU_H */
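To show how the pieces fit together (the variable and functions below are hypothetical, not part of the patch): a variable is declared with __per_cpu_data so the linker collects it into .data.percpu, the owning CPU touches its copy through this_cpu(), and cross-CPU readers use per_cpu(). On !CONFIG_SMP builds both macros collapse to a plain reference to the single variable.

#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-cpu statistic, for illustration only. */
static unsigned long __per_cpu_data softirq_hits;

static void note_softirq(void)
{
	this_cpu(softirq_hits)++;		/* this CPU's private copy */
}

static unsigned long total_softirq_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += per_cpu(softirq_hits, cpu);	/* CPU cpu's copy */
	return sum;
}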
#define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU*/
#define MSG_CALL_FUNCTION 0x0004 /* Call function on all other CPUs */
-#define __per_cpu_data __attribute__((section(".data.percpu")))
-
-#ifndef __HAVE_ARCH_PER_CPU
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) RELOC_HIDE(var, per_cpu_offset(cpu))
-
-#define this_cpu(var) per_cpu(var, smp_processor_id())
-#endif /* !__HAVE_ARCH_PER_CPU */
#else /* !SMP */
/*
#include <linux/iobuf.h>
#include <linux/bootmem.h>
#include <linux/tty.h>
+#include <linux/percpu.h>
#include <asm/io.h>
#include <asm/bugs.h>
#define smp_init() do { } while (0)
#endif
-static inline void setup_per_cpu_areas(void)
-{
-}
#else
-#ifndef __HAVE_ARCH_PER_CPU
+#ifdef __GENERIC_PER_CPU
unsigned long __per_cpu_offset[NR_CPUS];
static void __init setup_per_cpu_areas(void)
memcpy(ptr, __per_cpu_start, size);
}
}
-#endif /* !__HAVE_ARCH_PER_CPU */
+#else
+static inline void setup_per_cpu_areas(void)
+{
+}
+#endif /* !__GENERIC_PER_CPU */
/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
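The generic setup_per_cpu_areas() hunk above is shown only in part. Its overall shape under this scheme is: measure the .data.percpu prototype between the linker symbols __per_cpu_start and __per_cpu_end, allocate NR_CPUS copies at boot, copy the prototype into each, and record every copy's displacement in __per_cpu_offset[]. A hedged reconstruction of that shape follows; the allocator, the lack of alignment rounding, and the empty-section check are assumptions, not taken from the hunk (init/main.c does already include <linux/bootmem.h> in the context above):

/* Reconstruction of the partly elided body, for orientation only. */
static void __init setup_per_cpu_areas(void)
{
	/* Bounds of .data.percpu, provided by the linker script. */
	extern char __per_cpu_start[], __per_cpu_end[];
	unsigned long size, i;
	char *ptr;

	size = __per_cpu_end - __per_cpu_start;	/* rounding up is assumed */
	if (!size)
		return;

	ptr = alloc_bootmem(size * NR_CPUS);

	for (i = 0; i < NR_CPUS; i++, ptr += size) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, size);	/* matches the hunk */
	}
}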