struct halt_info args;
args.mode = mode;
args.restart_cmd = restart_cmd;
-#ifdef CONFIG_SMP
- smp_call_function(common_shutdown_1, &args, 1, 0);
-#endif
- common_shutdown_1(&args);
+ on_each_cpu(common_shutdown_1, &args, 1, 0);
}
void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache before continuing. */
- if (smp_call_function(ipi_imb, NULL, 1, 1))
+ if (on_each_cpu(ipi_imb, NULL, 1, 1))
printk(KERN_CRIT "smp_imb: timed out\n");
-
- imb();
}
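
The same conversion repeats throughout this patch: an explicit smp_call_function() that IPIs the remote CPUs, followed by a duplicate local call, collapses into a single on_each_cpu(), which runs the handler on the calling CPU as well (see the helper added to include/linux/smp.h further down). A minimal sketch of the shape, using a hypothetical handler do_something():

	/* before: IPI the other CPUs, then repeat the work locally */
	if (smp_call_function(do_something, NULL, 1, 1))
		printk(KERN_CRIT "do_something: timed out\n");
	do_something(NULL);

	/* after: one call runs do_something() on every online CPU,
	   the calling CPU included, with preemption disabled around
	   the local invocation */
	if (on_each_cpu(do_something, NULL, 1, 1))
		printk(KERN_CRIT "do_something: timed out\n");
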
static void
{
/* Although we don't have any data to pass, we do want to
synchronize with the other processors. */
- if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
+ if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
printk(KERN_CRIT "flush_tlb_all: timed out\n");
}
-
- tbia();
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
void
flush_tlb_mm(struct mm_struct *mm)
{
+ preempt_disable();
+
if (mm == current->active_mm) {
flush_tlb_current(mm);
if (atomic_read(&mm->mm_users) <= 1) {
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
printk(KERN_CRIT "flush_tlb_mm: timed out\n");
}
+
+ preempt_enable();
}
struct flush_tlb_page_struct {
struct flush_tlb_page_struct data;
struct mm_struct *mm = vma->vm_mm;
+ preempt_disable();
+
if (mm == current->active_mm) {
flush_tlb_current_page(mm, vma, addr);
if (atomic_read(&mm->mm_users) <= 1) {
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
printk(KERN_CRIT "flush_tlb_page: timed out\n");
}
+
+ preempt_enable();
}
void
if ((vma->vm_flags & VM_EXEC) == 0)
return;
+ preempt_disable();
+
if (mm == current->active_mm) {
__load_new_mm_context(mm);
if (atomic_read(&mm->mm_users) <= 1) {
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
printk(KERN_CRIT "flush_icache_page: timed out\n");
}
+
+ preempt_enable();
}
\f
#ifdef CONFIG_DEBUG_SPINLOCK
void print_all_local_APICs (void)
{
- smp_call_function(print_local_APIC, NULL, 1, 1);
- print_local_APIC(NULL);
+ on_each_cpu(print_local_APIC, NULL, 1, 1);
}
void /*__init*/ print_PIC(void)
*/
printk(KERN_INFO "activating NMI Watchdog ...");
- smp_call_function(enable_NMI_through_LVT0, NULL, 1, 1);
- enable_NMI_through_LVT0(NULL);
+ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
printk(" done.\n");
}
wmb();
if (reload) {
- load_LDT(pc);
#ifdef CONFIG_SMP
preempt_disable();
+ load_LDT(pc);
if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
preempt_enable();
+#else
+ load_LDT(pc);
#endif
}
if (oldsize) {
int i, error = 0, err;
struct microcode *m;
- if (smp_call_function(do_update_one, NULL, 1, 1) != 0) {
+ if (on_each_cpu(do_update_one, NULL, 1, 1) != 0) {
printk(KERN_ERR "microcode: IPI timeout, giving up\n");
return -EIO;
}
- do_update_one(NULL);
for (i=0; i<NR_CPUS; i++) {
err = update_req[i].err;
preempt_enable();
}
-static inline void do_flush_tlb_all_local(void)
+static void do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
leave_mm(cpu);
}
-static void flush_tlb_all_ipi(void* info)
-{
- do_flush_tlb_all_local();
-}
-
void flush_tlb_all(void)
{
- preempt_disable();
- smp_call_function (flush_tlb_all_ipi,0,1,1);
-
- do_flush_tlb_all_local();
- preempt_enable();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
/*
return 0;
memcpy((void *) page, sysent, sizeof(sysent));
- enable_sep_cpu(NULL);
- smp_call_function(enable_sep_cpu, NULL, 1, 1);
+ on_each_cpu(enable_sep_cpu, NULL, 1, 1);
return 0;
}
smp_call_function_interrupt();
}
-static inline void
-do_flush_tlb_all_local(void)
+static void
+do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
}
-static void
-flush_tlb_all_function(void* info)
-{
- do_flush_tlb_all_local();
-}
-
/* flush the TLB of every active CPU in the system */
void
flush_tlb_all(void)
{
- preempt_disable();
- smp_call_function (flush_tlb_all_function, 0, 1, 1);
- do_flush_tlb_all_local();
- preempt_enable();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
/* used to set up the trampoline for other CPUs when the memory manager
}
static inline void flush_map(void)
-{
-#ifdef CONFIG_SMP
- smp_call_function(flush_kernel_map, NULL, 1, 1);
-#endif
- flush_kernel_map(NULL);
+{
+ on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
struct deferred_page {
* without actually triggering any NMIs as this will
* break the core code horrifically.
*/
- smp_call_function(nmi_cpu_setup, NULL, 0, 1);
- nmi_cpu_setup(0);
+ on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
set_nmi_callback(nmi_callback);
oprofile_pmdev = set_nmi_pm_callback(oprofile_pm_callback);
return 0;
{
unset_nmi_pm_callback(oprofile_pmdev);
unset_nmi_callback();
- smp_call_function(nmi_cpu_shutdown, NULL, 0, 1);
- nmi_cpu_shutdown(0);
+ on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
}
static int nmi_start(void)
{
- smp_call_function(nmi_cpu_start, NULL, 0, 1);
- nmi_cpu_start(0);
+ on_each_cpu(nmi_cpu_start, NULL, 0, 1);
return 0;
}
static void nmi_stop(void)
{
- smp_call_function(nmi_cpu_stop, NULL, 0, 1);
- nmi_cpu_stop(0);
+ on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
}
void
smp_flush_tlb_all (void)
{
- smp_call_function((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
- local_flush_tlb_all();
+ on_each_cpu((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
}
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
- local_finish_flush_tlb_mm(mm);
-
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
+ {
+ local_finish_flush_tlb_mm(mm);
return;
+ }
/*
* We could optimize this further by using mm->cpu_vm_mask to track which CPUs
* anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
* rather trivial.
*/
- smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+ on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}
/*
void flush_tlb_all(void)
{
- smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
- _flush_tlb_all();
+ on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}
static void flush_tlb_mm_ipi(void *mm)
void flush_tlb_mm(struct mm_struct *mm)
{
+ preempt_disable();
+
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
} else {
CPU_CONTEXT(i, mm) = 0;
}
_flush_tlb_mm(mm);
+
+ preempt_enable();
}
struct flush_tlb_data {
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
+ preempt_disable();
+
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd;
CPU_CONTEXT(i, mm) = 0;
}
_flush_tlb_range(mm, start, end);
+
+ preempt_enable();
}
static void flush_tlb_page_ipi(void *info)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ preempt_disable();
+
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd;
CPU_CONTEXT(i, vma->vm_mm) = 0;
}
_flush_tlb_page(vma, page);
+
+ preempt_enable();
}
void
flush_data_cache(void)
{
- smp_call_function((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
- flush_data_cache_local();
+ on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
}
#endif
static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED; /* protect IRQ regions */
-#ifdef CONFIG_SMP
static void cpu_set_eiem(void *info)
{
set_eiem((unsigned long) info);
}
-#endif
static inline void disable_cpu_irq(void *unused, int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem &= ~eirr_bit;
- set_eiem(cpu_eiem);
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
static void enable_cpu_irq(void *unused, int irq)
mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
cpu_eiem |= eirr_bit;
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
- set_eiem(cpu_eiem);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
/* mask and disable are the same at the CPU level
** handle *any* unmasked pending interrupts.
** ie We don't need to check for pending interrupts here.
*/
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
- set_eiem(cpu_eiem);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
/*
__setup("maxcpus=", maxcpus);
/*
- * Flush all other CPU's tlb and then mine. Do this with smp_call_function()
+ * Flush all other CPUs' TLBs and then mine. Do this with on_each_cpu()
 * as we want to ensure all TLBs are flushed before proceeding.
*/
void
smp_flush_tlb_all(void)
{
- smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
- flush_tlb_all_local();
+ on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
}
do_recycle++;
}
spin_unlock(&sid_lock);
- smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
- flush_tlb_all_local();
+ on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
if (do_recycle) {
spin_lock(&sid_lock);
recycle_sids(recycle_ndirty,recycle_dirty_array);
/* schedule ourselves to be run again */
mod_timer(&tau_timer, jiffies + shrink_timer) ;
-#ifdef CONFIG_SMP
- smp_call_function(tau_timeout, NULL, 1, 0);
-#endif
- tau_timeout(NULL);
+ on_each_cpu(tau_timeout, NULL, 1, 0);
}
/*
tau_timer.expires = jiffies + shrink_timer;
add_timer(&tau_timer);
-#ifdef CONFIG_SMP
- smp_call_function(TAU_init_smp, NULL, 1, 0);
-#endif
- TAU_init_smp(NULL);
+ on_each_cpu(TAU_init_smp, NULL, 1, 0);
printk("Thermal assist unit ");
#ifdef CONFIG_TAU_INT
void machine_restart_smp(char * __unused)
{
cpu_restart_map = cpu_online_map;
- smp_call_function(do_machine_restart, NULL, 0, 0);
- do_machine_restart(NULL);
+ on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_machine_halt(void * __unused)
void machine_halt_smp(void)
{
- smp_call_function(do_machine_halt, NULL, 0, 0);
- do_machine_halt(NULL);
+ on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
void machine_power_off_smp(void)
{
- smp_call_function(do_machine_power_off, NULL, 0, 0);
- do_machine_power_off(NULL);
+ on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
void smp_ptlb_all(void)
{
- smp_call_function(smp_ptlb_callback, NULL, 0, 1);
- local_flush_tlb();
+ on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
/*
parms.end_ctl = cr;
parms.orvals[cr] = 1 << bit;
parms.andvals[cr] = 0xFFFFFFFF;
+ preempt_disable();
smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
__ctl_set_bit(cr, bit);
+ preempt_enable();
}
/*
parms.end_ctl = cr;
parms.orvals[cr] = 0x00000000;
parms.andvals[cr] = ~(1 << bit);
+ preempt_disable();
smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
__ctl_clear_bit(cr, bit);
+ preempt_enable();
}
/*
void machine_restart_smp(char * __unused)
{
cpu_restart_map = cpu_online_map;
- smp_call_function(do_machine_restart, NULL, 0, 0);
- do_machine_restart(NULL);
+ on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_machine_halt(void * __unused)
void machine_halt_smp(void)
{
- smp_call_function(do_machine_halt, NULL, 0, 0);
- do_machine_halt(NULL);
+ on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
void machine_power_off_smp(void)
{
- smp_call_function(do_machine_power_off, NULL, 0, 0);
- do_machine_power_off(NULL);
+ on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
parms.end_ctl = cr;
parms.orvals[cr] = 1 << bit;
parms.andvals[cr] = -1L;
+ preempt_disable();
smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
__ctl_set_bit(cr, bit);
+ preempt_enable();
}
/*
parms.end_ctl = cr;
parms.orvals[cr] = 0;
parms.andvals[cr] = ~(1L << bit);
+ preempt_disable();
smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
__ctl_clear_bit(cr, bit);
+ preempt_enable();
}
{
u32 low, high;
int i;
- unsigned int *cpu = info;
- BUG_ON (*cpu != smp_processor_id());
-
- preempt_disable();
for (i=0; i<banks; i++) {
rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
wmb();
}
}
- preempt_enable();
}
static void mce_timerfunc (unsigned long data)
{
- unsigned int i;
-
- for (i=0; i<smp_num_cpus; i++) {
- if (i == smp_processor_id())
- mce_checkregs(&i);
- else
- smp_call_function (mce_checkregs, &i, 1, 1);
- }
+ on_each_cpu(mce_checkregs, NULL, 1, 1);
/* Refresh the timer. */
mce_timer.expires = jiffies + MCE_RATE;
void print_all_local_APICs (void)
{
- smp_call_function(print_local_APIC, NULL, 1, 1);
- print_local_APIC(NULL);
+ on_each_cpu(print_local_APIC, NULL, 1, 1);
}
void /*__init*/ print_PIC(void)
pc->size = mincount;
wmb();
if (reload) {
- load_LDT(pc);
#ifdef CONFIG_SMP
preempt_disable();
+ load_LDT(pc);
if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
preempt_enable();
+#else
+ load_LDT(pc);
#endif
}
if (oldsize) {
preempt_enable();
}
-static inline void do_flush_tlb_all_local(void)
+static void do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
leave_mm(cpu);
}
-static void flush_tlb_all_ipi(void* info)
-{
- do_flush_tlb_all_local();
-}
-
void flush_tlb_all(void)
{
- preempt_disable();
- smp_call_function (flush_tlb_all_ipi,0,1,1);
-
- do_flush_tlb_all_local();
- preempt_enable();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
void smp_kdb_stop(void)
static inline void flush_map(unsigned long address)
{
- preempt_disable();
-#ifdef CONFIG_SMP
- smp_call_function(flush_kernel_map, (void *)address, 1, 1);
-#endif
- flush_kernel_map((void *)address);
- preempt_enable();
+ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
struct deferred_page {
static void __attribute__((unused)) global_cache_flush(void)
{
- if (smp_call_function(ipi_handler, NULL, 1, 1) != 0)
+ if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
panic(PFX "timed out waiting for the other CPUs!\n");
- flush_agp_cache();
}
#else
static inline void global_cache_flush(void)
do_machine_quiesce(void)
{
cpu_quiesce_map = cpu_online_map;
- smp_call_function(do_load_quiesce_psw, NULL, 0, 0);
- do_load_quiesce_psw(NULL);
+ on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
#else
static void
static void invalidate_bh_lrus(void)
{
- preempt_disable();
- invalidate_bh_lru(NULL);
- smp_call_function(invalidate_bh_lru, NULL, 1, 1);
- preempt_enable();
+ on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}
void set_bh_page(struct buffer_head *bh,
extern void flush_cache_all_local(void);
-#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
- smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
- flush_cache_all_local();
+ on_each_cpu((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
}
-#else
-#define flush_cache_all flush_cache_all_local
-#endif
-
/* The following value needs to be tuned and probably scaled with the
* cache size.
#ifdef CONFIG_SMP
+#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
-#include <linux/threads.h>
+#include <linux/thread_info.h>
#include <asm/smp.h>
#include <asm/bug.h>
extern int smp_call_function (void (*func) (void *info), void *info,
int retry, int wait);
+/*
+ * Call a function on all processors
+ */
+static inline int on_each_cpu(void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ int ret = 0;
+
+ preempt_disable();
+ ret = smp_call_function(func, info, retry, wait);
+ func(info);
+ preempt_enable();
+ return ret;
+}
+
/*
* True once the per process idle is forked
*/
#define hard_smp_processor_id() 0
#define smp_threads_ready 1
#define smp_call_function(func,info,retry,wait) ({ 0; })
+#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
static inline void smp_send_reschedule(int cpu) { }
static inline void smp_send_reschedule_all(void) { }
#define cpu_online_map 1
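
For reference, a sketch of how a caller uses the new helper; flush_something() and flush_everywhere() are hypothetical names, not part of this patch. On SMP the inline above IPIs the other CPUs and then runs the handler locally under preempt_disable(); on UP the macro simply calls func(info) and returns 0, so the same caller builds for both configurations:

	#include <linux/kernel.h>
	#include <linux/smp.h>

	/* hypothetical per-CPU handler: runs once on every online CPU */
	static void flush_something(void *info)
	{
		/* per-CPU work goes here */
	}

	void flush_everywhere(void)
	{
		/* retry=1, wait=1: don't return until every CPU has run it */
		if (on_each_cpu(flush_something, NULL, 1, 1))
			printk(KERN_CRIT "flush_everywhere: timed out\n");
	}
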
static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
{
check_irq_on();
+ preempt_disable();
+
local_irq_disable();
func(arg);
local_irq_enable();
if (smp_call_function(func, arg, 1, 1))
BUG();
+
+ preempt_enable();
}
static void free_block (kmem_cache_t* cachep, void** objpp, int len);