[PATCH] s390: cpu hotplug support
author Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 6 Jul 2004 16:18:28 +0000 (09:18 -0700)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
Tue, 6 Jul 2004 16:18:28 +0000 (09:18 -0700)
From: Ursula Braun-Krahl <braunu@de.ibm.com>
From: Heiko Carstens <heiko.carstens@de.ibm.com>
From: Gerald Schaefer <geraldsc@de.ibm.com>
From: Martin Schwidefsky <schwidefsky@de.ibm.com>

Add s390 architecture support for cpu hotplug.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
13 files changed:
arch/s390/Kconfig
arch/s390/appldata/appldata_base.c
arch/s390/appldata/appldata_os.c
arch/s390/defconfig
arch/s390/kernel/process.c
arch/s390/kernel/s390_ksyms.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/mm/cmm.c
drivers/s390/char/sclp.c
drivers/s390/net/iucv.c
include/asm-s390/sigp.h
include/asm-s390/smp.h

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 778c21724bc7dd3d0f2eb1d9de61ddae4173cd6b..5906668177f0b4ed2378765d2dd22f44e436f21f 100644
@@ -107,6 +107,15 @@ config NR_CPUS
          This is purely to save memory - each supported CPU adds
          approximately sixteen kilobytes to the kernel image.
 
+config HOTPLUG_CPU
+       bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+       depends on SMP && HOTPLUG && EXPERIMENTAL
+       default n
+       help
+         Say Y here to experiment with turning CPUs off and on.  CPUs
+         can be controlled through /sys/devices/system/cpu/cpu#.
+         Say N if you want to disable CPU hotplug.
+
 config MATHEMU
        bool "IEEE FPU emulation"
        depends on MARCH_G5
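
For reference, the help text above describes the whole user-space interface: each hot-pluggable CPU gets an "online" attribute under /sys/devices/system/cpu/cpu#. A minimal sketch of driving it from C (assuming CONFIG_HOTPLUG_CPU=y and that cpu1 exists):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Write '0' to take cpu1 offline, '1' to bring it back. */
		int fd = open("/sys/devices/system/cpu/cpu1/online", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}
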
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 0f8c08ad974abbe52d6331973b3d35e78ece7c45..d957683d6165aec1f8e9cf8c13375ed33dcfac0e 100644
@@ -25,6 +25,8 @@
 #include <linux/sysctl.h>
 #include <asm/timer.h>
 //#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
 
 #include "appldata.h"
 
@@ -124,10 +126,6 @@ static struct ctl_table appldata_dir_table[] = {
  */
 DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
 static atomic_t appldata_expire_count = ATOMIC_INIT(0);
-static struct appldata_mod_vtimer_args {
-       struct vtimer_list *timer;
-       u64    expires;
-} appldata_mod_vtimer_args;
 
 static spinlock_t appldata_timer_lock = SPIN_LOCK_UNLOCKED;
 static int appldata_interval = APPLDATA_CPU_INTERVAL;
@@ -154,7 +152,7 @@ static LIST_HEAD(appldata_ops_list);
 static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
 {
        P_DEBUG("   -= Timer =-\n");
-       P_DEBUG("CPU: %i, expire: %i\n", smp_processor_id(),
+       P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
                atomic_read(&appldata_expire_count));
        if (atomic_dec_and_test(&appldata_expire_count)) {
                atomic_set(&appldata_expire_count, num_online_cpus());
@@ -187,17 +185,6 @@ static void appldata_tasklet_function(unsigned long data)
        spin_unlock(&appldata_ops_lock);
 }
 
-/*
- * appldata_mod_vtimer_wrap()
- *
- * wrapper function for mod_virt_timer(), because smp_call_function_on()
- * accepts only one parameter.
- */
-static void appldata_mod_vtimer_wrap(void *p) {
-       struct appldata_mod_vtimer_args *args = p;
-       mod_virt_timer(args->timer, args->expires);
-}
-
 /*
  * appldata_diag()
  *
@@ -247,6 +234,79 @@ static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 
 
 /****************************** /proc stuff **********************************/
+
+/*
+ * appldata_mod_vtimer_wrap()
+ *
+ * wrapper function for mod_virt_timer(), because smp_call_function_on()
+ * accepts only one parameter.
+ */
+static void __appldata_mod_vtimer_wrap(void *p) {
+       struct {
+               struct vtimer_list *timer;
+               u64    expires;
+       } *args = p;
+       mod_virt_timer(args->timer, args->expires);
+}
+
+#define APPLDATA_ADD_TIMER     0
+#define APPLDATA_DEL_TIMER     1
+#define APPLDATA_MOD_TIMER     2
+
+/*
+ * __appldata_vtimer_setup()
+ *
+ * Add, delete or modify virtual timers on all online cpus.
+ * The caller needs to get the appldata_timer_lock spinlock.
+ */
+static void
+__appldata_vtimer_setup(int cmd)
+{
+       u64 per_cpu_interval;
+       int i;
+
+       switch (cmd) {
+       case APPLDATA_ADD_TIMER:
+               if (appldata_timer_active)
+                       break;
+               per_cpu_interval = (u64) (appldata_interval*1000 /
+                                         num_online_cpus()) * TOD_MICRO;
+               for_each_online_cpu(i) {
+                       per_cpu(appldata_timer, i).expires = per_cpu_interval;
+                       smp_call_function_on(add_virt_timer_periodic,
+                                            &per_cpu(appldata_timer, i),
+                                            0, 1, i);
+               }
+               appldata_timer_active = 1;
+               P_INFO("Monitoring timer started.\n");
+               break;
+       case APPLDATA_DEL_TIMER:
+               for_each_online_cpu(i)
+                       del_virt_timer(&per_cpu(appldata_timer, i));
+               if (!appldata_timer_active)
+                       break;
+               appldata_timer_active = 0;
+               atomic_set(&appldata_expire_count, num_online_cpus());
+               P_INFO("Monitoring timer stopped.\n");
+               break;
+       case APPLDATA_MOD_TIMER:
+               per_cpu_interval = (u64) (appldata_interval*1000 /
+                                         num_online_cpus()) * TOD_MICRO;
+               if (!appldata_timer_active)
+                       break;
+               for_each_online_cpu(i) {
+                       struct {
+                               struct vtimer_list *timer;
+                               u64    expires;
+                       } args;
+                       args.timer = &per_cpu(appldata_timer, i);
+                       args.expires = per_cpu_interval;
+                       smp_call_function_on(__appldata_mod_vtimer_wrap,
+                                            &args, 0, 1, i);
+               }
+       }
+}
+
 /*
  * appldata_timer_handler()
  *
@@ -256,9 +316,8 @@ static int
 appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
                           void __user *buffer, size_t *lenp)
 {
-       int len, i;
+       int len;
        char buf[2];
-       u64 per_cpu_interval;
 
        if (!*lenp || filp->f_pos) {
                *lenp = 0;
@@ -272,30 +331,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
                        return -EFAULT;
                goto out;
        }
-       per_cpu_interval = (u64) (appldata_interval*1000 /
-                                num_online_cpus()) * TOD_MICRO;
        len = *lenp;
        if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
                return -EFAULT;
        spin_lock(&appldata_timer_lock);
-       per_cpu_interval = (u64) (appldata_interval*1000 /
-                                num_online_cpus()) * TOD_MICRO;
-       if ((buf[0] == '1') && (!appldata_timer_active)) {
-               for (i = 0; i < num_online_cpus(); i++) {
-                       per_cpu(appldata_timer, i).expires = per_cpu_interval;
-                       smp_call_function_on(add_virt_timer_periodic,
-                                               &per_cpu(appldata_timer, i),
-                                               0, 1, i);
-               }
-               appldata_timer_active = 1;
-               P_INFO("Monitoring timer started.\n");
-       } else if ((buf[0] == '0') && (appldata_timer_active)) {
-               for (i = 0; i < num_online_cpus(); i++) {
-                       del_virt_timer(&per_cpu(appldata_timer, i));
-               }
-               appldata_timer_active = 0;
-               P_INFO("Monitoring timer stopped.\n");
-       }
+       if (buf[0] == '1')
+               __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+       else if (buf[0] == '0')
+               __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
        spin_unlock(&appldata_timer_lock);
 out:
        *lenp = len;
@@ -313,9 +356,8 @@ static int
 appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
                           void __user *buffer, size_t *lenp)
 {
-       int len, i, interval;
+       int len, interval;
        char buf[16];
-       u64 per_cpu_interval;
 
        if (!*lenp || filp->f_pos) {
                *lenp = 0;
@@ -340,20 +382,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
        }
 
        spin_lock(&appldata_timer_lock);
-       per_cpu_interval = (u64) (interval*1000 / num_online_cpus()) * TOD_MICRO;
        appldata_interval = interval;
-       if (appldata_timer_active) {
-               for (i = 0; i < num_online_cpus(); i++) {
-                       appldata_mod_vtimer_args.timer =
-                                       &per_cpu(appldata_timer, i);
-                       appldata_mod_vtimer_args.expires =
-                                       per_cpu_interval;
-                       smp_call_function_on(
-                               appldata_mod_vtimer_wrap,
-                               &appldata_mod_vtimer_args,
-                               0, 1, i);
-               }
-       }
+       __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
        spin_unlock(&appldata_timer_lock);
 
        P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
@@ -564,6 +594,56 @@ void appldata_unregister_ops(struct appldata_ops *ops)
 
 
 /******************************* init / exit *********************************/
+
+static void
+appldata_online_cpu(int cpu)
+{
+       init_virt_timer(&per_cpu(appldata_timer, cpu));
+       per_cpu(appldata_timer, cpu).function = appldata_timer_function;
+       per_cpu(appldata_timer, cpu).data = (unsigned long)
+               &appldata_tasklet_struct;
+       atomic_inc(&appldata_expire_count);
+       spin_lock(&appldata_timer_lock);
+       __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
+       spin_unlock(&appldata_timer_lock);
+}
+
+static void
+appldata_offline_cpu(int cpu)
+{
+       del_virt_timer(&per_cpu(appldata_timer, cpu));
+       if (atomic_dec_and_test(&appldata_expire_count)) {
+               atomic_set(&appldata_expire_count, num_online_cpus());
+               tasklet_schedule(&appldata_tasklet_struct);
+       }
+       spin_lock(&appldata_timer_lock);
+       __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
+       spin_unlock(&appldata_timer_lock);
+}
+
+static int
+appldata_cpu_notify(struct notifier_block *self,
+                   unsigned long action, void *hcpu)
+{
+       switch (action) {
+       case CPU_ONLINE:
+               appldata_online_cpu((long) hcpu);
+               break;
+#ifdef CONFIG_HOTPLUG_CPU
+       case CPU_DEAD:
+               appldata_offline_cpu((long) hcpu);
+               break;
+#endif
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __devinitdata appldata_nb = {
+       .notifier_call = appldata_cpu_notify,
+};
+
 /*
  * appldata_init()
  *
@@ -576,13 +656,11 @@ static int __init appldata_init(void)
        P_DEBUG("sizeof(parameter_list) = %lu\n",
                sizeof(struct appldata_parameter_list));
 
-       for (i = 0; i < num_online_cpus(); i++) {
-               init_virt_timer(&per_cpu(appldata_timer, i));
-               per_cpu(appldata_timer, i).function = appldata_timer_function;
-               per_cpu(appldata_timer, i).data = (unsigned long)
-                                               &appldata_tasklet_struct;
-       }
-       atomic_set(&appldata_expire_count, num_online_cpus());
+       for_each_online_cpu(i)
+               appldata_online_cpu(i);
+
+       /* Register cpu hotplug notifier */
+       register_cpu_notifier(&appldata_nb);
 
        appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
 #ifdef MODULE
@@ -623,9 +701,9 @@ static void __exit appldata_exit(void)
        }
        spin_unlock_bh(&appldata_ops_lock);
 
-       for (i = 0; i < num_online_cpus(); i++) {
-               del_virt_timer(&per_cpu(appldata_timer, i));
-       }
+       for_each_online_cpu(i)
+               appldata_offline_cpu(i);
+
        appldata_timer_active = 0;
 
        unregister_sysctl_table(appldata_sysctl_header);
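
The appldata changes above are an instance of the standard 2.6 hotplug notifier pattern: set up per-cpu state for every CPU that is already online, then register a callback for later CPU_ONLINE/CPU_DEAD transitions. A stripped-down sketch of the pattern (my_online_cpu/my_offline_cpu are hypothetical stand-ins for the driver's per-cpu setup and teardown):

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	static void my_online_cpu(int cpu) { /* set up per-cpu state */ }
	static void my_offline_cpu(int cpu) { /* tear it down again */ }

	static int my_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
	{
		int cpu = (long) hcpu;

		switch (action) {
		case CPU_ONLINE:
			my_online_cpu(cpu);
			break;
	#ifdef CONFIG_HOTPLUG_CPU
		case CPU_DEAD:
			my_offline_cpu(cpu);
			break;
	#endif
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_cpu_notify,
	};

	static int __init my_init(void)
	{
		int i;

		/* Cover the CPUs that came up before registration. */
		for_each_online_cpu(i)
			my_online_cpu(i);
		register_cpu_notifier(&my_nb);
		return 0;
	}
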
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 58ad2cf48eeec26c7b7b4871e953d2cf694f644a..b83f074845514ad409f8c5da3e57cf3a2c2b8459 100644
@@ -98,8 +98,7 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
                LOAD_INT(a2), LOAD_FRAC(a2));
 
        P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
-       for (i = 0; i < NR_CPUS; i++) {
-               if (!cpu_online(i)) continue;
+       for (i = 0; i < os_data->nr_cpus; i++) {
                P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
                        "idle = %u, irq = %u, softirq = %u, iowait = %u\n",
                                i,
@@ -124,7 +123,7 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
  */
 static void appldata_get_os_data(void *data)
 {
-       int i;
+       int i, j;
        struct appldata_os_data *os_data;
 
        os_data = data;
@@ -139,21 +138,23 @@ static void appldata_get_os_data(void *data)
        os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
        os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
 
-       for (i = 0; i < num_online_cpus(); i++) {
-               os_data->os_cpu[i].per_cpu_user =
+       j = 0;
+       for_each_online_cpu(i) {
+               os_data->os_cpu[j].per_cpu_user =
                                        kstat_cpu(i).cpustat.user;
-               os_data->os_cpu[i].per_cpu_nice =
+               os_data->os_cpu[j].per_cpu_nice =
                                        kstat_cpu(i).cpustat.nice;
-               os_data->os_cpu[i].per_cpu_system =
+               os_data->os_cpu[j].per_cpu_system =
                                        kstat_cpu(i).cpustat.system;
-               os_data->os_cpu[i].per_cpu_idle =
+               os_data->os_cpu[j].per_cpu_idle =
                                        kstat_cpu(i).cpustat.idle;
-               os_data->os_cpu[i].per_cpu_irq =
+               os_data->os_cpu[j].per_cpu_irq =
                                        kstat_cpu(i).cpustat.irq;
-               os_data->os_cpu[i].per_cpu_softirq =
+               os_data->os_cpu[j].per_cpu_softirq =
                                        kstat_cpu(i).cpustat.softirq;
-               os_data->os_cpu[i].per_cpu_iowait =
+               os_data->os_cpu[j].per_cpu_iowait =
                                        kstat_cpu(i).cpustat.iowait;
+               j++;
        }
 
        os_data->timestamp = get_clock();
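
Worth noting: once CPUs can go away, the set of online CPU numbers may be sparse (e.g. cpu0 and cpu2 online, cpu1 offline), so the hunk above keeps two counters: i walks the online map while j packs the records densely into os_cpu[]. The pattern in isolation (a sketch; struct cpu_rec is a hypothetical output record):

	struct cpu_rec { u64 user; /* ... */ };

	static int pack_cpu_stats(struct cpu_rec *dest)
	{
		int i, j = 0;

		for_each_online_cpu(i) {
			/* i: (possibly sparse) cpu number; j: dense slot */
			dest[j].user = kstat_cpu(i).cpustat.user;
			j++;
		}
		return j;	/* equals num_online_cpus() */
	}
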
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 3b8924c05cf74bffa78c7ddcccee25fb6381c0b1..6be5383f8a57fd66949fef97afc9ef09ddc3b964 100644
@@ -46,7 +46,6 @@ CONFIG_MODULES=y
 CONFIG_OBSOLETE_MODPARM=y
 # CONFIG_MODVERSIONS is not set
 CONFIG_KMOD=y
-CONFIG_STOP_MACHINE=y
 
 #
 # Base setup
@@ -63,6 +62,7 @@ CONFIG_MARCH_G5=y
 # CONFIG_MARCH_Z990 is not set
 CONFIG_SMP=y
 CONFIG_NR_CPUS=32
+# CONFIG_HOTPLUG_CPU is not set
 CONFIG_MATHEMU=y
 
 #
@@ -510,6 +510,7 @@ CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_AES is not set
 # CONFIG_CRYPTO_CAST5 is not set
 # CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
 # CONFIG_CRYPTO_ARC4 is not set
 # CONFIG_CRYPTO_DEFLATE is not set
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 4f4d32056f31e6b08f9659ea840c2831d5f06ed0..73cb6baeb5259c1a39277273099cc709a61a373e 100644
@@ -17,6 +17,7 @@
 
 #include <linux/config.h>
 #include <linux/compiler.h>
+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -122,6 +123,11 @@ void default_idle(void)
        /* enable monitor call class 0 */
        __ctl_set_bit(8, 15);
 
+#ifdef CONFIG_HOTPLUG_CPU
+       if (cpu_is_offline(smp_processor_id()))
+               cpu_die();
+#endif
+
        /* 
         * Wait for external, I/O or machine check interrupt and
         * switch off machine check bit after the wait has ended.
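
For orientation, this idle-loop hook is one leg of the offline handshake; the other legs are added to arch/s390/kernel/smp.c further down. A simplified outline of the whole sequence:

	/*
	 * cpu_down() path on s390, simplified:
	 *
	 * 1. __cpu_disable() runs on the dying cpu; it bails out with
	 *    -EBUSY while the cpu is reserved (smp_cpu_reserved != 0),
	 *    otherwise it masks external, I/O and most machine check
	 *    interrupts.
	 * 2. The dying cpu falls back into default_idle(), sees
	 *    cpu_is_offline() and calls cpu_die().
	 * 3. cpu_die() issues sigp_stop on itself and never returns;
	 *    __cpu_die(), running on another cpu, polls cpu_stopped()
	 *    until the target has actually stopped.
	 */
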
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 38517aaf3c29c6530911185fbe44e6929cb7e354..43afd32f82028727b91a520162ecbc25eb502a64 100644
@@ -50,17 +50,6 @@ EXPORT_SYMBOL(overflowuid);
 EXPORT_SYMBOL(overflowgid);
 EXPORT_SYMBOL(empty_zero_page);
 
-/*
- * virtual CPU timer
- */
-#ifdef CONFIG_VIRT_TIMER
-EXPORT_SYMBOL(init_virt_timer);
-EXPORT_SYMBOL(add_virt_timer);
-EXPORT_SYMBOL(add_virt_timer_periodic);
-EXPORT_SYMBOL(mod_virt_timer);
-EXPORT_SYMBOL(del_virt_timer);
-#endif
-
 /*
  * misc.
  */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f75cc79ae099ace96a32da8cfd0137840afb70cb..a50891ce00725c6bc30bc3a85caaa9b3e096f678 100644
@@ -58,8 +58,6 @@ struct {
 } memory_chunk[MEMORY_CHUNKS] = { { 0 } };
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
-int cpus_initialized = 0;
-static cpumask_t cpu_initialized;
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
 
 /*
@@ -85,15 +83,8 @@ static struct resource data_resource = { "Kernel data", 0, 0 };
  */
 void __devinit cpu_init (void)
 {
-        int nr = smp_processor_id();
         int addr = hard_smp_processor_id();
 
-        if (cpu_test_and_set(nr,cpu_initialized)) {
-                printk("CPU#%d ALREADY INITIALIZED!!!!!!!!!\n", nr);
-                for (;;) local_irq_enable();
-        }
-        cpus_initialized++;
-
         /*
          * Store processor id in lowcore (used e.g. in timer_interrupt)
          */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index bf81dd8266446f0fa97caf7f57bcbcbef74de1dc..c261941f34455f5a0faf8e0f9d5d2c41d99acc79 100644
@@ -5,6 +5,7 @@
  *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *               Heiko Carstens (heiko.carstens@de.ibm.com)
  *
  *  based on other smp stuff by 
  *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
@@ -57,6 +58,8 @@ cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
 unsigned long    cache_decay_ticks = 0;
 
+static struct task_struct *current_set[NR_CPUS];
+
 EXPORT_SYMBOL(cpu_online_map);
 
 /*
@@ -124,7 +127,6 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
        struct call_data_struct data;
        int cpus = num_online_cpus()-1;
 
-       /* FIXME: get cpu lock -hc */
        if (cpus <= 0)
                return 0;
 
@@ -211,7 +213,6 @@ EXPORT_SYMBOL(smp_call_function_on);
 
 static inline void do_send_stop(void)
 {
-        unsigned long dummy;
         int i, rc;
 
         /* stop all processors */
@@ -219,25 +220,23 @@ static inline void do_send_stop(void)
                 if (!cpu_online(i) || smp_processor_id() == i)
                        continue;
                do {
-                       rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
+                       rc = signal_processor(i, sigp_stop);
                } while (rc == sigp_busy);
        }
 }
 
 static inline void do_store_status(void)
 {
-        unsigned long low_core_addr;
-        unsigned long dummy;
         int i, rc;
 
         /* store status of all processors in their lowcores (real 0) */
         for (i =  0; i < NR_CPUS; i++) {
                 if (!cpu_online(i) || smp_processor_id() == i) 
                        continue;
-               low_core_addr = (unsigned long) lowcore_ptr[i];
                do {
-                       rc = signal_processor_ps(&dummy, low_core_addr, i,
-                                                sigp_store_status_at_address);
+                       rc = signal_processor_p(
+                               (__u32)(unsigned long) lowcore_ptr[i], i,
+                               sigp_store_status_at_address);
                } while(rc == sigp_busy);
         }
 }
@@ -265,8 +264,10 @@ static cpumask_t cpu_restart_map;
 
 static void do_machine_restart(void * __unused)
 {
+       static atomic_t cpuid = ATOMIC_INIT(-1);
+
        cpu_clear(smp_processor_id(), cpu_restart_map);
-       if (smp_processor_id() == 0) {
+       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
                /* Wait for all other cpus to enter do_machine_restart. */
                while (!cpus_empty(cpu_restart_map))
                        cpu_relax();
@@ -307,7 +308,9 @@ static void do_wait_for_stop(void)
 
 static void do_machine_halt(void * __unused)
 {
-       if (smp_processor_id() == 0) {
+       static atomic_t cpuid = ATOMIC_INIT(-1);
+
+       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                        cpcmd(vmhalt_cmd, NULL, 0);
@@ -324,7 +327,9 @@ void machine_halt_smp(void)
 
 static void do_machine_power_off(void * __unused)
 {
-       if (smp_processor_id() == 0) {
+       static atomic_t cpuid = ATOMIC_INIT(-1);
+
+       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                        cpcmd(vmpoff_cmd, NULL, 0);
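
All three paths above (restart, halt, power off; the sclp quiesce handler below does the same) now elect a coordinator with a compare-and-swap instead of hard-coding CPU 0, which may itself be offline. On s390, atomic_compare_and_swap(old, new, ptr) returns 0 when the swap succeeded, so exactly one CPU wins. The pattern in isolation (a sketch; do_the_work is hypothetical):

	static void do_machine_something(void *__unused)
	{
		static atomic_t cpuid = ATOMIC_INIT(-1);

		/* Only the first cpu to swap -1 -> its own id wins;
		 * everyone else sees the swap fail and just parks. */
		if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0)
			do_the_work();
		else
			for (;;)
				cpu_relax();
	}
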
@@ -482,7 +487,24 @@ void smp_ctl_clear_bit(int cr, int bit) {
  * Lets check how many CPUs we have.
  */
 
-void __init smp_check_cpus(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
+{
+       int cpu;
+
+       /*
+        * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+        */
+       for (cpu = 1; cpu < max_cpus; cpu++)
+               cpu_set(cpu, cpu_possible_map);
+}
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
 {
         int curr_cpu, num_cpus;
        __u16 boot_cpu_addr;
@@ -505,6 +527,8 @@ void __init smp_check_cpus(unsigned int max_cpus)
         printk("Boot cpu address %2X\n", boot_cpu_addr);
 }
 
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  *      Activate a secondary processor.
  */
@@ -536,26 +560,95 @@ int __devinit start_secondary(void *cpuvoid)
         return cpu_idle(NULL);
 }
 
-static struct task_struct *__devinit fork_by_hand(void)
+static void __init smp_create_idle(unsigned int cpu)
 {
-       struct pt_regs regs;
-       /* don't care about the psw and regs settings since we'll never
-          reschedule the forked task. */
-       memset(&regs,0,sizeof(struct pt_regs));
-       return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+       struct pt_regs regs;
+       struct task_struct *p;
+
+       /*
+        *  don't care about the psw and regs settings since we'll never
+        *  reschedule the forked task.
+        */
+       memset(&regs, 0, sizeof(struct pt_regs));
+       p = copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+       if (IS_ERR(p))
+               panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+
+       wake_up_forked_process(p);
+       init_idle(p, cpu);
+       unhash_process(p);
+       current_set[cpu] = p;
 }
 
-int __cpu_up(unsigned int cpu)
+/* Reserving and releasing of CPUs */
+
+static atomic_t smp_cpu_reserved[NR_CPUS];
+
+int
+smp_get_cpu(cpumask_t cpu_mask)
+{
+       int val, cpu;
+
+       /* Try to find an already reserved cpu. */
+       for_each_cpu_mask(cpu, cpu_mask) {
+               while ((val = atomic_read(&smp_cpu_reserved[cpu])) != 0) {
+                       if (!atomic_compare_and_swap(val, val + 1,
+                                                    &smp_cpu_reserved[cpu]))
+                               /* Found one. */
+                               goto out;
+               }
+       }
+       /* Reserve a new cpu from cpu_mask. */
+       for_each_cpu_mask(cpu, cpu_mask) {
+               atomic_inc(&smp_cpu_reserved[cpu]);
+               if (cpu_online(cpu))
+                       goto out;
+               atomic_dec(&smp_cpu_reserved[cpu]);
+       }
+       cpu = -ENODEV;
+out:
+       return cpu;
+}
+
+void
+smp_put_cpu(int cpu)
+{
+       atomic_dec(&smp_cpu_reserved[cpu]);
+}
+
+static inline int
+cpu_stopped(int cpu)
 {
-        struct task_struct *idle;
+       __u32 status;
+
+       /* Check for stopped state */
+       if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+               if (status & 0x40)
+                       return 1;
+       }
+       return 0;
+}
+
+/* Upping and downing of CPUs */
+
+int
+__cpu_up(unsigned int cpu)
+{
+       struct task_struct *idle;
         struct _lowcore    *cpu_lowcore;
         sigp_ccode          ccode;
+       int                 curr_cpu;
 
-       /*
-        *  Set prefix page for new cpu
-        */
+       for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+               __cpu_logical_map[cpu] = (__u16) curr_cpu;
+               if (cpu_stopped(cpu))
+                       break;
+       }
+
+       if (!cpu_stopped(cpu))
+               return -ENODEV;
 
-       ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
+       ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
                                   cpu, sigp_set_prefix);
        if (ccode){
                printk("sigp_set_prefix failed for cpu %d "
@@ -564,23 +657,7 @@ int __cpu_up(unsigned int cpu)
                return -EIO;
        }
 
-        /* We can't use kernel_thread since we must _avoid_ to reschedule
-           the child. */
-        idle = fork_by_hand();
-       if (IS_ERR(idle)){
-                printk("failed fork for CPU %d", cpu);
-               return -EIO;
-       }
-       wake_up_forked_process(idle);
-
-        /*
-         * We remove it from the pidhash and the runqueue
-         * once we got the process:
-         */
-       init_idle(idle, cpu);
-
-        unhash_process(idle);
-
+       idle = current_set[cpu];
         cpu_lowcore = lowcore_ptr[cpu];
        cpu_lowcore->save_area[15] = idle->thread.ksp;
        cpu_lowcore->kernel_stack = (unsigned long)
@@ -599,6 +676,65 @@ int __cpu_up(unsigned int cpu)
        return 0;
 }
 
+int
+__cpu_disable(void)
+{
+       unsigned long flags;
+       ec_creg_mask_parms cr_parms;
+
+       local_irq_save(flags);
+
+       if (atomic_read(&smp_cpu_reserved[smp_processor_id()])) {
+               local_irq_restore(flags);
+               return -EBUSY;
+       }
+
+       /* disable all external interrupts */
+
+       cr_parms.start_ctl = 0;
+       cr_parms.end_ctl = 0;
+       cr_parms.orvals[0] = 0;
+       cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
+                               1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+       smp_ctl_bit_callback(&cr_parms);
+
+       /* disable all I/O interrupts */
+
+       cr_parms.start_ctl = 6;
+       cr_parms.end_ctl = 6;
+       cr_parms.orvals[6] = 0;
+       cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
+                               1<<27 | 1<<26 | 1<<25 | 1<<24);
+       smp_ctl_bit_callback(&cr_parms);
+
+       /* disable most machine checks */
+
+       cr_parms.start_ctl = 14;
+       cr_parms.end_ctl = 14;
+       cr_parms.orvals[14] = 0;
+       cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+       smp_ctl_bit_callback(&cr_parms);
+
+       local_irq_restore(flags);
+       return 0;
+}
+
+void
+__cpu_die(unsigned int cpu)
+{
+       /* Wait until target cpu is down */
+       while (!cpu_stopped(cpu));
+       printk("Processor %d spun down\n", cpu);
+}
+
+void
+cpu_die(void)
+{
+       signal_processor(smp_processor_id(), sigp_stop);
+       BUG();
+       for(;;);
+}
+
 /*
  *     Cycle through the processors and setup structures.
  */
@@ -606,6 +742,7 @@ int __cpu_up(unsigned int cpu)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        unsigned long async_stack;
+       unsigned int cpu;
         int i;
 
         /* request the 0x1202 external interrupt */
@@ -632,13 +769,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
        }
        set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
+
+       for_each_cpu(cpu)
+               if (cpu != smp_processor_id())
+                       smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-       cpu_set(smp_processor_id(), cpu_online_map);
-       cpu_set(smp_processor_id(), cpu_possible_map);
-       S390_lowcore.percpu_offset = __per_cpu_offset[smp_processor_id()];
+       BUG_ON(smp_processor_id() != 0);
+
+       cpu_set(0, cpu_online_map);
+       cpu_set(0, cpu_possible_map);
+       S390_lowcore.percpu_offset = __per_cpu_offset[0];
+       current_set[0] = current;
 }
 
 void smp_cpus_done(unsigned int max_cpus)
@@ -679,3 +823,6 @@ EXPORT_SYMBOL(lowcore_ptr);
 EXPORT_SYMBOL(smp_ctl_set_bit);
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_get_cpu);
+EXPORT_SYMBOL(smp_put_cpu);
+
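
smp_get_cpu()/smp_put_cpu() let a driver reserve a CPU that must stay online; __cpu_disable() above refuses with -EBUSY while a reservation is held. A usage sketch (my_func/my_data are hypothetical):

	static int run_on_reserved_cpu(void (*my_func)(void *), void *my_data)
	{
		int cpu;

		cpu = smp_get_cpu(CPU_MASK_ALL);  /* reserve any online cpu */
		if (cpu < 0)
			return cpu;               /* -ENODEV: none found */

		/* cpu cannot go offline until the reservation is dropped */
		smp_call_function_on(my_func, my_data, 0, 1, cpu);

		smp_put_cpu(cpu);                 /* drop the reservation */
		return 0;
	}
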
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 095a5648c1cfc5a4bbd78f8c650640df1bb4f465..72d4f1e6ff7b70d14bd31d2c60056129243e77b5 100644
@@ -407,6 +407,14 @@ struct ctl_table_header *cmm_sysctl_header;
 static int
 cmm_init (void)
 {
+       int rc;
+
+       /* Prevent logical cpu 0 from being set offline. */
+       rc = smp_get_cpu(cpumask_of_cpu(0));
+       if (rc) {
+               printk(KERN_ERR "CMM: unable to reserve cpu 0\n");
+               return rc;
+       }
 #ifdef CONFIG_CMM_PROC
        cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
 #endif
@@ -430,6 +438,8 @@ cmm_exit(void)
 #ifdef CONFIG_CMM_IUCV
        smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
 #endif
+       /* Allow logical cpu 0 to be set offline again. */
+       smp_put_cpu(0);
 }
 
 module_init(cmm_init);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 2efa4daa17b68d19987500d83c06be23f610e103..047d305ccbab6606db2b50177b0d601bcbd2b934 100644
@@ -494,11 +494,12 @@ static struct sclp_register sclp_state_change_event = {
 static void
 do_load_quiesce_psw(void * __unused)
 {
+       static atomic_t cpuid = ATOMIC_INIT(-1);
        psw_t quiesce_psw;
-       unsigned long status;
+       __u32 status;
        int i;
 
-       if (smp_processor_id() != 0)
+       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
                signal_processor(smp_processor_id(), sigp_stop);
        /* Wait for all other cpus to enter stopped state */
        i = 1;
@@ -511,7 +512,7 @@ do_load_quiesce_psw(void * __unused)
                case sigp_order_code_accepted:
                case sigp_status_stored:
                        /* Check for stopped and check stop state */
-                       if (test_bit(6, &status) || test_bit(4, &status))
+                       if (status & 0x50)
                                i++;
                        break;
                case sigp_busy:
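
The sense status is now treated as a plain __u32 bit mask: bit 6 (0x40) means "stopped" and bit 4 (0x10) means "check stop", so status & 0x50 is exactly the old test_bit(6, ...) || test_bit(4, ...) pair. Spelled out (the SIGP_STAT_* names are hypothetical):

	#define SIGP_STAT_CHECK_STOP	0x10	/* bit 4 */
	#define SIGP_STAT_STOPPED	0x40	/* bit 6 */

	/* old: test_bit(6, &status) || test_bit(4, &status)
	 * new: */
	if (status & (SIGP_STAT_STOPPED | SIGP_STAT_CHECK_STOP))  /* 0x50 */
		i++;
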
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index 36790af32b760e3d5ba0ced22d6ae9aa361e6a2d..f1ddb496c4b07e0468769b42546be7eb2b8ff06c 100644
@@ -1,5 +1,5 @@
 /* 
- * $Id: iucv.c,v 1.33 2004/05/24 10:19:18 braunu Exp $
+ * $Id: iucv.c,v 1.34 2004/06/24 10:53:48 braunu Exp $
  *
  * IUCV network driver
  *
@@ -29,7 +29,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.33 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.34 $
  *
  */
 \f
@@ -177,9 +177,11 @@ static handler **iucv_pathid_table;
 static unsigned long max_connections;
 
 /**
- * declare_flag: is 0 when iucv_declare_buffer has not been called
+ * iucv_cpuid: contains the logical cpu number of the cpu which
+ * has declared the iucv buffer by issuing DECLARE_BUFFER.
+ * If no cpu has done the initialization iucv_cpuid contains -1.
  */
-static int declare_flag;
+static int iucv_cpuid = -1;
 /**
  * register_flag: is 0 when external interrupt has not been registered
  */
@@ -352,7 +354,7 @@ do { \
 static void
 iucv_banner(void)
 {
-       char vbuf[] = "$Revision: 1.33 $";
+       char vbuf[] = "$Revision: 1.34 $";
        char *version = vbuf;
 
        if ((version = strchr(version, ':'))) {
@@ -631,16 +633,16 @@ iucv_remove_pathid(__u16 pathid)
 }
 
 /**
- * iucv_declare_buffer_cpu0
- * Register at VM for subsequent IUCV operations. This is always
- * executed on CPU 0. Called from iucv_declare_buffer().
+ * iucv_declare_buffer_cpuid
+ * Register at VM for subsequent IUCV operations. This is executed
+ * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
  */
 static void
-iucv_declare_buffer_cpu0 (void *result)
+iucv_declare_buffer_cpuid (void *result)
 {
        iparml_db *parm;
 
-       if (!(result && (smp_processor_id() == 0)))
+       if (smp_processor_id() != iucv_cpuid)
                return;
        parm = (iparml_db *)grab_param();
        parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
@@ -650,16 +652,17 @@ iucv_declare_buffer_cpu0 (void *result)
 }
 
 /**
- * iucv_retrieve_buffer_cpu0:
- * Unregister IUCV usage at VM. This is always executed on CPU 0.
+ * iucv_retrieve_buffer_cpuid:
+ * Unregister IUCV usage at VM. This is always executed on the same
+ * cpu that registered the buffer to VM.
  * Called from iucv_retrieve_buffer().
  */
 static void
-iucv_retrieve_buffer_cpu0 (void *result)
+iucv_retrieve_buffer_cpuid (void *cpu)
 {
        iparml_control *parm;
 
-       if (smp_processor_id() != 0)
+       if (smp_processor_id() != iucv_cpuid)
                return;
        parm = (iparml_control *)grab_param();
        b2f0(RETRIEVE_BUFFER, parm);
@@ -676,18 +679,22 @@ iucv_retrieve_buffer_cpu0 (void *result)
 static int
 iucv_declare_buffer (void)
 {
-       ulong b2f0_result = 0x0deadbeef;
+       unsigned long flags;
+       ulong b2f0_result;
 
        iucv_debug(1, "entering");
-       preempt_disable();
-       if (smp_processor_id() == 0)
-               iucv_declare_buffer_cpu0(&b2f0_result);
-       else
-               smp_call_function(iucv_declare_buffer_cpu0, &b2f0_result, 0, 1);
-       preempt_enable();
-       iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
-       if (b2f0_result == 0x0deadbeef)
-           b2f0_result = 0xaa;
+       spin_lock_irqsave (&iucv_lock, flags);
+       if (iucv_cpuid == -1) {
+               /* Reserve any cpu for use by iucv. */
+               iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
+               spin_unlock_irqrestore (&iucv_lock, flags);
+               smp_call_function(iucv_declare_buffer_cpuid,
+                                 &b2f0_result, 0, 1);
+               iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
+       } else {
+               spin_unlock_irqrestore (&iucv_lock, flags);
+               b2f0_result = 0;
+       }
        iucv_debug(1, "exiting");
        return b2f0_result;
 }
@@ -702,14 +709,11 @@ static int
 iucv_retrieve_buffer (void)
 {
        iucv_debug(1, "entering");
-       if (declare_flag) {
-               preempt_disable();
-               if (smp_processor_id() == 0)
-                       iucv_retrieve_buffer_cpu0(0);
-               else
-                       smp_call_function(iucv_retrieve_buffer_cpu0, 0, 0, 1);
-               declare_flag = 0;
-               preempt_enable();
+       if (iucv_cpuid != -1) {
+               smp_call_function(iucv_retrieve_buffer_cpuid, 0, 0, 1);
+               /* Release the cpu reserved by iucv_declare_buffer. */
+               smp_put_cpu(iucv_cpuid);
+               iucv_cpuid = -1;
        }
        iucv_debug(1, "exiting");
        return 0;
@@ -862,38 +866,31 @@ iucv_register_program (__u8 pgmname[16],
                return NULL;
        }
 
-       if (declare_flag == 0) {
-               rc = iucv_declare_buffer();
-               if (rc) {
-                       char *err = "Unknown";
-                       iucv_remove_handler(new_handler);
-                       kfree(new_handler);
-                       switch(rc) {
-                               case 0x03:
-                                       err = "Directory error";
-                                       break;
-                               case 0x0a:
-                                       err = "Invalid length";
-                                       break;
-                               case 0x13:
-                                       err = "Buffer already exists";
-                                       break;
-                               case 0x3e:
-                                       err = "Buffer overlap";
-                                       break;
-                               case 0x5c:
-                                       err = "Paging or storage error";
-                                       break;
-                               case 0xaa:
-                                       err = "Function not called";
-                                       break;
-                       }
-                       printk(KERN_WARNING "%s: iucv_declare_buffer "
-                              "returned error 0x%02lx (%s)\n", __FUNCTION__, rc,
-                              err);
-                       return NULL;
+       rc = iucv_declare_buffer();
+       if (rc) {
+               char *err = "Unknown";
+               iucv_remove_handler(new_handler);
+               kfree(new_handler);
+               switch(rc) {
+               case 0x03:
+                       err = "Directory error";
+                       break;
+               case 0x0a:
+                       err = "Invalid length";
+                       break;
+               case 0x13:
+                       err = "Buffer already exists";
+                       break;
+               case 0x3e:
+                       err = "Buffer overlap";
+                       break;
+               case 0x5c:
+                       err = "Paging or storage error";
+                       break;
                }
-               declare_flag = 1;
+               printk(KERN_WARNING "%s: iucv_declare_buffer "
+                      "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
+               return NULL;
        }
        if (register_flag == 0) {
                /* request the 0x4000 external interrupt */
@@ -2190,11 +2187,11 @@ iucv_send2way_prmmsg_array (__u16 pathid,
 }
 
 void
-iucv_setmask_cpu0 (void *result)
+iucv_setmask_cpuid (void *result)
 {
         iparml_set_mask *parm;
 
-        if (smp_processor_id() != 0)
+        if (smp_processor_id() != iucv_cpuid)
                 return;
 
         iucv_debug(1, "entering");
@@ -2228,14 +2225,15 @@ iucv_setmask (int SetMaskFlag)
                ulong result;
                __u8  param;
        } u;
+       int cpu;
 
        u.param = SetMaskFlag;
-       preempt_disable();
-       if (smp_processor_id() == 0)
-               iucv_setmask_cpu0(&u);
+       cpu = get_cpu();
+       if (cpu == iucv_cpuid)
+               iucv_setmask_cpuid(&u);
        else
-               smp_call_function(iucv_setmask_cpu0, &u, 0, 1);
-       preempt_enable();
+               smp_call_function(iucv_setmask_cpuid, &u, 0, 1);
+       put_cpu();
 
        return u.result;
 }
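
The scheme above: the first declare reserves an arbitrary online CPU via smp_get_cpu(CPU_MASK_ALL), records it in iucv_cpuid, and every later IUCV instruction is funnelled to that CPU; the reservation keeps it from being set offline. The dispatch shape, modelled on iucv_setmask() above (run_on_owner/call_owner are hypothetical names):

	static int owner_cpuid = -1;	/* -1: no buffer declared yet */

	static void run_on_owner(void *data)
	{
		if (smp_processor_id() != owner_cpuid)
			return;	/* the broadcast reaches all other cpus */
		/* ... issue the IUCV instruction via b2f0() here ... */
	}

	static void call_owner(void *data)
	{
		int cpu = get_cpu();	/* no migration while we compare */

		if (cpu == owner_cpuid)
			run_on_owner(data);	/* owner: call directly */
		else
			smp_call_function(run_on_owner, data, 0, 1);
		put_cpu();
	}
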
diff --git a/include/asm-s390/sigp.h b/include/asm-s390/sigp.h
index d5583c78dc9b1858b7822d6c67aa64ac74983f3e..3979bc3858e2a4be93d6b5b6537ee7b2381c6866 100644
@@ -5,6 +5,7 @@
  *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *               Heiko Carstens (heiko.carstens@de.ibm.com)
  *
  *  sigp.h by D.J. Barrow (c) IBM 1999
  *  contains routines / structures for signalling other S/390 processors in an
@@ -72,17 +73,10 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
        sigp_ccode ccode;
 
        __asm__ __volatile__(
-#ifndef __s390x__
                "    sr     1,1\n"        /* parameter=0 in gpr 1 */
                "    sigp   1,%1,0(%2)\n"
                "    ipm    %0\n"
                "    srl    %0,28\n"
-#else /* __s390x__ */
-               "    sgr    1,1\n"        /* parameter=0 in gpr 1 */
-               "    sigp   1,%1,0(%2)\n"
-               "    ipm    %0\n"
-               "    srl    %0,28"
-#endif /* __s390x__ */
                : "=d" (ccode)
                : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
                : "cc" , "memory", "1" );
@@ -93,23 +87,16 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
  * Signal processor with parameter
  */
 extern __inline__ sigp_ccode
-signal_processor_p(unsigned long parameter,__u16 cpu_addr,
+signal_processor_p(__u32 parameter, __u16 cpu_addr,
                   sigp_order_code order_code)
 {
        sigp_ccode ccode;
        
        __asm__ __volatile__(
-#ifndef __s390x__
                "    lr     1,%1\n"       /* parameter in gpr 1 */
                "    sigp   1,%2,0(%3)\n"
                "    ipm    %0\n"
                "    srl    %0,28\n"
-#else /* __s390x__ */
-               "    lgr    1,%1\n"       /* parameter in gpr 1 */
-               "    sigp   1,%2,0(%3)\n"
-               "    ipm    %0\n"
-               "    srl    %0,28\n"
-#endif /* __s390x__ */
                : "=d" (ccode)
                : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
                   "a" (order_code)
@@ -121,27 +108,18 @@ signal_processor_p(unsigned long parameter,__u16 cpu_addr,
  * Signal processor with parameter and return status
  */
 extern __inline__ sigp_ccode
-signal_processor_ps(unsigned long *statusptr, unsigned long parameter,
+signal_processor_ps(__u32 *statusptr, __u32 parameter,
                    __u16 cpu_addr, sigp_order_code order_code)
 {
        sigp_ccode ccode;
        
        __asm__ __volatile__(
-#ifndef __s390x__
-               "    sr     2,2\n"        /* clear status so it doesn't contain rubbish if not saved. */
+               "    sr     2,2\n"        /* clear status */
                "    lr     3,%2\n"       /* parameter in gpr 3 */
                "    sigp   2,%3,0(%4)\n"
                "    st     2,%1\n"
                "    ipm    %0\n"
                "    srl    %0,28\n"
-#else /* __s390x__ */
-               "    sgr    2,2\n"        /* clear status so it doesn't contain rubbish if not saved. */
-               "    lgr    3,%2\n"       /* parameter in gpr 3 */
-               "    sigp   2,%3,0(%4)\n"
-               "    stg    2,%1\n"
-               "    ipm    %0\n"
-               "    srl    %0,28\n"
-#endif /* __s390x__ */
                : "=d" (ccode), "=m" (*statusptr)
                : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
                   "a" (order_code)
@@ -151,5 +129,3 @@ signal_processor_ps(unsigned long *statusptr, unsigned long parameter,
 }
 
 #endif /* __SIGP__ */
-
-
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 70d06261bb9e40bad53ae7a6635a815fcde2ec98..0590eccb051ab04c3657b6ed8fd3decb15c56f55 100644
@@ -5,6 +5,7 @@
  *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *               Heiko Carstens (heiko.carstens@de.ibm.com)
  */
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
@@ -47,6 +48,9 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
 
 #define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
 
+extern int smp_get_cpu(cpumask_t cpu_map);
+extern void smp_put_cpu(int cpu);
+
 extern __inline__ __u16 hard_smp_processor_id(void)
 {
         __u16 cpu_address;
@@ -57,10 +61,17 @@ extern __inline__ __u16 hard_smp_processor_id(void)
 
 #define cpu_logical_map(cpu) (cpu)
 
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+extern void cpu_die (void) __attribute__ ((noreturn));
+extern int __cpu_up (unsigned int cpu);
+
 #endif
 
 #ifndef CONFIG_SMP
 #define smp_call_function_on(func,info,nonatomic,wait,cpu)      ({ 0; })
+#define smp_get_cpu(cpu) ({ 0; })
+#define smp_put_cpu(cpu) ({ 0; })
 #endif
 
 #endif