jiffies_to_clock_t(iowait),
jiffies_to_clock_t(irq),
jiffies_to_clock_t(softirq));
- for_each_online_cpu(i) {
+ for_each_cpu(i) {
seq_printf(p, "cpu%d %u %u %u %u %u %u %u\n",
i,
jiffies_to_clock_t(kstat_cpu(i).cpustat.user),
static int stat_open(struct inode *inode, struct file *file)
{
- unsigned size = 4096 * (1 + num_online_cpus() / 32);
+ unsigned size = 4096 * (1 + num_possible_cpus() / 32);
char *buf;
struct seq_file *m;
int res;
#include <linux/sysdev.h>
#include <linux/node.h>
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
#include <asm/semaphore.h>
struct cpu {
extern struct semaphore cpucontrol;
#define lock_cpu_hotplug() down(&cpucontrol)
#define unlock_cpu_hotplug() up(&cpucontrol)
+#define lock_cpu_hotplug_interruptible() down_interruptible(&cpucontrol)
+#define hotcpu_notifier(fn, pri) { \
+ static struct notifier_block fn##_nb = { fn, pri }; \
+ register_cpu_notifier(&fn##_nb); \
+}
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#else
#define lock_cpu_hotplug() do { } while (0)
#define unlock_cpu_hotplug() do { } while (0)
+#define lock_cpu_hotplug_interruptible() 0
+#define hotcpu_notifier(fn, pri)
+
+/* Without CONFIG_HOTPLUG_CPU, CPUs never go offline once they come online */
+#define cpu_is_offline(cpu) 0
#endif
#endif /* _LINUX_CPU_H_ */
extern cpumask_t cpu_possible_map;
#define num_online_cpus() cpus_weight(cpu_online_map)
+#define num_possible_cpus() cpus_weight(cpu_possible_map)
#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map)
#define for_each_online_cpu(cpu) for_each_cpu_mask(cpu, cpu_online_map)
#else
#define cpu_online_map cpumask_of_cpu(0)
+#define cpu_possible_map cpumask_of_cpu(0)
#define num_online_cpus() 1
+#define num_possible_cpus() 1
#define cpu_online(cpu) ({ BUG_ON((cpu) != 0); 1; })
#define cpu_possible(cpu) ({ BUG_ON((cpu) != 0); 1; })
log_level_unknown = 1;
}
- if (!cpu_online(smp_processor_id())) {
+ if (!cpu_online(smp_processor_id()) && !system_running) {
/*
* Some console drivers may assume that per-cpu resources have
* been allocated. So don't allow them to be called by this
goto out_unlock;
retval = 0;
- cpus_and(mask, p->cpus_allowed, cpu_online_map);
+ cpus_and(mask, p->cpus_allowed, cpu_possible_map);
out_unlock:
read_unlock(&tasklist_lock);