p = buf;
- if ((num_online_cpus() == 1) &&
+ if ((num_possible_cpus() == 1) &&
!(error = apm_get_power_status(&bx, &cx, &dx))) {
ac_line_status = (bx >> 8) & 0xff;
battery_status = bx & 0xff;
}
}
- if (debug && (num_online_cpus() == 1)) {
+ if (debug && (num_possible_cpus() == 1)) {
error = apm_get_power_status(&bx, &cx, &dx);
if (error)
printk(KERN_INFO "apm: power status not available\n");
pm_power_off = apm_power_off;
register_sysrq_key('o', &sysrq_poweroff_op);
- if (num_online_cpus() == 1) {
+ if (num_possible_cpus() == 1) {
#if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
console_blank_hook = apm_console_blank;
#endif
printk(KERN_NOTICE "apm: disabled on user request.\n");
return -ENODEV;
}
- /* FIXME: When boot code changes, this will need to be
- deactivated when/if a CPU comes up --RR */
- if ((num_online_cpus() > 1) && !power_off) {
+ if ((num_possible_cpus() > 1) && !power_off) {
printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n");
return -ENODEV;
}
kernel_thread(apm, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD);
- /* FIXME: When boot code changes, this will need to be
- deactivated when/if a CPU comes up --RR */
- if (num_online_cpus() > 1) {
+ if (num_possible_cpus() > 1) {
printk(KERN_NOTICE
"apm: disabled - APM is not SMP safe (power off active).\n");
return 0;
* Maciej W. Rozycki : Bits for genuine 82489DX APICs
* Martin J. Bligh : Added support for multi-quad systems
* Dave Jones : Report invalid combinations of Athlon CPUs.
- */
+ * Rusty Russell	: Hacked into shape for new "hotplug" boot process.
+ */
#include <linux/config.h>
#include <linux/init.h>
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;
-/* Setup configured maximum number of CPUs to activate */
-static int __initdata max_cpus = NR_CPUS;
-
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int __initdata phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
unsigned long cpu_online_map;
static volatile unsigned long cpu_callin_map;
-static volatile unsigned long cpu_callout_map;
+volatile unsigned long cpu_callout_map;
+static unsigned long smp_commenced_mask;
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
/* Set when the idlers are all forked */
int smp_threads_ready;
-/*
- * Setup routine for controlling SMP activation
- *
- * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
- * activation entirely (the MPS table probe still happens, though).
- *
- * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
- * greater than 0, limits the maximum number of CPUs activated in
- * SMP mode to <NUM>.
- */
-
-static int __init nosmp(char *str)
-{
- max_cpus = 0;
- return 1;
-}
-
-__setup("nosmp", nosmp);
-
-static int __init maxcpus(char *str)
-{
- get_option(&str, &max_cpus);
- return 1;
-}
-
-__setup("maxcpus=", maxcpus);
-
/*
* Trampoline 80x86 program as an array.
*/
* a given CPU
*/
-void __init smp_store_cpu_info(int id)
+static void __init smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c = cpu_data + id;
;
}
-/*
- * Architecture specific routine called by the kernel just before init is
- * fired off. This allows the BP to have everything in order [we hope].
- * At the end of this all the APs will hit the system scheduling and off
- * we go. Each AP will load the system gdt's and jump through the kernel
- * init into idle(). At this point the scheduler will one day take over
- * and give them jobs to do. smp_callin is a standard routine
- * we use to track CPUs as they power up.
- */
-
-static atomic_t smp_commenced = ATOMIC_INIT(0);
-
-void __init smp_commence(void)
-{
- /*
- * Lets the callins below out of their loop.
- */
- Dprintk("Setting commenced=1, go go go\n");
-
- wmb();
- atomic_set(&smp_commenced,1);
-}
-
/*
* TSC synchronization.
*
unsigned long one_usec;
int buggy = 0;
- printk("checking TSC synchronization across CPUs: ");
+ printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());
one_usec = ((1<<30)/fast_gettimeoffset_quotient)*(1<<2);
/*
* all APs synchronize but they loop on '== num_cpus'
*/
- while (atomic_read(&tsc_count_start) != num_online_cpus()-1)
+ while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
mb();
atomic_set(&tsc_count_stop, 0);
wmb();
/*
* Wait for all APs to leave the synchronization point:
*/
- while (atomic_read(&tsc_count_stop) != num_online_cpus()-1)
+ while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
mb();
atomic_set(&tsc_count_start, 0);
wmb();
sum = 0;
for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i)) {
+ if (test_bit(i, &cpu_callout_map)) {
t0 = tsc_values[i];
sum += t0;
}
}
- avg = div64(sum, num_online_cpus());
+ avg = div64(sum, num_booting_cpus());
sum = 0;
for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_online(i))
+ if (!test_bit(i, &cpu_callout_map))
continue;
delta = tsc_values[i] - avg;
if (delta < 0)
int i;
/*
- * num_online_cpus is not necessarily known at the time
+ * Not every cpu is online at the time
* this gets called, so we first wait for the BP to
* finish SMP initialization:
*/
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&tsc_count_start);
- while (atomic_read(&tsc_count_start) != num_online_cpus())
+ while (atomic_read(&tsc_count_start) != num_booting_cpus())
mb();
rdtscll(tsc_values[smp_processor_id()]);
write_tsc(0, 0);
atomic_inc(&tsc_count_stop);
- while (atomic_read(&tsc_count_stop) != num_online_cpus()) mb();
+ while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
}
}
#undef NR_LOOPS
*/
phys_id = GET_APIC_ID(apic_read(APIC_ID));
cpuid = smp_processor_id();
- if (test_and_set_bit(cpuid, &cpu_online_map)) {
+ if (test_bit(cpuid, &cpu_callin_map)) {
printk("huh, phys CPU#%d, CPU#%d already present??\n",
phys_id, cpuid);
BUG();
*/
cpu_init();
smp_callin();
- while (!atomic_read(&smp_commenced))
+ while (!test_bit(smp_processor_id(), &smp_commenced_mask))
rep_nop();
+ setup_secondary_APIC_clock();
enable_APIC_timer();
/*
* low-memory mappings have been cleared, flush them from
* the local TLBs too.
*/
local_flush_tlb();
-
+ set_bit(smp_processor_id(), &cpu_online_map);
+ wmb();
return cpu_idle();
}
unmap_cpu_to_boot_apicid(cpu, apicid);
clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
- clear_bit(cpu, &cpu_online_map); /* was set in smp_callin() */
cpucount--;
}
int cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-void __init smp_boot_cpus(void)
+static void __init smp_boot_cpus(unsigned int max_cpus)
{
int apicid, cpu, bit;
* We have the boot CPU online for sure.
*/
set_bit(0, &cpu_online_map);
+ set_bit(0, &cpu_callout_map);
boot_cpu_logical_apicid = logical_smp_processor_id();
map_cpu_to_boot_apicid(0, boot_cpu_apicid);
#ifndef CONFIG_VISWS
io_apic_irqs = 0;
#endif
- cpu_online_map = phys_cpu_present_map = 1;
+ phys_cpu_present_map = 1;
if (APIC_init_uniprocessor())
printk(KERN_NOTICE "Local APIC not detected."
" Using dummy APIC emulation.\n");
- goto smp_done;
+ return;
}
/*
#ifndef CONFIG_VISWS
io_apic_irqs = 0;
#endif
- cpu_online_map = phys_cpu_present_map = 1;
- goto smp_done;
+ phys_cpu_present_map = 1;
+ return;
}
verify_local_APIC();
#ifndef CONFIG_VISWS
io_apic_irqs = 0;
#endif
- cpu_online_map = phys_cpu_present_map = 1;
- goto smp_done;
+ phys_cpu_present_map = 1;
+ return;
}
connect_bsp_APIC();
} else {
unsigned long bogosum = 0;
for (cpu = 0; cpu < NR_CPUS; cpu++)
- if (cpu_online_map & (1<<cpu))
+ if (cpu_callout_map & (1<<cpu))
bogosum += cpu_data[cpu].loops_per_jiffy;
printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount+1,
for (cpu = 0; cpu < NR_CPUS; cpu++) {
int i;
- if (!cpu_online(cpu)) continue;
+ if (!test_bit(cpu, &cpu_callout_map)) continue;
for (i = 0; i < NR_CPUS; i++) {
- if (i == cpu || !cpu_online(i))
+ if (i == cpu || !test_bit(i, &cpu_callout_map))
continue;
if (phys_proc_id[cpu] == phys_proc_id[i]) {
cpu_sibling_map[cpu] = i;
setup_IO_APIC();
#endif
- /*
- * Set up all local APIC timers in the system:
- */
- setup_APIC_clocks();
+ setup_boot_APIC_clock();
/*
* Synchronize the TSC with the AP
*/
if (cpu_has_tsc && cpucount)
synchronize_tsc_bp();
+}
+
+/* These are wrappers to interface to the new boot process. Someone
+   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
+/*
+ * smp_prepare_cpus - arch hook called early by the generic boot code.
+ * @max_cpus: upper bound on CPUs to activate (from "maxcpus=" handling,
+ *            now parsed by generic code — NOTE(review): confirm caller).
+ *
+ * Thin wrapper: all the real work still lives in smp_boot_cpus().
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_boot_cpus(max_cpus);
+}
-smp_done:
+/*
+ * __cpu_up - release an already-booted secondary CPU into the scheduler.
+ * @cpu: logical CPU number; must have been started by smp_prepare_cpus().
+ *
+ * Returns 0 on success; -ENOSYS if this CPU was already released once
+ * (true re-plug after boot is not supported by this x86 code yet);
+ * -EIO if the CPU never reached smp_callin() (cpu_callin_map bit unset).
+ * On success, spins until the CPU marks itself online in cpu_online_map.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+ /* This only works at boot for x86. See "rewrite" above. */
+ if (test_bit(cpu, &smp_commenced_mask))
+ return -ENOSYS;
+
+ /* In case one didn't come up */
+ if (!test_bit(cpu, &cpu_callin_map))
+ return -EIO;
+
+ /* Unleash the CPU! */
+ set_bit(cpu, &smp_commenced_mask);
+ while (!test_bit(cpu, &cpu_online_map))
+ mb();
+ return 0;
+}
+
+/*
+ * smp_cpus_done - arch hook run after every CPU has been brought up.
+ * @max_cpus: unused here.
+ *
+ * Only remaining work is zap_low_mappings(); presumably the low identity
+ * mappings were kept alive for the AP boot trampoline and can be dropped
+ * once no further CPUs will boot — NOTE(review): confirm against trampoline
+ * setup, which is outside this hunk.
+ */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
 zap_low_mappings();
 }