# someone write a better help text please.
config K8_NUMA
bool "K8 NUMA support"
- depends on SMP
+ depends on SMP && NOT_WORKING
help
Enable NUMA (Non-Uniform Memory Access) support for
AMD Opteron multiprocessor systems. The kernel will try to allocate
memory used by a CPU on the local memory controller of that CPU.
int dont_enable_local_apic __initdata = 0;
-int prof_multiplier[NR_CPUS] = { 1, };
-int prof_old_multiplier[NR_CPUS] = { 1, };
-DEFINE_PER_CPU(int, prof_counter) = 1;
+static DEFINE_PER_CPU(int, prof_multiplier) = 1;
+static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
+static DEFINE_PER_CPU(int, prof_counter) = 1;
+
+void enable_NMI_through_LVT0 (void * dummy)
+{
+ unsigned int v, ver;
+
+ ver = apic_read(APIC_LVR);
+ ver = GET_APIC_VERSION(ver);
+ v = APIC_DM_NMI; /* unmask and set to NMI */
+ apic_write_around(APIC_LVT0, v);
+}
int get_maxlvt(void)
{
v = apic_read(APIC_LVR);
ver = GET_APIC_VERSION(v);
- /* 82489DXs do not report # of LVT entries. */
- maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2;
+ maxlvt = GET_APIC_MAXLVT(v);
return maxlvt;
}
* accordingly.
*/
for (i = 0; i < NR_CPUS; ++i)
- prof_multiplier[i] = multiplier;
+ per_cpu(prof_multiplier, i) = multiplier;
return 0;
}
*
* Interrupts are already masked off at this point.
*/
- per_cpu(prof_counter, cpu) = prof_multiplier[cpu];
- if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
+ per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
+ if (per_cpu(prof_counter, cpu) !=
+ per_cpu(prof_old_multiplier, cpu)) {
__setup_APIC_LVTT(calibration_result/
per_cpu(prof_counter, cpu));
- prof_old_multiplier[cpu] = per_cpu(prof_counter, cpu);
+ per_cpu(prof_old_multiplier, cpu) =
+ per_cpu(prof_counter, cpu);
}
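
For reference, the per-CPU idiom used throughout this hunk: DEFINE_PER_CPU declares one instance of the variable per CPU, and per_cpu(var, cpu) names a specific CPU's copy, replacing the old NR_CPUS-sized arrays. A minimal sketch, illustrative only and not part of the patch:

    #include <linux/percpu.h>  /* header location varies by tree */

    static DEFINE_PER_CPU(int, prof_multiplier) = 1;

    static void set_all_multipliers(int multiplier)
    {
            int i;

            /* per_cpu() selects the copy belonging to CPU i */
            for (i = 0; i < NR_CPUS; i++)
                    per_cpu(prof_multiplier, i) = multiplier;
    }
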
#ifdef CONFIG_SMP
* A newly forked process directly context switches into this.
*/
ENTRY(ret_from_fork)
-#if CONFIG_SMP || CONFIG_PREEMPT
+ movq %rax,%rdi /* prev task, returned by __switch_to -> arg1 */
call schedule_tail
-#endif
GET_THREAD_INFO(%rcx)
bt $TIF_SYSCALL_TRACE,threadinfo_flags(%rcx)
jc rff_trace
rff_trace:
movq %rsp,%rdi
call syscall_trace
+ GET_THREAD_INFO(%rcx)
jmp rff_action
/*
swapgs
gs_change:
movl %edi,%gs
-2: sfence /* workaround */
+2: mfence /* workaround */
swapgs
popf
ret
void __init UNEXPECTED_IO_APIC(void)
{
+#if 0
printk(KERN_WARNING " WARNING: unexpected IO-APIC, please mail\n");
printk(KERN_WARNING " to linux-smp@vger.kernel.org\n");
+#endif
}
void __init print_IO_APIC(void)
*/
static struct hw_interrupt_type ioapic_edge_irq_type = {
- "IO-APIC-edge",
- startup_edge_ioapic_irq,
- shutdown_edge_ioapic_irq,
- enable_edge_ioapic_irq,
- disable_edge_ioapic_irq,
- ack_edge_ioapic_irq,
- end_edge_ioapic_irq,
- set_ioapic_affinity,
+ .typename = "IO-APIC-edge",
+ .startup = startup_edge_ioapic_irq,
+ .shutdown = shutdown_edge_ioapic_irq,
+ .enable = enable_edge_ioapic_irq,
+ .disable = disable_edge_ioapic_irq,
+ .ack = ack_edge_ioapic_irq,
+ .end = end_edge_ioapic_irq,
+ .set_affinity = set_ioapic_affinity,
};
static struct hw_interrupt_type ioapic_level_irq_type = {
- "IO-APIC-level",
- startup_level_ioapic_irq,
- shutdown_level_ioapic_irq,
- enable_level_ioapic_irq,
- disable_level_ioapic_irq,
- mask_and_ack_level_ioapic_irq,
- end_level_ioapic_irq,
- set_ioapic_affinity,
+ .typename = "IO-APIC-level",
+ .startup = startup_level_ioapic_irq,
+ .shutdown = shutdown_level_ioapic_irq,
+ .enable = enable_level_ioapic_irq,
+ .disable = disable_level_ioapic_irq,
+ .ack = mask_and_ack_level_ioapic_irq,
+ .end = end_level_ioapic_irq,
+ .set_affinity = set_ioapic_affinity,
};
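
The initializer conversion above is behavior-neutral C99: designated initializers bind by field name rather than position, so these structs no longer break silently if hw_interrupt_type gains or reorders members, and omitted members are implicitly zeroed. A reduced sketch of the idiom, using a hypothetical ops struct:

    struct ops {
            char *name;
            void (*startup)(unsigned int irq);
            void (*ack)(unsigned int irq);
    };

    /* positional form pairs values with whatever field happens to sit
     * in that slot; the designated form is order-independent and
     * leaves .startup implicitly NULL. */
    static struct ops example_ops = {
            .name = "example",
            .ack  = NULL,
    };
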
static inline void init_IO_APIC_traps(void)
static void end_lapic_irq (unsigned int i) { /* nothing */ }
static struct hw_interrupt_type lapic_irq_type = {
- "local-APIC-edge",
- NULL, /* startup_irq() not used for IRQ0 */
- NULL, /* shutdown_irq() not used for IRQ0 */
- enable_lapic_irq,
- disable_lapic_irq,
- ack_lapic_irq,
- end_lapic_irq
+ .typename = "local-APIC-edge",
+ .startup = NULL, /* startup_irq() not used for IRQ0 */
+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
+ .enable = enable_lapic_irq,
+ .disable = disable_lapic_irq,
+ .ack = ack_lapic_irq,
+ .end = end_lapic_irq,
};
-void enable_NMI_through_LVT0 (void * dummy)
-{
- unsigned int v, ver;
-
- ver = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(ver);
- v = APIC_DM_NMI; /* unmask and set to NMI */
- if (!APIC_INTEGRATED(ver)) /* 82489DX */
- v |= APIC_LVT_LEVEL_TRIGGER;
- apic_write_around(APIC_LVT0, v);
-}
-
static void setup_nmi (void)
{
/*
/*
* this changes the io permissions bitmap in the current task.
*/
-asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
struct thread_struct * t = &current->thread;
struct tss_struct * tss;
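
The int-to-long return type matters because of the x86-64 syscall convention: the return value travels in all 64 bits of %rax, and an int-returning function leaves the upper half undefined, so a negative errno may reach userspace without sign extension. A hypothetical illustration of the failure mode, not kernel code:

    /* what userspace can see if only the low 32 bits are defined:
     * -1 zero-extends to 4294967295, which the libc wrapper no
     * longer recognizes as an error value. */
    long as_seen_by_user(int ret)
    {
            return (unsigned int)ret;
    }
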
if (!(m->mpc_cpuflag & CPU_ENABLED))
return;
- printk("Processor #%d %d:%d APIC version %d\n",
+ printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
m->mpc_apicid,
(m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
(m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
m->mpc_apicver);
- if (m->mpc_featureflag&(1<<0))
- Dprintk(" Floating point unit present.\n");
- if (m->mpc_featureflag&(1<<7))
- Dprintk(" Machine Exception supported.\n");
- if (m->mpc_featureflag&(1<<8))
- Dprintk(" 64 bit compare & exchange supported.\n");
- if (m->mpc_featureflag&(1<<9))
- Dprintk(" Internal APIC present.\n");
-
if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
Dprintk(" Bootup CPU\n");
boot_cpu_id = m->mpc_apicid;
num_processors++;
if (m->mpc_apicid > MAX_APICS) {
- printk("Processor #%d INVALID. (Max ID: %d).\n",
+ printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
m->mpc_apicid, MAX_APICS);
return;
}
* Validate version
*/
if (ver == 0x0) {
- printk("BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
+ printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
ver = 0x10;
}
apic_version[m->mpc_apicid] = ver;
} else if (strncmp(str, "MCA", 3) == 0) {
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
} else {
- printk("Unknown bustype %s\n", str);
- panic("cannot handle bus - mail to linux-smp@vger.kernel.org");
+ printk(KERN_ERR "Unknown bustype %s\n", str);
}
}
printk("I/O APIC #%d Version %d at 0x%X.\n",
m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
if (nr_ioapics >= MAX_IO_APICS) {
- printk("Max # of I/O APICs (%d) exceeded (found %d).\n",
+ printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
MAX_IO_APICS, nr_ioapics);
panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
}
}
memcpy(str,mpc->mpc_oem,8);
str[8]=0;
- printk("OEM ID: %s ",str);
+ printk(KERN_INFO "OEM ID: %s ",str);
memcpy(str,mpc->mpc_productid,12);
str[12]=0;
- printk("Product ID: %s ",str);
+ printk(KERN_INFO "Product ID: %s ",str);
- printk("APIC at: 0x%X\n",mpc->mpc_lapic);
+ printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic);
/* save the local APIC address, it might be non-default */
if (!acpi_lapic)
* If it does, we assume it's valid.
*/
if (mpc_default_type == 5) {
- printk("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
- printk("ELCR contains invalid data... not using ELCR\n");
+ printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
else {
- printk("Using ELCR to identify PCI interrupts\n");
+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
ELCR_fallback = 1;
}
}
bus.mpc_busid = 0;
switch (mpc_default_type) {
default:
- printk("???\nUnknown standard configuration %d\n",
+ printk(KERN_ERR "???\nUnknown standard configuration %d\n",
mpc_default_type);
/* fall through */
case 1:
printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
if (mpf->mpf_feature2 & (1<<7)) {
- printk(" IMCR and PIC compatibility mode.\n");
+ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
pic_mode = 1;
} else {
- printk(" Virtual Wire compatibility mode.\n");
+ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
pic_mode = 0;
}
*/
if (mpf->mpf_feature1 != 0) {
- printk("Default MP configuration #%d\n", mpf->mpf_feature1);
+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
construct_default_ISA_mptable(mpf->mpf_feature1);
} else if (mpf->mpf_physptr) {
if (!mp_irq_entries) {
struct mpc_config_bus bus;
- printk("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
bus.mpc_type = MP_BUS;
bus.mpc_busid = 0;
} else
BUG();
- printk("Processors: %d\n", num_processors);
+ printk(KERN_INFO "Processors: %d\n", num_processors);
/*
* Only use the first configuration found.
*/
static int __init smp_scan_config (unsigned long base, unsigned long length)
{
+ extern void __bad_mpf_size(void);
unsigned int *bp = phys_to_virt(base);
struct intel_mp_floating *mpf;
Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
if (sizeof(*mpf) != 16)
- printk("Error: MPF size\n");
+ __bad_mpf_size();
while (length > 0) {
mpf = (struct intel_mp_floating *)bp;
|| (mpf->mpf_specification == 4)) ) {
smp_found_config = 1;
- printk("found SMP MP-table at %08lx\n",
+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
virt_to_phys(mpf));
reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE);
if (mpf->mpf_physptr)
address = *(unsigned short *)phys_to_virt(0x40E);
address <<= 4;
smp_scan_config(address, 0x1000);
- if (smp_found_config)
- printk(KERN_WARNING "WARNING: MP table in the EBDA can be UNSAFE, contact linux-smp@vger.kernel.org if you experience SMP problems!\n");
}
/*
mp_ioapic_routing[idx].irq_end = irq_base +
io_apic_get_redir_entries(idx);
- printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
"IRQ %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
mp_ioapic_routing[idx].irq_start,
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
- int cpu;
+ int cpu = safe_smp_processor_id();
- nmi_enter();
+ init_tss[cpu].ist[NMI_STACK] -= 2048; /* this shouldn't be needed. */
- cpu = smp_processor_id();
+ nmi_enter();
add_pda(__nmi_count,1);
default_do_nmi(regs);
nmi_exit();
+
+ init_tss[cpu].ist[NMI_STACK] += 2048;
}
void set_nmi_callback(nmi_callback_t callback)
* - fold all the options into a flag word and test it with a single test.
* - could test fs/gs bitsliced
*/
-void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
tss->io_map_base = INVALID_IO_BITMAP_OFFSET;
}
}
+
+ return prev_p;
}
/*
if (!(current->ptrace & PT_PTRACED))
return;
- current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0);
- current->state = TASK_STOPPED;
- notify_parent(current, SIGCHLD);
- schedule();
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
extern int root_mountflags;
extern char _text, _etext, _edata, _end;
-static int disable_x86_fxsr __initdata = 0;
-
char command_line[COMMAND_LINE_SIZE];
char saved_command_line[COMMAND_LINE_SIZE];
#endif
}
-#ifndef CONFIG_X86_TSC
-static int tsc_disable __initdata = 0;
-
-static int __init tsc_setup(char *str)
-{
- tsc_disable = 1;
- return 1;
-}
-
-__setup("notsc", tsc_setup);
-#endif
-
static int __init get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
- unsigned int n, dummy, ecx, edx;
+ unsigned int n, dummy, eax, ebx, ecx, edx;
n = cpuid_eax(0x80000000);
if (n >= 0x80000005) {
- cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
- c->x86_cache_size=(ecx>>24)+(edx>>24);
+ c->x86_cache_size = (ecx>>24)+(edx>>24);
+ /* DTLB and ITLB together, but only 4K */
+ c->x86_tlbsize = ((ebx >> 16) & 0xff) + (ebx & 0xff);
}
- if (n < 0x80000006)
- return;
-
+ if (n >= 0x80000006) {
+ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-	ecx = cpuid_ecx(0x80000006);
c->x86_cache_size = ecx >> 16;
+ c->x86_tlbsize += ((ebx >> 16) & 0xff) + (ebx & 0xff);
printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
c->x86_cache_size, ecx & 0xFF);
+ }
+
+ if (n >= 0x80000007)
+ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
+ if (n >= 0x80000008) {
+ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
+ }
}
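
For reference, the AMD extended-CPUID fields decoded above, per the published leaf layouts; this helper is illustrative, not part of the patch:

    struct amd_l1_info {                    /* hypothetical helper */
            unsigned dtlb_4k, itlb_4k;      /* TLB entries, 4K pages */
            unsigned l1d_kb, l1d_line;      /* L1 D-cache size/line  */
    };

    static void decode_80000005(unsigned ebx, unsigned ecx,
                                struct amd_l1_info *info)
    {
            info->dtlb_4k  = (ebx >> 16) & 0xff;    /* EBX[23:16] */
            info->itlb_4k  = ebx & 0xff;            /* EBX[7:0]   */
            info->l1d_kb   = ecx >> 24;             /* ECX[31:24] */
            info->l1d_line = ecx & 0xff;            /* ECX[7:0]   */
    }

Leaf 0x80000007 returns the power-management bits (ts/fid/vid/ttp) in EDX, and leaf 0x80000008 packs the physical address width in EAX[7:0] and the virtual width in EAX[15:8], matching the x86_power, x86_phys_bits and x86_virt_bits assignments above.
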
char *model_names[16];
};
-int __init x86_fxsr_setup(char * s)
-{
- disable_x86_fxsr = 1;
- return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-
-
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
/* Note that the vendor-specific code below might override */
/* Intel-defined flags: level 0x00000001 */
- if ( c->cpuid_level >= 0x00000001 ) {
+ if (c->cpuid_level >= 0x00000001) {
__u32 misc;
cpuid(0x00000001, &tfms, &misc, &junk,
&c->x86_capability[0]);
- c->x86 = (tfms >> 8) & 15;
- c->x86_model = (tfms >> 4) & 15;
- c->x86_mask = tfms & 15;
+ c->x86 = (tfms >> 8) & 0xf;
+ c->x86_model = (tfms >> 4) & 0xf;
+ c->x86_mask = tfms & 0xf;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
if (c->x86_capability[0] & (1<<19))
c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
} else {
break;
}
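
The widening above implements the extended family/model encoding: on family-0xf parts the effective family is the base family plus EAX bits 27:20, and bits 19:16 become the model's high nibble. A worked decode, with an illustrative value:

    /* tfms = 0x00100f42:
     *   base family = (tfms >> 8)  & 0xf  = 0xf
     *   ext. family = (tfms >> 20) & 0xff = 0x01 -> family 0x10
     *   base model  = (tfms >> 4)  & 0xf  = 0x4
     *   ext. model  = (tfms >> 16) & 0xf  = 0x0  -> model 0x04
     *   stepping    =  tfms        & 0xf  = 0x2
     */
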
- printk(KERN_DEBUG "CPU: After vendor init, caps: %08x %08x %08x %08x\n",
- c->x86_capability[0],
- c->x86_capability[1],
- c->x86_capability[2],
- c->x86_capability[3]);
-
- /*
- * The vendor-specific functions might have changed features. Now
- * we do "generic changes."
- */
-
- /* TSC disabled? */
-#ifndef CONFIG_X86_TSC
- if ( tsc_disable )
- clear_bit(X86_FEATURE_TSC, &c->x86_capability);
-#endif
-
- /* FXSR disabled? */
- if (disable_x86_fxsr) {
- clear_bit(X86_FEATURE_FXSR, &c->x86_capability);
- clear_bit(X86_FEATURE_XMM, &c->x86_capability);
- }
-
- /* Now the feature flags better reflect actual CPU features! */
-
- printk(KERN_DEBUG "CPU: After generic, caps: %08x %08x %08x %08x\n",
- c->x86_capability[0],
- c->x86_capability[1],
- c->x86_capability[2],
- c->x86_capability[3]);
-
/*
* On SMP, boot_cpu_data holds the common feature set between
* all CPUs; so make sure that we indicate which features are
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
+ static char *x86_power_flags[] = {
+ "ts", /* temperature sensor */
+ "fid", /* frequency id control */
+ "vid", /* voltage id control */
+ "ttp", /* thermal trip */
+ };
+
#ifdef CONFIG_SMP
if (!(cpu_online_map & (1<<(c-cpu_data))))
seq_printf(m, " %s", x86_cap_flags[i]);
}
- seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
+ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
+ if (c->x86_tlbsize > 0)
+ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
+ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
+
+ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
+ c->x86_phys_bits, c->x86_virt_bits);
+
+ seq_printf(m, "power management:");
+ {
+ int i;
+ for (i = 0; i < 32; i++)
+ if (c->x86_power & (1 << i)) {
+ if (i < ARRAY_SIZE(x86_power_flags))
+ seq_printf(m, " %s", x86_power_flags[i]);
+ else
+ seq_printf(m, " [%d]", i);
+ }
+ }
+
+ seq_printf(m, "\n\n");
+
return 0;
}
pda->irqcount = -1;
pda->cpudata_offset = 0;
pda->kernelstack =
- (unsigned long)current_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
+ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
if (cpu == 0) {
/* others are initialized in smpboot.c */
void flush_tlb_all(void)
{
+ preempt_disable();
smp_call_function (flush_tlb_all_ipi,0,1,1);
do_flush_tlb_all_local();
+ preempt_enable();
}
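
The preempt_disable() bracket keeps the task from migrating between the cross-CPU call and the local flush; otherwise the CPU it started on could be left with a stale TLB, because smp_call_function() only targets the *other* CPUs. The same pattern guards flush_map() later in this patch. A minimal sketch of the invariant:

    /* run func on every CPU, including this one, with no window in
     * which "this one" can change identity */
    static void run_everywhere(void (*func)(void *), void *arg)
    {
            preempt_disable();
            smp_call_function(func, arg, 1, 1);     /* all others, wait */
            func(arg);                              /* local CPU */
            preempt_enable();
    }
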
void smp_kdb_stop(void)
int buggy = 0;
extern unsigned cpu_khz;
- printk("checking TSC synchronization across %u CPUs: ",num_booting_cpus());
+ printk(KERN_INFO "checking TSC synchronization across %u CPUs: ",num_booting_cpus());
one_usec = cpu_khz;
phys_id = GET_APIC_ID(apic_read(APIC_ID));
cpuid = smp_processor_id();
if (test_and_set_bit(cpuid, &cpu_callin_map)) {
- printk("huh, phys CPU#%d, CPU#%d already present??\n",
+ panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
phys_id, cpuid);
- BUG();
}
Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
}
if (!time_before(jiffies, timeout)) {
- printk("BUG: CPU%d started up but did not get a callout!\n",
+ panic("smp_callin: CPU%d started up but did not get a callout!\n",
cpuid);
- BUG();
}
/*
char *names[] = { "ID", "VERSION", "SPIV" };
int timeout, status;
- printk("Inquiring remote APIC #%d...\n", apicid);
+ printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
printk("... APIC #%d %s: ", apicid, names[i]);
Dprintk("After Startup.\n");
if (send_status)
- printk("APIC never delivered???\n");
+ printk(KERN_ERR "APIC never delivered???\n");
if (accept_status)
- printk("APIC delivery error (%lx).\n", accept_status);
+ printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
return (send_status | accept_status);
}
init_tss[cpu].rsp0 = init_rsp;
initial_code = initialize_secondary;
- printk("Booting processor %d/%d rip %lx rsp %lx rsp2 %lx\n", cpu, apicid,
+ printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx rsp2 %lx\n", cpu, apicid,
start_rip, idle->thread.rsp, init_rsp);
/*
if (test_bit(cpu, &cpu_callin_map)) {
/* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n");
- printk("CPU%d: ", cpu);
+ printk("KERN_INFO CPU%d: ", cpu);
print_cpu_info(&cpu_data[cpu]);
Dprintk("CPU has booted.\n");
} else {
cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
- printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
+ printk(KERN_INFO "per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
(long)cacheflush_time/(cpu_khz/1000),
((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
- printk("task migration cache decay timeout: %ld msecs.\n",
+ printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
(cache_decay_ticks + 1) * 1000 / HZ);
}
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
- printk("CPU%d: ", 0);
+ printk(KERN_INFO "CPU%d: ", 0);
print_cpu_info(&cpu_data[0]);
current_thread_info()->cpu = 0;
* CPU too, but we do it for the sake of robustness anyway.
*/
if (!test_bit(boot_cpu_id, &phys_cpu_present_map)) {
- printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
+ printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
boot_cpu_id);
phys_cpu_present_map |= (1 << hard_smp_processor_id());
}
time_esterror = NTP_PHASE_LIMIT;
write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
}
/*
sync_core();
rdtscll(t);
sec = __xtime.tv_sec;
- usec = (__xtime.tv_nsec * 1000) +
+ usec = (__xtime.tv_nsec / 1000) +
(__jiffies - __wall_jiffies) * (1000000 / HZ) +
(t - __hpet.last_tsc) * (1000000 / HZ) / __hpet.ticks + __hpet.offset;
#include <asm/nmi.h>
#include <asm/kdebug.h>
#include <asm/unistd.h>
+#include <asm/delay.h>
extern spinlock_t rtc_lock;
EXPORT_SYMBOL(csum_partial_copy_nocheck);
/* Delay loops */
EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(__const_udelay);
/*
- * Precise Delay Loops for i386
+ * Precise Delay Loops for x86-64
*
* Copyright (C) 1993 Linus Torvalds
* Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
{
__const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
}
+
+void __ndelay(unsigned long nsecs)
+{
+ __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
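
The factor of 5 follows the same fixed-point scheme as udelay's 0x10c6: __const_udelay() scales by 2^32, so the multiplier is 2^32 divided by the number of time units per second, rounded up so ndelay never returns early. Checking the arithmetic:

    /* 2^32 / 10^6 = 4294.97 -> 0x10c6 (= 4294) for udelay
     * 2^32 / 10^9 = 4.295   -> 5 for ndelay (rounded up, so it
     *                          overshoots by roughly 16%) */
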
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_DISCONTIGMEM) += numa.o
obj-$(CONFIG_K8_NUMA) += k8topology.o
+
+$(obj)/hugetlbpage.c:
+ @ln -sf ../../i386/mm/hugetlbpage.c $(obj)/hugetlbpage.c
+
+clean-files += hugetlbpage.c
+++ /dev/null
-/*
- * x86-64 Huge TLB Page Support for Kernel.
- *
- * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
- * Minor hacks by Andi Kleen for x86-64
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/sysctl.h>
-#include <asm/mman.h>
-#include <asm/pgalloc.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-
-static long htlbpagemem;
-int htlbpage_max;
-static long htlbzone_pages;
-
-static LIST_HEAD(htlbpage_freelist);
-static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
-
-static struct page *alloc_hugetlb_page(void)
-{
- int i;
- struct page *page;
-
- spin_lock(&htlbpage_lock);
- if (list_empty(&htlbpage_freelist)) {
- spin_unlock(&htlbpage_lock);
- return NULL;
- }
-
- page = list_entry(htlbpage_freelist.next, struct page, list);
- list_del(&page->list);
- htlbpagemem--;
- spin_unlock(&htlbpage_lock);
- set_page_count(page, 1);
- for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
- clear_highpage(&page[i]);
- return page;
-}
-
-static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pmd_t *pmd = NULL;
-
- pgd = pgd_offset(mm, addr);
- pmd = pmd_alloc(mm, pgd, addr);
- return (pte_t *) pmd;
-}
-
-static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pmd_t *pmd = NULL;
-
- pgd = pgd_offset(mm, addr);
- pmd = pmd_offset(pgd, addr);
- return (pte_t *) pmd;
-}
-
-#define mk_pte_huge(entry) {pte_val(entry) |= (_PAGE_PRESENT | _PAGE_PSE);}
-
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t * page_table, int write_access)
-{
- pte_t entry;
-
- mm->rss += (HPAGE_SIZE / PAGE_SIZE);
- if (write_access) {
- entry =
- pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
- } else
- entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
- entry = pte_mkyoung(entry);
- mk_pte_huge(entry);
- set_pte(page_table, entry);
-}
-
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
-int
-copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
- struct vm_area_struct *vma)
-{
- pte_t *src_pte, *dst_pte, entry;
- struct page *ptepage;
- unsigned long addr = vma->vm_start;
- unsigned long end = vma->vm_end;
-
- while (addr < end) {
- dst_pte = huge_pte_alloc(dst, addr);
- if (!dst_pte)
- goto nomem;
- src_pte = huge_pte_offset(src, addr);
- entry = *src_pte;
- ptepage = pte_page(entry);
- get_page(ptepage);
- set_pte(dst_pte, entry);
- dst->rss += (HPAGE_SIZE / PAGE_SIZE);
- addr += HPAGE_SIZE;
- }
- return 0;
-
-nomem:
- return -ENOMEM;
-}
-
-int
-follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
- struct page **pages, struct vm_area_struct **vmas,
- unsigned long *st, int *length, int i)
-{
- pte_t *ptep, pte;
- unsigned long start = *st;
- unsigned long pstart;
- int len = *length;
- struct page *page;
-
- do {
- pstart = start;
- ptep = huge_pte_offset(mm, start);
- pte = *ptep;
-
-back1:
- page = pte_page(pte);
- if (pages) {
- page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
- get_page(page);
- pages[i] = page;
- }
- if (vmas)
- vmas[i] = vma;
- i++;
- len--;
- start += PAGE_SIZE;
- if (((start & HPAGE_MASK) == pstart) && len &&
- (start < vma->vm_end))
- goto back1;
- } while (len && start < vma->vm_end);
- *length = len;
- *st = start;
- return i;
-}
-
-struct page *
-follow_huge_addr(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address, int write)
-{
- return NULL;
-}
-
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
-{
- return NULL;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return !!(pmd_val(pmd) & _PAGE_PSE);
-}
-
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page) {
- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
- get_page(page);
- }
- return page;
-}
-
-void free_huge_page(struct page *page)
-{
- BUG_ON(page_count(page));
- BUG_ON(page->mapping);
-
- INIT_LIST_HEAD(&page->list);
-
- spin_lock(&htlbpage_lock);
- list_add(&page->list, &htlbpage_freelist);
- htlbpagemem++;
- spin_unlock(&htlbpage_lock);
-}
-
-void huge_page_release(struct page *page)
-{
- if (!put_page_testzero(page))
- return;
-
- free_huge_page(page);
-}
-
-void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
- pte_t *pte;
- struct page *page;
-
- BUG_ON(start & (HPAGE_SIZE - 1));
- BUG_ON(end & (HPAGE_SIZE - 1));
-
- for (address = start; address < end; address += HPAGE_SIZE) {
- pte = huge_pte_offset(mm, address);
- page = pte_page(*pte);
- huge_page_release(page);
- pte_clear(pte);
- }
- mm->rss -= (end - start) >> PAGE_SHIFT;
- flush_tlb_range(vma, start, end);
-}
-
-void zap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long length)
-{
- struct mm_struct *mm = vma->vm_mm;
- spin_lock(&mm->page_table_lock);
- unmap_hugepage_range(vma, start, start + length);
- spin_unlock(&mm->page_table_lock);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr;
- int ret = 0;
-
- BUG_ON(vma->vm_start & ~HPAGE_MASK);
- BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
- spin_lock(&mm->page_table_lock);
- for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
- unsigned long idx;
- pte_t *pte = huge_pte_alloc(mm, addr);
- struct page *page;
-
- if (!pte) {
- ret = -ENOMEM;
- goto out;
- }
- if (!pte_none(*pte))
- continue;
-
- idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
- + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
- page = find_get_page(mapping, idx);
- if (!page) {
-
- page = alloc_hugetlb_page();
- if (!page) {
- ret = -ENOMEM;
- goto out;
- }
- ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
- unlock_page(page);
- if (ret) {
- free_huge_page(page);
- goto out;
- }
- }
- set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
- }
-out:
- spin_unlock(&mm->page_table_lock);
- return ret;
-}
-
-int set_hugetlb_mem_size(int count)
-{
- int j, lcount;
- struct page *page, *map;
- extern long htlbzone_pages;
- extern struct list_head htlbpage_freelist;
-
- if (count < 0)
- lcount = count;
- else
- lcount = count - htlbzone_pages;
-
- if (lcount > 0) { /* Increase the mem size. */
- while (lcount--) {
- page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
- if (page == NULL)
- break;
- spin_lock(&htlbpage_lock);
- list_add(&page->list, &htlbpage_freelist);
- htlbpagemem++;
- htlbzone_pages++;
- spin_unlock(&htlbpage_lock);
- }
- return (int) htlbzone_pages;
- }
- /* Shrink the memory size. */
- while (lcount++) {
- page = alloc_hugetlb_page();
- if (page == NULL)
- break;
- spin_lock(&htlbpage_lock);
- htlbzone_pages--;
- spin_unlock(&htlbpage_lock);
- map = page;
- for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
- map->flags &= ~(1 << PG_locked | 1 << PG_error |
- 1 << PG_referenced |
- 1 << PG_dirty | 1 << PG_active |
- 1 << PG_private | 1<< PG_writeback);
- set_page_count(map, 0);
- map++;
- }
- set_page_count(page, 1);
- __free_pages(page, HUGETLB_PAGE_ORDER);
- }
- return (int) htlbzone_pages;
-}
-
-/* This will likely not work because of fragmentation. */
-int hugetlb_sysctl_handler(ctl_table *table, int write, struct file *file, void *buffer, size_t *length)
-{
- proc_dointvec(table, write, file, buffer, length);
- htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
- return 0;
-}
-
-static int __init hugetlb_setup(char *s)
-{
- if (sscanf(s, "%d", &htlbpage_max) <= 0)
- htlbpage_max = 0;
- return 1;
-}
-__setup("hugepages=", hugetlb_setup);
-
-static int __init hugetlb_init(void)
-{
- int i, j;
- struct page *page;
-
- for (i = 0; i < htlbpage_max; ++i) {
- page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
- if (!page)
- break;
- for (j = 0; j < HPAGE_SIZE/PAGE_SIZE; ++j)
- SetPageReserved(&page[j]);
- spin_lock(&htlbpage_lock);
- list_add(&page->list, &htlbpage_freelist);
- spin_unlock(&htlbpage_lock);
- }
- htlbpage_max = htlbpagemem = htlbzone_pages = i;
- printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
- return 0;
-}
-module_init(hugetlb_init);
-
-int hugetlb_report_meminfo(char *buf)
-{
- return sprintf(buf,
- "HugePages_Total: %5lu\n"
- "HugePages_Free: %5lu\n"
- "Hugepagesize: %5lu kB\n",
- htlbzone_pages,
- htlbpagemem,
- HPAGE_SIZE/1024);
-}
-
-static struct page *
-hugetlb_nopage(struct vm_area_struct *vma, unsigned long address, int unused)
-{
- BUG();
- return NULL;
-}
-
-struct vm_operations_struct hugetlb_vm_ops = {
- .nopage = hugetlb_nopage,
-};
-
-int is_hugepage_mem_enough(size_t size)
-{
- if (size > (htlbpagemem << HPAGE_SHIFT))
- return 0;
- return 1;
-}
static inline void flush_map(unsigned long address)
{
+ preempt_disable();
#ifdef CONFIG_SMP
smp_call_function(flush_kernel_map, (void *)address, 1, 1);
#endif
flush_kernel_map((void *)address);
+ preempt_enable();
}
struct deferred_page {
{
unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
unsigned long set = 0;
- long res, bit = offset&63;
+ unsigned long res, bit = offset&63;
if (bit) {
/*
*/
extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
__udelay(n))
+#define ndelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+ __ndelay(n))
+
+
#endif /* defined(_X8664_DELAY_H) */
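
__bad_ndelay(), like __bad_udelay() and the __bad_mpf_size() check added to smp_scan_config() above, is declared but deliberately never defined: when the guarded condition is compile-time false, gcc deletes the call; when it is not, the undefined reference breaks the link, turning a runtime limit into a build-time assertion (the pre-BUILD_BUG_ON idiom). A minimal sketch of the trick:

    extern void __this_never_links(void);   /* no definition anywhere */

    static inline void assert_small(unsigned long n)
    {
            /* either the branch is provably dead and the call
             * disappears, or the final link fails */
            if (__builtin_constant_p(n) && n > 20000)
                    __this_never_links();
    }
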
if (!me->used_math)
return 0;
me->used_math = 0;
- if (!test_thread_flag(TIF_USEDFPU))
+ if (me->thread_info->flags & _TIF_USEDFPU)
return 0;
return 1;
}
#define kernel_fpu_end() stts()
#define unlazy_fpu(tsk) do { \
- if ((tsk)->thread_info->flags & TIF_USEDFPU) \
+ if ((tsk)->thread_info->flags & _TIF_USEDFPU) \
save_init_fpu(tsk); \
} while (0)
#define clear_fpu(tsk) do { \
- if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \
+ if ((tsk)->thread_info->flags & _TIF_USEDFPU) { \
asm volatile("fwait"); \
- clear_tsk_thread_flag(tsk,TIF_USEDFPU); \
+ (tsk)->thread_info->flags &= ~_TIF_USEDFPU; \
stts(); \
} \
} while (0)
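
All three hunks fix the same class of bug: TIF_USEDFPU is a bit *number*, for test_bit()-style helpers, while _TIF_USEDFPU is the mask form, so testing flags against the unprefixed constant checks the wrong bit entirely. The convention, for reference:

    #define TIF_USEDFPU     16                      /* bit number */
    #define _TIF_USEDFPU    (1 << TIF_USEDFPU)      /* mask form  */

    /* flags & TIF_USEDFPU  tests bit 4 (16 == 1 << 4): wrong */
    /* flags & _TIF_USEDFPU tests bit 16:               right */
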
#include <linux/config.h>
#ifndef MAX_HWIFS
-# ifdef CONFIG_PCI
+# ifdef CONFIG_BLK_DEV_IDEPCI
#define MAX_HWIFS 10
# else
#define MAX_HWIFS 6
# endif
#endif
-static __inline__ int ide_default_irq(ide_ioreg_t base)
+static __inline__ int ide_default_irq(unsigned long base)
{
switch (base) {
case 0x1f0: return 14;
}
}
-static __inline__ ide_ioreg_t ide_default_io_base(int index)
+static __inline__ unsigned long ide_default_io_base(int index)
{
- static unsigned long ata_io_base[MAX_HWIFS] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
-
- return ata_io_base[index];
+ switch (index) {
+ case 0: return 0x1f0;
+ case 1: return 0x170;
+ case 2: return 0x1e8;
+ case 3: return 0x168;
+ case 4: return 0x1e0;
+ case 5: return 0x160;
+ default:
+ return 0;
+ }
}
-static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl_port, int *irq)
+static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
+ unsigned long ctrl_port, int *irq)
{
- ide_ioreg_t reg = data_port;
+ unsigned long reg = data_port;
int i;
for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
static __inline__ void ide_init_default_hwifs(void)
{
-#ifndef CONFIG_PCI
+#ifndef CONFIG_BLK_DEV_IDEPCI
hw_regs_t hw;
int index;
hw.irq = ide_default_irq(ide_default_io_base(index));
ide_register_hw(&hw, NULL);
}
-#endif
+#endif /* CONFIG_BLK_DEV_IDEPCI */
}
+#include <asm-generic/ide_iops.h>
+
#endif /* __KERNEL__ */
#endif /* __ASMx86_64_IDE_H */
#ifndef X86_64_PDA_H
#define X86_64_PDA_H
+#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
unsigned apic_timer_irqs;
} ____cacheline_aligned;
-#define PDA_STACKOFFSET (5*8)
#define IRQSTACK_ORDER 2
#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
#define sub_pda(field,val) pda_to_op("sub",field,val)
#endif
+
+#define PDA_STACKOFFSET (5*8)
+
+#endif
/* page, protection -> pte */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
/* physical address -> PTE */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
+ pte_val(pte) &= __supported_pte_mask;
return pte;
}
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
char x86_model_id[64];
int x86_cache_size; /* in KB */
int x86_clflush_size;
+	int x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined */
+ __u8 x86_virt_bits, x86_phys_bits;
+ __u32 x86_power;
unsigned long loops_per_jiffy;
} ____cacheline_aligned;
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/linkage.h>
+#include <linux/time.h>
/* Avoid too many header ordering problems. */
struct siginfo;
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/bitops.h>
+extern int disable_apic;
#endif
#ifdef CONFIG_X86_LOCAL_APIC
return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}
-extern int disable_apic;
extern int slow_smp_processor_id(void);
extern inline int safe_smp_processor_id(void)
unsigned long rdx;
unsigned long rsi;
unsigned long rdi;
- unsigned long rax;
unsigned long r15;
unsigned long r14;
unsigned long r13;
frame pointer and the scheduler is compiled with frame pointers. -AK */
#define SAVE_CONTEXT \
__PUSH(r8) __PUSH(r9) __PUSH(r10) __PUSH(r11) __PUSH(r12) __PUSH(r13) \
- __PUSH(r14) __PUSH(r15) __PUSH(rax) \
+ __PUSH(r14) __PUSH(r15) \
__PUSH(rdi) __PUSH(rsi) \
__PUSH(rdx) __PUSH(rcx) \
__PUSH(rbx) __PUSH(rbp)
__POP(rbp) __POP(rbx) \
__POP(rcx) __POP(rdx) \
__POP(rsi) __POP(rdi) \
- __POP(rax) __POP(r15) __POP(r14) __POP(r13) __POP(r12) __POP(r11) __POP(r10) \
+ __POP(r15) __POP(r14) __POP(r13) __POP(r12) __POP(r11) __POP(r10) \
__POP(r9) __POP(r8)
/* RED-PEN: pipeline stall on ret because it is not predicted */
"thread_return:\n\t" \
RESTORE_CONTEXT \
:[prevrsp] "=m" (prev->thread.rsp), \
- [prevrip] "=m" (prev->thread.rip) \
+ [prevrip] "=m" (prev->thread.rip), \
+ "=a" (last) \
:[nextrsp] "m" (next->thread.rsp), \
- [nextrip]"m" (next->thread.rip), \
+ [nextrip] "m" (next->thread.rip), \
[next] "S" (next), [prev] "D" (prev) \
:"memory")
#include <asm/page.h>
#include <asm/types.h>
+#include <asm/pda.h>
/*
* low level task data that entry.S needs immediate access to
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));
+ ti = (void *)read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE;
+ return ti;
+}
+
+/* do not use in interrupt context */
+static inline struct thread_info *stack_thread_info(void)
+{
+ struct thread_info *ti;
+ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~8191UL));
return ti;
}
/* how to get the thread information struct from ASM */
/* only works on the process stack. otherwise get it via the PDA. */
#define GET_THREAD_INFO(reg) \
- movq $CURRENT_MASK, reg; \
- andq %rsp, reg
+ movq %gs:pda_kernelstack,reg ; \
+ subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
#endif
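
Both the C and asm forms now derive thread_info from the PDA rather than masking %rsp, using the identity established in pda_init() above (with THREAD_SIZE = 8192 and PDA_STACKOFFSET = 5*8 = 40 in this tree):

    /* pda->kernelstack = (char *)ti + THREAD_SIZE - PDA_STACKOFFSET
     * =>            ti = (char *)pda->kernelstack
     *                    + PDA_STACKOFFSET - THREAD_SIZE */

Unlike the old mask, this stays correct on interrupt stacks, where %rsp no longer points into the task's own stack; stack_thread_info() keeps the mask form for early code that runs before the PDA is set up.
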
__SYSCALL(__NR_semtimedop, sys_semtimedop)
#define __NR_fadvise64 221
__SYSCALL(__NR_fadvise64, sys_fadvise64)
-
-#define __NR_syscall_max __NR_fadvise64
+#define __NR_timer_create 222
+__SYSCALL(__NR_timer_create, sys_timer_create)
+#define __NR_timer_settime 223
+__SYSCALL(__NR_timer_settime, sys_timer_settime)
+#define __NR_timer_gettime 224
+__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
+#define __NR_timer_getoverrun 225
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_delete 226
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 227
+__SYSCALL(__NR_clock_settime, sys_clock_settime)
+#define __NR_clock_gettime 228
+__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
+#define __NR_clock_getres 229
+__SYSCALL(__NR_clock_getres, sys_clock_getres)
+#define __NR_clock_nanosleep 230
+__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
+
+#define __NR_syscall_max __NR_clock_nanosleep
#ifndef __NO_STUBS
/* user-visible error numbers are in the range -1 - -4095 */
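
With these table slots wired up, the new POSIX timer calls are reachable before libc grows wrappers via syscall(2). A hypothetical userspace smoke test, with the syscall number taken from the table above:

    #include <unistd.h>
    #include <time.h>
    #include <stdio.h>

    int main(void)
    {
            struct timespec ts;
            long ret = syscall(228 /* __NR_clock_gettime */,
                               CLOCK_REALTIME, &ts);

            printf("clock_gettime: ret=%ld sec=%ld\n",
                   ret, (long)ts.tv_sec);
            return ret != 0;
    }
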