define_int CONFIG_X86_L1_CACHE_SHIFT 6
define_bool CONFIG_X86_TSC y
define_bool CONFIG_X86_GOOD_APIC y
-define_bool CONFIG_X86_CMPXCHG
+define_bool CONFIG_X86_CMPXCHG y
tristate '/dev/cpu/*/msr - Model-specific register support' CONFIG_X86_MSR
tristate '/dev/cpu/*/cpuid - CPU information support' CONFIG_X86_CPUID
if [ "$CONFIG_HOTPLUG" = "y" ] ; then
source drivers/pcmcia/Config.in
+ source drivers/hotplug/Config.in
else
define_bool CONFIG_PCMCIA n
fi
define_bool CONFIG_KCORE_ELF y
fi
# We probably are not going to support a.out, are we? Or should we support a.out in i386 compatibility mode?
-#tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
-tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
+ #tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
+ tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
bool 'Power Management support' CONFIG_PM
CONFIG_X86_L1_CACHE_SHIFT=6
CONFIG_X86_TSC=y
CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_CMPXCHG=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
# CONFIG_MATH_EMULATION is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_PM=y
CONFIG_IA32_EMULATION=y
-CONFIG_ACPI=y
-CONFIG_ACPI_DEBUG=y
-CONFIG_ACPI_BUSMGR=y
-CONFIG_ACPI_SYS=y
-CONFIG_ACPI_CPU=y
-CONFIG_ACPI_BUTTON=y
-CONFIG_ACPI_AC=y
-CONFIG_ACPI_EC=y
-CONFIG_ACPI_CMBATT=y
-CONFIG_ACPI_THERMAL=y
+# CONFIG_ACPI is not set
#
# Memory Technology Devices (MTD)
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_INITRD is not set
#
# Multi-device support (RAID and LVM)
# CONFIG_UDF_RW is not set
# CONFIG_UFS_FS is not set
# CONFIG_UFS_FS_WRITE is not set
-CONFIG_SIMICSFS=y
#
# Network File Systems
#include <asm/ptrace.h>
#include <asm/processor.h>
+struct file;
+struct elf_phdr;
+
#define IA32_EMULATOR 1
#define IA32_PAGE_OFFSET 0xE0000000
__asm__("movl %0,%%fs": :"r" (0)); \
__asm__("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS)); \
wrmsrl(MSR_KERNEL_GS_BASE, 0); \
- set_thread_flag(TIF_IA32); \
(regs)->rip = (new_rip); \
(regs)->rsp = (new_rsp); \
(regs)->eflags = 0x200; \
} while(0)
+#define elf_map elf32_map
+
MODULE_DESCRIPTION("Binary format loader for compatibility with IA32 ELF binaries.");
MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
static void elf32_init(struct pt_regs *regs)
{
+ struct task_struct *me = current;
regs->rdi = 0;
regs->rsi = 0;
regs->rdx = 0;
regs->rax = 0;
regs->rbx = 0;
regs->rbp = 0;
- current->thread.fs = 0; current->thread.gs = 0;
- current->thread.fsindex = 0; current->thread.gsindex = 0;
- current->thread.ds = __USER_DS; current->thread.es == __USER_DS;
+ me->thread.fs = 0;
+ me->thread.gs = 0;
+ me->thread.fsindex = 0;
+ me->thread.gsindex = 0;
+ me->thread.ds = __USER_DS;
+ me->thread.es = __USER_DS;
+ set_thread_flag(TIF_IA32);
}
extern void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address);
return 0;
}
+static unsigned long
+elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
+{
+ unsigned long map_addr;
+ struct task_struct *me = current;
+
+ down_write(&me->mm->mmap_sem);
+ map_addr = do_mmap(filep, ELF_PAGESTART(addr),
+ eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type|MAP_32BIT,
+ eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
+ up_write(&me->mm->mmap_sem);
+ return(map_addr);
+}
COMPATIBLE_IOCTL(BLKROGET)
COMPATIBLE_IOCTL(BLKRRPART)
COMPATIBLE_IOCTL(BLKFLSBUF)
-COMPATIBLE_IOCTL(BLKRASET)
-COMPATIBLE_IOCTL(BLKFRASET)
COMPATIBLE_IOCTL(BLKSECTSET)
COMPATIBLE_IOCTL(BLKSSZGET)
HANDLE_IOCTL(SIOCRTMSG, ret_einval)
HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo)
-HANDLE_IOCTL(BLKRAGET, w_long)
HANDLE_IOCTL(BLKGETSIZE, w_long)
HANDLE_IOCTL(0x1260, broken_blkgetsize)
-HANDLE_IOCTL(BLKFRAGET, w_long)
HANDLE_IOCTL(BLKSECTGET, w_long)
HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
HANDLE_IOCTL(FBIOGETCMAP, fb_ioctl_trans)
spin_lock_irq(&current->sigmask_lock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
- recalc_sigpending(current);
+ recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
regs.rax = -EINTR;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
current->blocked = set;
- recalc_sigpending(current);
+ recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
if (restore_sigcontext(&regs, &frame->sc, &eax))
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
current->blocked = set;
- recalc_sigpending(current);
+ recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &eax))
while (!need_resched())
idle();
schedule();
- check_pgt_cache();
}
}
current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0);
+ preempt_disable();
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
+ preempt_enable();
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
spin_lock_irq(&current->sigmask_lock);
saveset = current->blocked;
current->blocked = newset;
- recalc_sigpending(current);
+ recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
#if DEBUG_SIG
printk("rt_sigsuspend savset(%lx) newset(%lx) regs(%p) rip(%lx)\n",
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
current->blocked = set;
- recalc_sigpending(current);
+ recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &eax))
spin_lock_irq(&current->sigmask_lock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
- recalc_sigpending(current);
+ recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
}
}
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
/* Let the debugger run. */
current->exit_code = signr;
+ preempt_disable();
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
+ preempt_enable();
/* We're back. Did the debugger cancel the sig? */
if (!(signr = current->exit_code))
case SIGSTOP: {
struct signal_struct *sig;
+ preempt_disable();
current->state = TASK_STOPPED;
current->exit_code = signr;
sig = current->p_pptr->sig;
if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
notify_parent(current, SIGCHLD);
schedule();
+ preempt_enable();
continue;
}
extern spinlock_t i8259A_lock;
+
+static inline unsigned long do_fast_gettimeoffset(void)
+{
+ register unsigned long eax, edx;
+
+ /* Read the Time Stamp Counter */
+
+ rdtsc(eax,edx);
+
+ /* .. relative to previous jiffy (32 bits is enough) */
+ eax = (eax - last_tsc_low) & 0xffffffffUL; /* 32-bit tsc_low delta */
+
+ /*
+ * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
+ * = (tsc_low delta) * (usecs_per_clock)
+ * = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
+ *
+ * Using a multiply instead of a divide saves up to 31 clock cycles
+ * in the critical path.
+ */
+
+ edx = (eax*fast_gettimeoffset_quotient) >> 32;
+
+ /* our adjusted time offset in microseconds */
+ return delay_at_last_interrupt + edx;
+}
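
For reference, here is a minimal userspace sketch of the same 32.32 fixed-point conversion the comment above describes (the numbers and variable names are illustrative, not taken from the kernel's calibration code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assume a 500 MHz CPU with HZ=100: 5,000,000 clocks per 10,000 usec jiffy. */
	uint64_t clocks_per_jiffy = 5000000;
	uint64_t usecs_per_jiffy = 10000;

	/* Precomputed once at calibration: usecs per clock, scaled by 2^32. */
	uint64_t quotient = (usecs_per_jiffy << 32) / clocks_per_jiffy;

	/* Hot path: one multiply and a shift instead of a divide. */
	uint32_t tsc_delta = 1234567;	/* 32-bit TSC delta since the last tick */
	uint64_t usec = ((uint64_t)tsc_delta * quotient) >> 32;

	printf("%u clocks -> %llu usec\n", (unsigned)tsc_delta,
	       (unsigned long long)usec);	/* ~2469 usec */
	return 0;
}
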
+
+/*
+ * This version of gettimeofday has microsecond resolution
+ * and better than microsecond precision on fast x86 machines with TSC.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+ unsigned long usec, sec;
+
+ read_lock_irqsave(&xtime_lock, flags);
+ usec = do_gettimeoffset();
+ {
+ unsigned long lost = jiffies - wall_jiffies;
+ if (lost)
+ usec += lost * (1000000 / HZ);
+ }
+ sec = xtime.tv_sec;
+ usec += xtime.tv_usec;
+ read_unlock_irqrestore(&xtime_lock, flags);
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
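
The carry loop at the end is needed because the interpolated offset, the lost-tick compensation and xtime.tv_usec together can exceed one second's worth of microseconds. A standalone sketch of that normalization step (the helper name is made up):

#include <stdio.h>

static void normalize_timeval(long *sec, long *usec)
{
	/* Fold whole seconds of accumulated microseconds into the seconds field. */
	while (*usec >= 1000000) {
		*usec -= 1000000;
		(*sec)++;
	}
}

int main(void)
{
	long sec = 100, usec = 2345678;	/* more than two seconds pending */
	normalize_timeval(&sec, &usec);
	printf("%ld.%06ld\n", sec, usec);	/* prints 102.345678 */
	return 0;
}
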
+
void do_settimeofday(struct timeval *tv)
{
write_lock_irq(&xtime_lock);
* clock/second. Our precision is about 100 ppm.
*/
{
- cpu_khz = ((1000000*(1UL<<32)) / tsc_quotient); /* FIXME: is it right? */
+ cpu_khz = ((1000*(1UL<<32)) / tsc_quotient);
printk("Detected %ld Hz processor.\n", cpu_khz);
}
}
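
The corrected constant follows from what tsc_quotient approximates: roughly 2^32 * usecs_per_jiffy / clocks_per_jiffy, so 1000 * 2^32 / tsc_quotient comes out as clocks per millisecond, i.e. kHz (the old 1000000 factor yielded Hz). A quick sanity check with made-up calibration numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clocks_per_jiffy = 5000000;	/* hypothetical 500 MHz CPU, HZ=100 */
	uint64_t usecs_per_jiffy = 10000;
	uint64_t tsc_quotient = (usecs_per_jiffy << 32) / clocks_per_jiffy;
	uint64_t cpu_khz = (1000 * (1ULL << 32)) / tsc_quotient;
	printf("cpu_khz = %llu\n", (unsigned long long)cpu_khz);	/* ~500000 */
	return 0;
}
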
long __vxtime_sequence[2] __section_vxtime_sequence;
-/* The rest of the kernel knows it as this. */
-extern void do_gettimeofday(struct timeval *tv) __attribute__((alias("do_vgettimeofday")));
-
inline void do_vgettimeofday(struct timeval * tv)
{
long sequence;
EXPORT_SYMBOL(strtok);
EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(simple_strtol);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strncpy_from_user);
mm = tsk->mm;
info.si_code = SEGV_MAPERR;
- if (address >= TASK_SIZE)
+ if (address >= TASK_SIZE && !(error_code & 5))
goto vmalloc_fault;
/*
- * linux/arch/i386/mm/init.c
+ * linux/arch/x86_64/mm/init.c
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2002 Andi Kleen <ak@suse.de>
*/
#include <linux/config.h>
static unsigned long totalram_pages;
-int do_check_pgt_cache(int low, int high)
-{
- int freed = 0;
- if(read_pda(pgtable_cache_sz) > high) {
- do {
- if (read_pda(pgd_quick)) {
- pgd_free_slow(pgd_alloc_one_fast());
- freed++;
- }
- if (read_pda(pmd_quick)) {
- pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
- freed++;
- }
- if (read_pda(pte_quick)) {
- pte_free_slow(pte_alloc_one_fast(NULL, 0));
- freed++;
- }
- } while(read_pda(pgtable_cache_sz) > low);
- }
- return freed;
-}
-
/*
* NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
* physical space so we can cache the place of the first one and move
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached);
- printk("%ld pages in page table cache\n",read_pda(pgtable_cache_sz));
show_buffers();
}
if (pmd_none(*pmd)) {
pte = (pte_t *) spp_getpage();
set_pmd(pmd, __pmd(__pa(pte) + 0x7));
- if (pte != pte_offset(pmd, 0)) {
+ if (pte != pte_offset_kernel(pmd, 0)) {
printk("PAGETABLE BUG #02!\n");
return;
}
}
- pte = pte_offset(pmd, vaddr);
+ pte = pte_offset_kernel(pmd, vaddr);
if (pte_val(*pte))
pte_ERROR(*pte);
set_pte(pte, mk_pte_phys(phys, prot));
if (address >= end)
BUG();
do {
- pte_t * pte = pte_alloc(&init_mm, pmd, address);
+ pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, flags);
ENTRY(irqrsp);
ENTRY(irqcount);
ENTRY(irqstack);
- ENTRY(pgd_quick);
- ENTRY(pmd_quick);
- ENTRY(pte_quick);
- ENTRY(pgtable_cache_sz);
ENTRY(cpunumber);
ENTRY(irqstackptr);
ENTRY(me);
#ifdef __KERNEL__
+static inline int sched_find_first_bit(unsigned long *b)
+{
+ if (b[0])
+ return __ffs(b[0]);
+ if (b[1])
+ return __ffs(b[1]) + 64;
+ return __ffs(b[2]) + 128;
+}
+
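
MAX_PRIO is 168, so on 64-bit the O(1) scheduler's priority bitmap fits in three longs and three tests replace a generic bitmap search; the scheduler guarantees at least one bit is set, so the last word needs no check. A userspace equivalent, using GCC's __builtin_ctzl in place of the kernel's __ffs (illustrative only):

#include <stdio.h>

static int find_first_prio(const unsigned long *b)
{
	if (b[0])
		return __builtin_ctzl(b[0]);
	if (b[1])
		return __builtin_ctzl(b[1]) + 64;
	/* At least one bit is guaranteed set. */
	return __builtin_ctzl(b[2]) + 128;
}

int main(void)
{
	unsigned long bitmap[3] = { 0, 1UL << 3, 0 };
	printf("first set bit: %d\n", find_first_prio(bitmap));	/* prints 67 */
	return 0;
}
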
/**
* ffs - find first bit set
* @x: the word to search
#include <asm/atomic.h>
#include <asm/pgalloc.h>
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 168-bit bitmap where the first 128 bits are
- * unlikely to be set. It's guaranteed that at least one of the 168
- * bits is cleared.
- */
-#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
-# error update this function.
-#endif
-
-static inline int __sched_find_first_bit(unsigned long *b)
-{
- if (b[0])
- return __ffs(b[0]);
- if (b[1])
- return __ffs(b[1]) + 64;
- if (b[2])
- return __ffs(b[2]) + 128;
-}
-
-static inline int sched_find_first_bit(unsigned long *b)
-{
- int n = __sched_find_first_bit(b);
- BUG_ON((unsigned)n > 167);
- return n;
-}
-
/*
* possibly do the LDT unload here?
*/
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
struct task_struct *pcurrent; /* Current process */
int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */
- /* XXX: could be a single list */
- unsigned long *pgd_quick;
- unsigned long *pmd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
char *irqstackptr;
unsigned int __softirq_pending;
unsigned int __local_irq_count;
#include <linux/threads.h>
#include <linux/mm.h>
-#define inc_pgcache_size() add_pda(pgtable_cache_sz,1UL)
-#define dec_pgcache_size() sub_pda(pgtable_cache_sz,1UL)
-
-#define pmd_populate(mm, pmd, pte) \
+#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pgd_populate(mm, pgd, pmd) \
set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pmd)))
-extern __inline__ pmd_t *get_pmd_slow(void)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
- pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL);
-
- if (ret)
- memset(ret, 0, PAGE_SIZE);
- return ret;
+ set_pmd(pmd, __pmd(_PAGE_TABLE |
+ ((u64)(pte - mem_map) << PAGE_SHIFT)));
}
-extern __inline__ pmd_t *get_pmd_fast(void)
+extern __inline__ pmd_t *get_pmd(void)
{
- unsigned long *ret;
-
- preempt_disable();
- ret = read_pda(pmd_quick);
- if (ret) {
- write_pda(pmd_quick, (unsigned long *)(*ret));
- ret[0] = 0;
- dec_pgcache_size();
- }
- preempt_enable();
- if (!ret)
- ret = (unsigned long *)get_pmd_slow();
- return (pmd_t *)ret;
+ return (pmd_t *)get_zeroed_page(GFP_KERNEL);
}
extern __inline__ void pmd_free(pmd_t *pmd)
-{
- preempt_disable();
- *(unsigned long *)pmd = (unsigned long) read_pda(pmd_quick);
- write_pda(pmd_quick,(unsigned long *) pmd);
- inc_pgcache_size();
- preempt_enable();
-}
-
-extern __inline__ void pmd_free_slow(pmd_t *pmd)
{
if ((unsigned long)pmd & (PAGE_SIZE-1))
BUG();
free_page((unsigned long)pmd);
}
-static inline pmd_t *pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
-{
- unsigned long *ret;
-
- preempt_disable();
- ret = (unsigned long *)read_pda(pmd_quick);
-
- if (__builtin_expect(ret != NULL, 1)) {
- write_pda(pmd_quick, (unsigned long *)(*ret));
- ret[0] = 0;
- dec_pgcache_size();
- }
- preempt_enable();
- return (pmd_t *)ret;
-}
-
static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
- pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
-
- if (__builtin_expect(pmd != NULL, 1))
- clear_page(pmd);
- return pmd;
-}
-
-
-static inline pgd_t *pgd_alloc_one_fast (void)
-{
- unsigned long *ret;
-
- preempt_disable();
- ret = read_pda(pgd_quick);
- if (likely(ret != NULL)) {
- write_pda(pgd_quick,(unsigned long *)(*ret));
- ret[0] = 0;
- dec_pgcache_size();
- }
- preempt_enable();
- return (pgd_t *) ret;
+ return (pmd_t *) get_zeroed_page(GFP_KERNEL);
}
static inline pgd_t *pgd_alloc (struct mm_struct *mm)
{
- /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
- pgd_t *pgd = pgd_alloc_one_fast();
-
- if (pgd == NULL) {
- pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
- if (__builtin_expect(pgd != NULL, 1))
- clear_page(pgd);
- }
- return pgd;
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
}
static inline void pgd_free (pgd_t *pgd)
-{
- preempt_disable();
- *(unsigned long *)pgd = (unsigned long) read_pda(pgd_quick);
- write_pda(pgd_quick,(unsigned long *) pgd);
- inc_pgcache_size();
- preempt_enable();
-}
-
-
-static inline void pgd_free_slow (pgd_t *pgd)
{
if ((unsigned long)pgd & (PAGE_SIZE-1))
BUG();
free_page((unsigned long)pgd);
}
-
-static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
- if (pte)
- clear_page(pte);
- return pte;
+ return (pte_t *) get_zeroed_page(GFP_KERNEL);
}
-extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- unsigned long *ret;
-
- preempt_disable();
- if(__builtin_expect((ret = read_pda(pte_quick)) != NULL, !0)) {
- write_pda(pte_quick, (unsigned long *)(*ret));
- ret[0] = ret[1];
- dec_pgcache_size();
- }
- preempt_enable();
- return (pte_t *)ret;
+ void *p = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!p)
+ return NULL;
+ return virt_to_page(p);
}
-/* Should really implement gc for free page table pages. This could be done with
- a reference count in struct page. */
+/* Should really implement gc for free page table pages. This could be
+ done with a reference count in struct page. */
-extern __inline__ void pte_free(pte_t *pte)
-{
- preempt_disable();
- *(unsigned long *)pte = (unsigned long) read_pda(pte_quick);
- write_pda(pte_quick, (unsigned long *) pte);
- inc_pgcache_size();
- preempt_enable();
-}
-
-extern __inline__ void pte_free_slow(pte_t *pte)
+extern __inline__ void pte_free_kernel(pte_t *pte)
{
if ((unsigned long)pte & (PAGE_SIZE-1))
BUG();
free_page((unsigned long)pte);
}
+extern inline void pte_free(struct page *pte)
+{
+ __free_page(pte);
+}
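
A minimal, self-contained sketch of the reference-counting idea the comment above alludes to (the names and types are made up for illustration; the real thing would hang a count off struct page):

#include <stdio.h>
#include <stdlib.h>

struct pt_page {
	int pte_count;		/* live ptes stored in this page-table page */
	void *mem;		/* the page itself */
};

/* Called whenever a pte in this table becomes used. */
static void pt_ref(struct pt_page *p) { p->pte_count++; }

/* Called whenever a pte is cleared; frees the table when it becomes empty. */
static void pt_unref(struct pt_page *p)
{
	if (--p->pte_count == 0) {
		free(p->mem);	/* stands in for __free_page() */
		p->mem = NULL;
	}
}

int main(void)
{
	struct pt_page p = { 0, malloc(4096) };
	pt_ref(&p); pt_ref(&p);
	pt_unref(&p); pt_unref(&p);	/* second unref frees the page */
	printf("freed: %s\n", p.mem == NULL ? "yes" : "no");
	return 0;
}
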
-extern int do_check_pgt_cache(int, int);
/*
* TLB flushing:
extern pmd_t level2_kernel_pgt[512];
extern void paging_init(void);
-/* Caches aren't brain-dead on the intel. */
+/* Caches aren't brain-dead. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define __flush_tlb() \
do { \
#define page_pte(page) page_pte_prot(page, __pgprot(0))
-#define pmd_page(pmd) \
+#define pmd_page_kernel(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) \
+ (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
/* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
__pte_offset(address))
+#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
+#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
+#define pte_unmap(pte) /* NOP */
+#define pte_unmap_nested(pte) /* NOP */
+
+
/* never use these in the common code */
#define level4_page(level4) ((unsigned long) __va(level4_val(level4) & PAGE_MASK))
#define level4_index(address) ((address >> LEVEL4_SHIFT) & (PTRS_PER_LEVEL4-1))
#define prepare_to_switch() do { } while(0)
-#define switch_to(prev,next,last) do { \
+#define switch_to(prev,next) do { \
asm volatile("pushq %%rbp\n\t" \
"pushq %%rbx\n\t" \
"pushq %%r8\n\t" \
"pushq %%r14\n\t" \
"pushq %%r15\n\t" \
"movq %%rsp,%0\n\t" /* save RSP */ \
- "movq %3,%%rsp\n\t" /* restore RSP */ \
+ "movq %2,%%rsp\n\t" /* restore RSP */ \
"leaq 1f(%%rip),%%rbp\n\t" \
"movq %%rbp,%1\n\t" /* save RIP */ \
- "pushq %4\n\t" /* setup new RIP */ \
+ "pushq %3\n\t" /* setup new RIP */ \
"jmp __switch_to\n\t" \
"1:\t" \
"popq %%r15\n\t" \
"popq %%r8\n\t" \
"popq %%rbx\n\t" \
"popq %%rbp\n\t" \
- :"=m" (prev->thread.rsp),"=m" (prev->thread.rip), \
- "=b" (last) \
+ :"=m" (prev->thread.rsp),"=m" (prev->thread.rip) \
:"m" (next->thread.rsp),"m" (next->thread.rip), \
"b" (prev), "S" (next), "D" (prev)); \
} while (0)