x86/mm/kaiser: re-enable vsyscalls
author    Andrea Arcangeli <aarcange@redhat.com>
          Tue, 5 Dec 2017 20:15:07 +0000 (21:15 +0100)
committer Ben Hutchings <ben@decadent.org.uk>
          Sun, 7 Jan 2018 01:46:49 +0000 (01:46 +0000)
To avoid breaking the kernel ABI.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
[Hugh Dickins: Backported to 3.2:
 - Leave out the PVCLOCK_FIXMAP user mapping, which does not apply to
   this tree
 - For safety, added vsyscall_pgprot and a BUG_ON if _PAGE_USER is used
   outside of FIXMAP.]
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
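
"To avoid breaking the kernel ABI" refers to the legacy vsyscall page: a
kernel-provided page at the fixed address 0xffffffffff600000 that old static
binaries call directly. KAISER's shadow page tables hid that page from
userspace, so such binaries started faulting; this patch maps the vsyscall
and vvar pages back into the shadow tables with _PAGE_USER set. A minimal
userland smoke test (hypothetical, not part of the patch; it exercises the
fixed ABI address, which works in both NATIVE and EMULATE modes):

/* Hypothetical userland smoke test, not part of the patch. */
#include <stdio.h>
#include <sys/time.h>

typedef int (*vgtod_t)(struct timeval *, struct timezone *);

int main(void)
{
        /* VSYSCALL_ADDR(0) == gettimeofday, fixed by the x86-64 ABI */
        vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;
        struct timeval tv;

        if (vgtod(&tv, NULL) != 0) {
                perror("vsyscall gettimeofday");
                return 1;
        }
        printf("vsyscall time: %ld.%06ld\n",
               (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
}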
arch/x86/include/asm/vsyscall.h
arch/x86/kernel/hpet.c
arch/x86/kernel/vsyscall_64.c
arch/x86/mm/kaiser.c

diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index eaea1d31f753092cd19ff6cf126f6c7c0f7cedb7..143e98b28081b746ef473dae08f72d5932568ff8 100644
@@ -22,6 +22,7 @@ enum vsyscall_num {
 /* kernel space (writeable) */
 extern int vgetcpu_mode;
 extern struct timezone sys_tz;
+extern unsigned long vsyscall_pgprot;
 
 #include <asm/vvar.h>
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 4970ef070f2f5fac9d030be7b53b9524986c8a69..02fd03bf15dd1cfd928291e5449405664506064e 100644
@@ -12,6 +12,7 @@
 #include <linux/cpu.h>
 #include <linux/pm.h>
 #include <linux/io.h>
+#include <linux/kaiser.h>
 
 #include <asm/fixmap.h>
 #include <asm/hpet.h>
@@ -74,6 +75,8 @@ static inline void hpet_set_mapping(void)
        hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
 #ifdef CONFIG_X86_64
        __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
+       kaiser_add_mapping(__fix_to_virt(VSYSCALL_HPET), PAGE_SIZE,
+                          __PAGE_KERNEL_VVAR_NOCACHE);
 #endif
 }
 
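The vsyscall-based gettimeofday/clock_gettime read the HPET counter straight
from userspace through this fixmap page, so the page must be visible in the
shadow (user) page tables as well, not only in the kernel copy. In the
KAISER trees, kaiser_add_mapping() is a thin wrapper over
kaiser_add_user_map(); a sketch, quoted from the 4.4/4.9 backports and
assumed unchanged in this 3.2 tree:

/* arch/x86/mm/kaiser.c (sketch): mirror a kernel virtual range
 * into the shadow page tables with the given protection flags. */
int kaiser_add_mapping(unsigned long addr, unsigned long size,
                       unsigned long flags)
{
        return kaiser_add_user_map((const void *)addr, size, flags);
}
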
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e4d4a22e8b9430661d28aaa9650fc2f79af6c44b..3178f308609a6987576815920d01c237490a9fb7 100644
@@ -58,6 +58,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 };
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
+unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
 
 static int __init vsyscall_setup(char *str)
 {
@@ -274,10 +275,10 @@ void __init map_vsyscall(void)
        extern char __vvar_page;
        unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
 
+       if (vsyscall_mode != NATIVE)
+               vsyscall_pgprot = __PAGE_KERNEL_VVAR;
        __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
-                    vsyscall_mode == NATIVE
-                    ? PAGE_KERNEL_VSYSCALL
-                    : PAGE_KERNEL_VVAR);
+                    __pgprot(vsyscall_pgprot));
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
                     (unsigned long)VSYSCALL_START);
 
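map_vsyscall() used to pick the protection inline; hoisting the choice into
the global vsyscall_pgprot lets kaiser_init() (below) install the identical
protection on the shadow mapping. Both candidates carry _PAGE_USER and
differ only in executability: NATIVE lets userspace execute the page
directly, while EMULATE makes it read-only so calls trap and are emulated.
The 3.2-era definitions, quoted from memory:

/* arch/x86/include/asm/pgtable_types.h (3.2-era, from memory) */
#define __PAGE_KERNEL_VSYSCALL  (__PAGE_KERNEL_RX | _PAGE_USER)  /* NATIVE  */
#define __PAGE_KERNEL_VVAR      (__PAGE_KERNEL_RO | _PAGE_USER)  /* EMULATE */
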
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 79b0222ffa746af1782d24375c89ff7a537d010d..ab1dfa6075466cf2b4771df7c4cb7b9a9b3c0049 100644
@@ -16,6 +16,7 @@ extern struct mm_struct init_mm;
 
 #include <asm/kaiser.h>
 #include <asm/tlbflush.h>      /* to verify its kaiser declarations */
+#include <asm/vsyscall.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/desc.h>
@@ -133,7 +134,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
                        return NULL;
                spin_lock(&shadow_table_allocation_lock);
                if (pud_none(*pud)) {
-                       set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+                       set_pud(pud, __pud(_PAGE_TABLE | __pa(new_pmd_page)));
                        __inc_zone_page_state(virt_to_page((void *)
                                                new_pmd_page), NR_KAISERTABLE);
                } else
@@ -153,7 +154,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
                        return NULL;
                spin_lock(&shadow_table_allocation_lock);
                if (pmd_none(*pmd)) {
-                       set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+                       set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(new_pte_page)));
                        __inc_zone_page_state(virt_to_page((void *)
                                                new_pte_page), NR_KAISERTABLE);
                } else
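
The switch from _KERNPG_TABLE to _PAGE_TABLE in these two hunks is the heart
of the fix: on x86 the U/S permission is effectively ANDed across all levels
of the page walk, so a PTE marked _PAGE_USER remains supervisor-only if any
PGD/PUD/PMD entry above it lacks the bit. The shadow tables therefore need
_PAGE_USER at the intermediate levels too. The two macros differ by exactly
that bit (3.2-era definitions, quoted from memory):

/* arch/x86/include/asm/pgtable_types.h (3.2-era, from memory) */
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | \
                         _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                         _PAGE_ACCESSED | _PAGE_DIRTY)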
@@ -174,6 +175,9 @@ int kaiser_add_user_map(const void *__start_addr, unsigned long size,
        unsigned long end_addr = PAGE_ALIGN(start_addr + size);
        unsigned long target_address;
 
+       if (flags & _PAGE_USER)
+               BUG_ON(address < FIXADDR_START || end_addr >= FIXADDR_TOP);
+
        for (; address < end_addr; address += PAGE_SIZE) {
                target_address = get_pa_from_mapping(address);
                if (target_address == -1) {
@@ -227,7 +231,7 @@ static void __init kaiser_init_all_pgds(void)
                        break;
                }
                inc_zone_page_state(virt_to_page(pud), NR_KAISERTABLE);
-               new_pgd = __pgd(_KERNPG_TABLE |__pa(pud));
+               new_pgd = __pgd(_PAGE_TABLE |__pa(pud));
                /*
                 * Make sure not to stomp on some other pgd entry.
                 */
@@ -285,6 +289,10 @@ void __init kaiser_init(void)
        kaiser_add_user_map_early((void *)idt_descr.address,
                                  sizeof(gate_desc) * NR_VECTORS,
                                  __PAGE_KERNEL_RO);
+       kaiser_add_user_map_early((void *)VVAR_ADDRESS, PAGE_SIZE,
+                                 __PAGE_KERNEL_VVAR);
+       kaiser_add_user_map_early((void *)VSYSCALL_START, PAGE_SIZE,
+                                 vsyscall_pgprot);
        kaiser_add_user_map_early(&x86_cr3_pcid_noflush,
                                  sizeof(x86_cr3_pcid_noflush),
                                  __PAGE_KERNEL);
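
With _PAGE_USER now set on the vsyscall/vvar PTEs and on every table level
above them, those pages are reachable from userspace even while the shadow
CR3 is active; the BUG_ON added in kaiser_add_user_map() keeps that power
fenced in by refusing any user-visible shadow mapping outside the fixmap
region. Restated as a standalone predicate (hypothetical helper, for
illustration only):

/* Hypothetical restatement of the BUG_ON in kaiser_add_user_map():
 * _PAGE_USER shadow mappings must lie entirely within the fixmap. */
static bool kaiser_user_map_ok(unsigned long start, unsigned long end,
                               unsigned long flags)
{
        if (!(flags & _PAGE_USER))
                return true;    /* kernel-only mappings: anywhere */
        return start >= FIXADDR_START && end < FIXADDR_TOP;
}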