From: Andy Lutomirski
Date: Sun, 28 May 2017 17:00:14 +0000 (-0700)
Subject: x86/mm: Remove the UP asm/tlbflush.h code, always use the (formerly) SMP code
X-Git-Tag: v3.2.98~31
X-Git-Url: http://git.hungrycats.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ebcd6aa6fb360b3e8eff5e1668f16ecffe7a327c;p=linux

x86/mm: Remove the UP asm/tlbflush.h code, always use the (formerly) SMP code

commit ce4a4e565f5264909a18c733b864c3f74467f69e upstream.

The UP asm/tlbflush.h generates somewhat nicer code than the SMP
version.  Aside from that, it's fallen quite a bit behind the SMP code:

 - flush_tlb_mm_range() didn't flush individual pages if the range
   was small.

 - The lazy TLB code was much weaker.  This usually wouldn't matter,
   but, if a kernel thread flushed its lazy "active_mm" more than once
   (due to reclaim or similar), it wouldn't be unlazied and would
   instead pointlessly flush repeatedly.

 - Tracepoints were missing.

Aside from that, simply having the UP code around was a maintenance
burden, since it means that any change to the TLB flush code had to
make sure not to break it.

Simplify everything by deleting the UP code.

Signed-off-by: Andy Lutomirski
Cc: Andrew Morton
Cc: Arjan van de Ven
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Michal Hocko
Cc: Nadav Amit
Cc: Nadav Amit
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar
[Hugh Dickins: Backported to 3.2]
Signed-off-by: Hugh Dickins
[bwh: Fix allnoconfig build failure due to direct use of 'apic' in flush_tlb_others_ipi()]
Signed-off-by: Ben Hutchings
---

diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 55e4de613f0e..33c8d1dea7a6 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -18,8 +18,8 @@ typedef struct {
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
-	unsigned int irq_tlb_count;
 #endif
+	unsigned int irq_tlb_count;
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	unsigned int irq_thermal_count;
 #endif
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 926f67263287..b19dbcf5bdd6 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -20,12 +20,6 @@ typedef struct {
 	void *vdso;
 } mm_context_t;
 
-#ifdef CONFIG_SMP
 void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif
 
 #endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index d21af0363abe..6ab95775921d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -69,10 +69,8 @@ void destroy_context(struct mm_struct *mm);
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-#ifdef CONFIG_SMP
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
 }
 
 extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 190b1cf3a7ce..4a5cd20bd79f 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -6,6 +6,7 @@
 
 #include <asm/processor.h>
 #include <asm/system.h>
+#include <asm/smp.h>
 
 static inline void __invpcid(unsigned long pcid, unsigned long addr,
 			     unsigned long type)
@@ -145,52 +146,8 @@ static inline void __flush_tlb_one(unsigned long addr)
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
  */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (mm == current->active_mm)
-		__flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-					   struct mm_struct *mm,
-					   unsigned long va)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-#else /* SMP */
-
-#include <asm/smp.h>
-
 #define local_flush_tlb() __flush_tlb()
 
 extern void flush_tlb_all(void);
@@ -224,8 +181,6 @@ static inline void reset_lazy_tlbstate(void)
 	percpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
 
-#endif /* SMP */
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
 #endif
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 847ef7c11099..4f5ca8f04c0d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -17,7 +17,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
 			= { &init_mm, 0, };
 
 /*
- *	Smarter SMP flushing macros.
+ *	TLB flushing, formerly SMP-only
  *		c/o Linus Torvalds.
  *
  *	These mean you can really definitely utterly forget about
@@ -38,8 +38,6 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
 *	fixed, at the cost of triggering multiple IPIs in some cases.
 */
 
-#ifdef CONFIG_SMP
-
 union smp_flush_state {
 	struct {
 		struct mm_struct *flush_mm;
@@ -71,8 +69,6 @@ void leave_mm(int cpu)
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
-#endif /* CONFIG_SMP */
-
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
 {
@@ -89,10 +85,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-#ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
-#endif
 		cpumask_set_cpu(cpu, mm_cpumask(next));
 
 		/*
@@ -133,9 +127,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_mm_ldt(next);
-	}
-#ifdef CONFIG_SMP
-	else {
+	} else {
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
 
@@ -151,11 +143,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			load_mm_ldt(next);
 		}
 	}
-#endif
 }
 
-#ifdef CONFIG_SMP
-
 /*
  *
  * The flush IPI assumes that a thread switch happens in this order:
@@ -259,6 +248,7 @@ out:
 static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 				 struct mm_struct *mm, unsigned long va)
 {
+#ifdef CONFIG_SMP
 	unsigned int sender;
 	union smp_flush_state *f;
 
@@ -287,6 +277,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 		f->flush_va = 0;
 	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
 		raw_spin_unlock(&f->tlbstate_lock);
+#endif
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
@@ -437,5 +428,3 @@ void flush_tlb_all(void)
 {
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
-
-#endif /* CONFIG_SMP */