x86/mm: Pass flush_tlb_info to flush_tlb_others() etc
author Andy Lutomirski <luto@kernel.org>
Sun, 28 May 2017 17:00:10 +0000 (10:00 -0700)
committer Ingo Molnar <mingo@kernel.org>
Mon, 5 Jun 2017 07:59:35 +0000 (09:59 +0200)
Rather than passing all the contents of flush_tlb_info to
flush_tlb_others(), pass a pointer to the structure directly. For
consistency, this also removes the unnecessary cpu parameter from
uv_flush_tlb_others() to make its signature match the other
*flush_tlb_others() functions.

This serves two purposes:

 - It will dramatically simplify future patches that change struct
   flush_tlb_info, which I'm planning to do.

 - struct flush_tlb_info is an adequate description of what to do
   for a local flush, too, so by reusing it we can remove duplicated
   code between local and remote flushes in a future patch.

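To make the new calling convention concrete, here is a minimal user-space
sketch of the pattern this patch introduces.  The three struct fields mirror
the flush_tlb_info definition added to <asm/tlbflush.h> below; TLB_FLUSH_ALL,
the mm_struct stub, the hard-coded page shift of 12 and the demo_* names are
placeholders for this sketch only, not kernel code.

  #include <stdio.h>

  #define TLB_FLUSH_ALL	(~0UL)		/* stand-in for the kernel constant */

  struct mm_struct { int dummy; };	/* stub; the real struct is irrelevant here */

  /* Mirrors the new struct flush_tlb_info: one object describes the flush. */
  struct flush_tlb_info {
  	struct mm_struct *mm;
  	unsigned long start;
  	unsigned long end;
  };

  /* New-style callee: a single pointer instead of mm/start/end scalars. */
  static void demo_flush_tlb_others(const struct flush_tlb_info *info)
  {
  	if (info->end == TLB_FLUSH_ALL)
  		printf("flush everything for mm %p\n", (void *)info->mm);
  	else
  		printf("flush %lu page(s) starting at %#lx\n",
  		       (info->end - info->start) >> 12, info->start);
  }

  int main(void)
  {
  	struct mm_struct mm = { 0 };

  	/* Ranged flush: the caller fills the structure once... */
  	struct flush_tlb_info ranged = {
  		.mm	= &mm,
  		.start	= 0x400000UL,
  		.end	= 0x401000UL,
  	};

  	/* ...and a full flush just sets end = TLB_FLUSH_ALL. */
  	struct flush_tlb_info full = {
  		.mm	= NULL,
  		.start	= 0UL,
  		.end	= TLB_FLUSH_ALL,
  	};

  	demo_flush_tlb_others(&ranged);
  	demo_flush_tlb_others(&full);
  	return 0;
  }

In the patch itself, flush_tlb_mm_range() and arch_tlbbatch_flush() build the
structure in exactly this way before handing it to flush_tlb_others().
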
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
[ Fix build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uv/uv.h
arch/x86/mm/tlb.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/xen/mmu_pv.c

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 55fa56fe4e4592b8e21ac6fc37a73d240292b346..9a15739d9f4b9bdf2b4d31c88ebb61ca2b19a430 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -312,11 +312,9 @@ static inline void __flush_tlb_single(unsigned long addr)
 }
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
-                                   struct mm_struct *mm,
-                                   unsigned long start,
-                                   unsigned long end)
+                                   const struct flush_tlb_info *info)
 {
-       PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
+       PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7465d6fe336f5c35d1398f7582f669de15115807..cb976bab62996332f8b808ebaa18315d35005679 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -51,6 +51,7 @@ struct mm_struct;
 struct desc_struct;
 struct task_struct;
 struct cpumask;
+struct flush_tlb_info;
 
 /*
  * Wrapper type for pointers to code which uses the non-standard
@@ -223,9 +224,7 @@ struct pv_mmu_ops {
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const struct cpumask *cpus,
-                                struct mm_struct *mm,
-                                unsigned long start,
-                                unsigned long end);
+                                const struct flush_tlb_info *info);
 
        /* Hooks for allocating and freeing a pagetable top-level */
        int  (*pgd_alloc)(struct mm_struct *mm);
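
A side note on the two paravirt hunks above: because the hook now takes only a
pointer, the opaque forward declaration "struct flush_tlb_info;" added to
paravirt_types.h is all that header needs.  A stand-alone sketch of that C
pattern (the demo_* names are hypothetical; only the shape of the hook matches
the patch):

  #include <stdio.h>

  /* An incomplete type is fine as long as only pointers to it are declared. */
  struct flush_tlb_info;

  /* Hypothetical ops table mirroring the shape of the pv_mmu_ops hook. */
  struct demo_mmu_ops {
  	void (*flush_tlb_others)(const struct flush_tlb_info *info);
  };

  /* The full definition is only required where the fields are accessed. */
  struct flush_tlb_info {
  	unsigned long start;
  	unsigned long end;
  };

  static void demo_native_flush(const struct flush_tlb_info *info)
  {
  	printf("flush [%#lx, %#lx)\n", info->start, info->end);
  }

  int main(void)
  {
  	struct demo_mmu_ops ops = { .flush_tlb_others = demo_native_flush };
  	struct flush_tlb_info info = { .start = 0x1000UL, .end = 0x2000UL };

  	ops.flush_tlb_others(&info);	/* indirect call through the ops table */
  	return 0;
  }
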
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 8f6e2f87511b64a1c12d635e5ebe56770d321e0d..6f439ac92026a6fb60e7c7052d4eee7e7aacb14f 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -220,12 +220,18 @@ static inline void __flush_tlb_one(unsigned long addr)
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
+ *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
  */
 
+struct flush_tlb_info {
+       struct mm_struct *mm;
+       unsigned long start;
+       unsigned long end;
+};
+
 #ifndef CONFIG_SMP
 
 /* "_up" is for UniProcessor.
@@ -279,9 +285,7 @@ static inline void flush_tlb_mm_range(struct mm_struct *mm,
 }
 
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-                                          struct mm_struct *mm,
-                                          unsigned long start,
-                                          unsigned long end)
+                                          const struct flush_tlb_info *info)
 {
 }
 
@@ -317,8 +321,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-                               struct mm_struct *mm,
-                               unsigned long start, unsigned long end);
+                            const struct flush_tlb_info *info);
 
 #define TLBSTATE_OK    1
 #define TLBSTATE_LAZY  2
@@ -340,8 +343,8 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 #endif /* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, start, end) \
-       native_flush_tlb_others(mask, mm, start, end)
+#define flush_tlb_others(mask, info)   \
+       native_flush_tlb_others(mask, info)
 #endif
 
 #endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 6686820feae9e64dfd9145d06d24ebdf104343a7..b5a32231abd89a5925897a19d7f753a0c9ac4d60 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_UV_UV_H
 #define _ASM_X86_UV_UV_H
 
+#include <asm/tlbflush.h>
+
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 
 struct cpumask;
@@ -15,10 +17,7 @@ extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-                                                struct mm_struct *mm,
-                                                unsigned long start,
-                                                unsigned long end,
-                                                unsigned int cpu);
+                                                const struct flush_tlb_info *info);
 
 #else  /* X86_UV */
 
@@ -28,8 +27,8 @@ static inline int is_uv_hubless(void) { return 0; }
 static inline void uv_cpu_init(void)   { }
 static inline void uv_system_init(void)        { }
 static inline const struct cpumask *
-uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
-                   unsigned long start, unsigned long end, unsigned int cpu)
+uv_flush_tlb_others(const struct cpumask *cpumask,
+                   const struct flush_tlb_info *info)
 { return cpumask; }
 
 #endif /* X86_UV */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 743e4c6b4529b5d60af4cb83a4d462ace057fdbf..776469cc54e0b41bda6d9bd6f767f013da77fa2f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
 
 #ifdef CONFIG_SMP
 
-struct flush_tlb_info {
-       struct mm_struct *flush_mm;
-       unsigned long flush_start;
-       unsigned long flush_end;
-};
-
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
@@ -229,11 +223,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
  */
 static void flush_tlb_func(void *info)
 {
-       struct flush_tlb_info *f = info;
+       const struct flush_tlb_info *f = info;
 
        inc_irq_stat(irq_tlb_count);
 
-       if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+       if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;
 
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -243,15 +237,15 @@ static void flush_tlb_func(void *info)
                return;
        }
 
-       if (f->flush_end == TLB_FLUSH_ALL) {
+       if (f->end == TLB_FLUSH_ALL) {
                local_flush_tlb();
                trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
        } else {
                unsigned long addr;
                unsigned long nr_pages =
-                       (f->flush_end - f->flush_start) / PAGE_SIZE;
-               addr = f->flush_start;
-               while (addr < f->flush_end) {
+                       (f->end - f->start) / PAGE_SIZE;
+               addr = f->start;
+               while (addr < f->end) {
                        __flush_tlb_single(addr);
                        addr += PAGE_SIZE;
                }
@@ -260,33 +254,27 @@ static void flush_tlb_func(void *info)
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-                                struct mm_struct *mm, unsigned long start,
-                                unsigned long end)
+                            const struct flush_tlb_info *info)
 {
-       struct flush_tlb_info info;
-
-       info.flush_mm = mm;
-       info.flush_start = start;
-       info.flush_end = end;
-
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-       if (end == TLB_FLUSH_ALL)
+       if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
-                               (end - start) >> PAGE_SHIFT);
+                               (info->end - info->start) >> PAGE_SHIFT);
 
        if (is_uv_system()) {
                unsigned int cpu;
 
                cpu = smp_processor_id();
-               cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
+               cpumask = uv_flush_tlb_others(cpumask, info);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
-                                                               &info, 1);
+                                              (void *)info, 1);
                return;
        }
-       smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
+       smp_call_function_many(cpumask, flush_tlb_func,
+                              (void *)info, 1);
 }
 
 /*
@@ -305,6 +293,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
 {
        unsigned long addr;
+       struct flush_tlb_info info;
        /* do a global flush by default */
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
@@ -347,15 +336,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
        }
        trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
 out:
+       info.mm = mm;
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
-               start = 0UL;
-               end = TLB_FLUSH_ALL;
+               info.start = 0UL;
+               info.end = TLB_FLUSH_ALL;
+       } else {
+               info.start = start;
+               info.end = end;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-               flush_tlb_others(mm_cpumask(mm), mm, start, end);
+               flush_tlb_others(mm_cpumask(mm), &info);
        preempt_enable();
 }
 
+
 static void do_flush_tlb_all(void *info)
 {
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -376,7 +370,7 @@ static void do_kernel_range_flush(void *info)
        unsigned long addr;
 
        /* flush range by one by one 'invlpg' */
-       for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+       for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
 }
 
@@ -389,14 +383,20 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
-               info.flush_start = start;
-               info.flush_end = end;
+               info.start = start;
+               info.end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
 }
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
+       struct flush_tlb_info info = {
+               .mm = NULL,
+               .start = 0UL,
+               .end = TLB_FLUSH_ALL,
+       };
+
        int cpu = get_cpu();
 
        if (cpumask_test_cpu(cpu, &batch->cpumask)) {
@@ -406,7 +406,7 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
        }
 
        if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-               flush_tlb_others(&batch->cpumask, NULL, 0, TLB_FLUSH_ALL);
+               flush_tlb_others(&batch->cpumask, &info);
        cpumask_clear(&batch->cpumask);
 
        put_cpu();
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 42e65fee5673e26a273fff2c12b712913f7d727a..5a22d77ffc2ac9d151455c7d31223f7605fea834 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1121,11 +1121,9 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * done.  The returned pointer is valid till preemption is re-enabled.
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-                                               struct mm_struct *mm,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               unsigned int cpu)
+                                         const struct flush_tlb_info *info)
 {
+       unsigned int cpu = smp_processor_id();
        int locals = 0, remotes = 0, hubs = 0;
        struct bau_desc *bau_desc;
        struct cpumask *flush_mask;
@@ -1179,8 +1177,8 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
        record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-       if (!end || (end - start) <= PAGE_SIZE)
-               address = start;
+       if (!info->end || (info->end - info->start) <= PAGE_SIZE)
+               address = info->start;
        else
                address = TLB_FLUSH_ALL;
 
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 1f386d7fdf708489bf09b40c04ed338281c3e3c2..4b926c6b813c9a9a08c2e5ac5084a3e2da961c2c 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1366,8 +1366,7 @@ static void xen_flush_tlb_single(unsigned long addr)
 }
 
 static void xen_flush_tlb_others(const struct cpumask *cpus,
-                                struct mm_struct *mm, unsigned long start,
-                                unsigned long end)
+                                const struct flush_tlb_info *info)
 {
        struct {
                struct mmuext_op op;
@@ -1379,7 +1378,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
        } *args;
        struct multicall_space mcs;
 
-       trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
+       trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
 
        if (cpumask_empty(cpus))
                return;         /* nothing to do */
@@ -1393,9 +1392,10 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
        cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
        args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-       if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+       if (info->end != TLB_FLUSH_ALL &&
+           (info->end - info->start) <= PAGE_SIZE) {
                args->op.cmd = MMUEXT_INVLPG_MULTI;
-               args->op.arg1.linear_addr = start;
+               args->op.arg1.linear_addr = info->start;
        }
 
        MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);