1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *              Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *              (Gerhard.Wichert@pdb.siemens.de)
37  *
38  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39  */
40
41 #include <linux/kernel_stat.h>
42 #include <linux/mm.h>
43 #include <linux/sched/mm.h>
44 #include <linux/sched/coredump.h>
45 #include <linux/sched/numa_balancing.h>
46 #include <linux/sched/task.h>
47 #include <linux/hugetlb.h>
48 #include <linux/mman.h>
49 #include <linux/swap.h>
50 #include <linux/highmem.h>
51 #include <linux/pagemap.h>
52 #include <linux/ksm.h>
53 #include <linux/rmap.h>
54 #include <linux/export.h>
55 #include <linux/delayacct.h>
56 #include <linux/init.h>
57 #include <linux/pfn_t.h>
58 #include <linux/writeback.h>
59 #include <linux/memcontrol.h>
60 #include <linux/mmu_notifier.h>
61 #include <linux/kallsyms.h>
62 #include <linux/swapops.h>
63 #include <linux/elf.h>
64 #include <linux/gfp.h>
65 #include <linux/migrate.h>
66 #include <linux/string.h>
67 #include <linux/dma-debug.h>
68 #include <linux/debugfs.h>
69 #include <linux/userfaultfd_k.h>
70 #include <linux/dax.h>
71
72 #include <asm/io.h>
73 #include <asm/mmu_context.h>
74 #include <asm/pgalloc.h>
75 #include <linux/uaccess.h>
76 #include <asm/tlb.h>
77 #include <asm/tlbflush.h>
78 #include <asm/pgtable.h>
79
80 #include "internal.h"
81
82 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
83 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
84 #endif
85
86 #ifndef CONFIG_NEED_MULTIPLE_NODES
87 /* use the per-pgdat data instead for discontigmem - mbligh */
88 unsigned long max_mapnr;
89 EXPORT_SYMBOL(max_mapnr);
90
91 struct page *mem_map;
92 EXPORT_SYMBOL(mem_map);
93 #endif
94
95 /*
96  * A number of key systems in x86 including ioremap() rely on the assumption
97  * that high_memory defines the upper bound on direct map memory: the end
98  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
99  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
100  * and ZONE_HIGHMEM.
101  */
102 void *high_memory;
103 EXPORT_SYMBOL(high_memory);
104
105 /*
106  * Randomize the address space (stacks, mmaps, brk, etc.).
107  *
108  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
109  *   as ancient (libc5 based) binaries can segfault. )
110  */
111 int randomize_va_space __read_mostly =
112 #ifdef CONFIG_COMPAT_BRK
113                                         1;
114 #else
115                                         2;
116 #endif
117
118 static int __init disable_randmaps(char *s)
119 {
120         randomize_va_space = 0;
121         return 1;
122 }
123 __setup("norandmaps", disable_randmaps);
124
125 unsigned long zero_pfn __read_mostly;
126 EXPORT_SYMBOL(zero_pfn);
127
128 unsigned long highest_memmap_pfn __read_mostly;
129
130 /*
131  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
132  */
133 static int __init init_zero_pfn(void)
134 {
135         zero_pfn = page_to_pfn(ZERO_PAGE(0));
136         return 0;
137 }
138 core_initcall(init_zero_pfn);
139
140
141 #if defined(SPLIT_RSS_COUNTING)
142
143 void sync_mm_rss(struct mm_struct *mm)
144 {
145         int i;
146
147         for (i = 0; i < NR_MM_COUNTERS; i++) {
148                 if (current->rss_stat.count[i]) {
149                         add_mm_counter(mm, i, current->rss_stat.count[i]);
150                         current->rss_stat.count[i] = 0;
151                 }
152         }
153         current->rss_stat.events = 0;
154 }
155
156 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
157 {
158         struct task_struct *task = current;
159
160         if (likely(task->mm == mm))
161                 task->rss_stat.count[member] += val;
162         else
163                 add_mm_counter(mm, member, val);
164 }
165 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
166 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
167
168 /* sync counter once per 64 page faults */
169 #define TASK_RSS_EVENTS_THRESH  (64)
170 static void check_sync_rss_stat(struct task_struct *task)
171 {
172         if (unlikely(task != current))
173                 return;
174         if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
175                 sync_mm_rss(task->mm);
176 }
177 #else /* SPLIT_RSS_COUNTING */
178
179 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
180 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
181
182 static void check_sync_rss_stat(struct task_struct *task)
183 {
184 }
185
186 #endif /* SPLIT_RSS_COUNTING */
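/*
 * A minimal sketch (using only helpers defined above) of how the split
 * RSS accounting is driven; the fault paths later in this file follow the
 * same pattern:
 *
 *	inc_mm_counter_fast(mm, MM_ANONPAGES);	// cheap per-task counter
 *	check_sync_rss_stat(current);		// fold back every 64 events
 *
 * With SPLIT_RSS_COUNTING the mm-wide counters can therefore lag slightly
 * behind; sync_mm_rss() folds the current task's pending deltas back in.
 */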
187
188 #ifdef HAVE_GENERIC_MMU_GATHER
189
190 static bool tlb_next_batch(struct mmu_gather *tlb)
191 {
192         struct mmu_gather_batch *batch;
193
194         batch = tlb->active;
195         if (batch->next) {
196                 tlb->active = batch->next;
197                 return true;
198         }
199
200         if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
201                 return false;
202
203         batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
204         if (!batch)
205                 return false;
206
207         tlb->batch_count++;
208         batch->next = NULL;
209         batch->nr   = 0;
210         batch->max  = MAX_GATHER_BATCH;
211
212         tlb->active->next = batch;
213         tlb->active = batch;
214
215         return true;
216 }
217
218 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
219                                 unsigned long start, unsigned long end)
220 {
221         tlb->mm = mm;
222
223         /* Is it from 0 to ~0? */
224         tlb->fullmm     = !(start | (end+1));
225         tlb->need_flush_all = 0;
226         tlb->local.next = NULL;
227         tlb->local.nr   = 0;
228         tlb->local.max  = ARRAY_SIZE(tlb->__pages);
229         tlb->active     = &tlb->local;
230         tlb->batch_count = 0;
231
232 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
233         tlb->batch = NULL;
234 #endif
235         tlb->page_size = 0;
236
237         __tlb_reset_range(tlb);
238 }
239
240 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
241 {
242         if (!tlb->end)
243                 return;
244
245         tlb_flush(tlb);
246         mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
247 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
248         tlb_table_flush(tlb);
249 #endif
250         __tlb_reset_range(tlb);
251 }
252
253 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
254 {
255         struct mmu_gather_batch *batch;
256
257         for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
258                 free_pages_and_swap_cache(batch->pages, batch->nr);
259                 batch->nr = 0;
260         }
261         tlb->active = &tlb->local;
262 }
263
264 void tlb_flush_mmu(struct mmu_gather *tlb)
265 {
266         tlb_flush_mmu_tlbonly(tlb);
267         tlb_flush_mmu_free(tlb);
268 }
269
270 /* tlb_finish_mmu
271  *      Called at the end of the shootdown operation to free up any resources
272  *      that were required.
273  */
274 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
275                 unsigned long start, unsigned long end)
276 {
277         struct mmu_gather_batch *batch, *next;
278
279         tlb_flush_mmu(tlb);
280
281         /* keep the page table cache within bounds */
282         check_pgt_cache();
283
284         for (batch = tlb->local.next; batch; batch = next) {
285                 next = batch->next;
286                 free_pages((unsigned long)batch, 0);
287         }
288         tlb->local.next = NULL;
289 }
290
291 /* __tlb_remove_page
292  *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
293  *      handling the additional races in SMP caused by other CPUs caching valid
294  *      mappings in their TLBs. Returns true if the batch is full and the
295  *      caller must call tlb_flush_mmu() before it can remove any further
296  *      pages; returns false otherwise.
297  */
298 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
299 {
300         struct mmu_gather_batch *batch;
301
302         VM_BUG_ON(!tlb->end);
303         VM_WARN_ON(tlb->page_size != page_size);
304
305         batch = tlb->active;
306         /*
307          * Add the page and check if we are full. If so
308          * force a flush.
309          */
310         batch->pages[batch->nr++] = page;
311         if (batch->nr == batch->max) {
312                 if (!tlb_next_batch(tlb))
313                         return true;
314                 batch = tlb->active;
315         }
316         VM_BUG_ON_PAGE(batch->nr > batch->max, page);
317
318         return false;
319 }
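/*
 * Illustrative caller pattern (a sketch loosely mirroring zap_pte_range()
 * below): a true return means the batches are full and nothing more can
 * be gathered until the caller flushes, e.g.
 *
 *	if (__tlb_remove_page(tlb, page)) {
 *		tlb_flush_mmu(tlb);
 *		// ... then resume gathering pages ...
 *	}
 */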
320
321 #endif /* HAVE_GENERIC_MMU_GATHER */
322
323 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
324
325 /*
326  * See the comment near struct mmu_table_batch.
327  */
328
329 static void tlb_remove_table_smp_sync(void *arg)
330 {
331         /* Simply deliver the interrupt */
332 }
333
334 static void tlb_remove_table_one(void *table)
335 {
336         /*
337          * This isn't an RCU grace period and hence the page-tables cannot be
338          * assumed to be actually RCU-freed.
339          *
340          * It is however sufficient for software page-table walkers that rely on
341          * IRQ disabling. See the comment near struct mmu_table_batch.
342          */
343         smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
344         __tlb_remove_table(table);
345 }
346
347 static void tlb_remove_table_rcu(struct rcu_head *head)
348 {
349         struct mmu_table_batch *batch;
350         int i;
351
352         batch = container_of(head, struct mmu_table_batch, rcu);
353
354         for (i = 0; i < batch->nr; i++)
355                 __tlb_remove_table(batch->tables[i]);
356
357         free_page((unsigned long)batch);
358 }
359
360 void tlb_table_flush(struct mmu_gather *tlb)
361 {
362         struct mmu_table_batch **batch = &tlb->batch;
363
364         if (*batch) {
365                 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
366                 *batch = NULL;
367         }
368 }
369
370 void tlb_remove_table(struct mmu_gather *tlb, void *table)
371 {
372         struct mmu_table_batch **batch = &tlb->batch;
373
374         /*
375          * When there are fewer than two users of this mm there cannot be a
376          * concurrent page-table walk.
377          */
378         if (atomic_read(&tlb->mm->mm_users) < 2) {
379                 __tlb_remove_table(table);
380                 return;
381         }
382
383         if (*batch == NULL) {
384                 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
385                 if (*batch == NULL) {
386                         tlb_remove_table_one(table);
387                         return;
388                 }
389                 (*batch)->nr = 0;
390         }
391         (*batch)->tables[(*batch)->nr++] = table;
392         if ((*batch)->nr == MAX_TABLE_BATCH)
393                 tlb_table_flush(tlb);
394 }
395
396 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
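/*
 * Sketch of the architecture side (assumed here, not taken from this
 * tree): an arch selecting HAVE_RCU_TABLE_FREE routes its page-table
 * frees through the batch above instead of freeing directly, e.g.
 *
 *	#define __pte_free_tlb(tlb, pte, addr)	tlb_remove_table(tlb, pte)
 *
 * and supplies __tlb_remove_table() to do the real free once it is safe.
 */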
397
398 /* tlb_gather_mmu
399  *      Called to initialize an (on-stack) mmu_gather structure for page-table
400  *      tear-down from @mm. The @fullmm argument is used when @mm is without
401  *      users and we're going to destroy the full address space (exit/execve).
402  */
403 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
404                         unsigned long start, unsigned long end)
405 {
406         arch_tlb_gather_mmu(tlb, mm, start, end);
407 }
408
409 void tlb_finish_mmu(struct mmu_gather *tlb,
410                 unsigned long start, unsigned long end)
411 {
412         arch_tlb_finish_mmu(tlb, start, end);
413 }
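/*
 * A minimal sketch of the full gather life-cycle (the munmap/exit paths
 * and zap_page_range() below follow this shape):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_vmas(&tlb, vma, start, end);
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * unmap_vmas() feeds pages into the gather via __tlb_remove_page(), and
 * tlb_finish_mmu() does the final TLB flush and frees the batched pages.
 */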
414
415 /*
416  * Note: this doesn't free the actual pages themselves. That
417  * has been handled earlier when unmapping all the memory regions.
418  */
419 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
420                            unsigned long addr)
421 {
422         pgtable_t token = pmd_pgtable(*pmd);
423         pmd_clear(pmd);
424         pte_free_tlb(tlb, token, addr);
425         atomic_long_dec(&tlb->mm->nr_ptes);
426 }
427
428 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
429                                 unsigned long addr, unsigned long end,
430                                 unsigned long floor, unsigned long ceiling)
431 {
432         pmd_t *pmd;
433         unsigned long next;
434         unsigned long start;
435
436         start = addr;
437         pmd = pmd_offset(pud, addr);
438         do {
439                 next = pmd_addr_end(addr, end);
440                 if (pmd_none_or_clear_bad(pmd))
441                         continue;
442                 free_pte_range(tlb, pmd, addr);
443         } while (pmd++, addr = next, addr != end);
444
445         start &= PUD_MASK;
446         if (start < floor)
447                 return;
448         if (ceiling) {
449                 ceiling &= PUD_MASK;
450                 if (!ceiling)
451                         return;
452         }
453         if (end - 1 > ceiling - 1)
454                 return;
455
456         pmd = pmd_offset(pud, start);
457         pud_clear(pud);
458         pmd_free_tlb(tlb, pmd, start);
459         mm_dec_nr_pmds(tlb->mm);
460 }
461
462 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
463                                 unsigned long addr, unsigned long end,
464                                 unsigned long floor, unsigned long ceiling)
465 {
466         pud_t *pud;
467         unsigned long next;
468         unsigned long start;
469
470         start = addr;
471         pud = pud_offset(p4d, addr);
472         do {
473                 next = pud_addr_end(addr, end);
474                 if (pud_none_or_clear_bad(pud))
475                         continue;
476                 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
477         } while (pud++, addr = next, addr != end);
478
479         start &= P4D_MASK;
480         if (start < floor)
481                 return;
482         if (ceiling) {
483                 ceiling &= P4D_MASK;
484                 if (!ceiling)
485                         return;
486         }
487         if (end - 1 > ceiling - 1)
488                 return;
489
490         pud = pud_offset(p4d, start);
491         p4d_clear(p4d);
492         pud_free_tlb(tlb, pud, start);
493 }
494
495 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
496                                 unsigned long addr, unsigned long end,
497                                 unsigned long floor, unsigned long ceiling)
498 {
499         p4d_t *p4d;
500         unsigned long next;
501         unsigned long start;
502
503         start = addr;
504         p4d = p4d_offset(pgd, addr);
505         do {
506                 next = p4d_addr_end(addr, end);
507                 if (p4d_none_or_clear_bad(p4d))
508                         continue;
509                 free_pud_range(tlb, p4d, addr, next, floor, ceiling);
510         } while (p4d++, addr = next, addr != end);
511
512         start &= PGDIR_MASK;
513         if (start < floor)
514                 return;
515         if (ceiling) {
516                 ceiling &= PGDIR_MASK;
517                 if (!ceiling)
518                         return;
519         }
520         if (end - 1 > ceiling - 1)
521                 return;
522
523         p4d = p4d_offset(pgd, start);
524         pgd_clear(pgd);
525         p4d_free_tlb(tlb, p4d, start);
526 }
527
528 /*
529  * This function frees user-level page tables of a process.
530  */
531 void free_pgd_range(struct mmu_gather *tlb,
532                         unsigned long addr, unsigned long end,
533                         unsigned long floor, unsigned long ceiling)
534 {
535         pgd_t *pgd;
536         unsigned long next;
537
538         /*
539          * The next few lines have given us lots of grief...
540          *
541          * Why are we testing PMD* at this top level?  Because often
542          * there will be no work to do at all, and we'd prefer not to
543          * go all the way down to the bottom just to discover that.
544          *
545          * Why all these "- 1"s?  Because 0 represents both the bottom
546          * of the address space and the top of it (using -1 for the
547          * top wouldn't help much: the masks would do the wrong thing).
548          * The rule is that addr 0 and floor 0 refer to the bottom of
549          * the address space, but end 0 and ceiling 0 refer to the top.
550          * Comparisons need to use "end - 1" and "ceiling - 1" (though
551          * that end 0 case should be mythical).
552          *
553          * Wherever addr is brought up or ceiling brought down, we must
554          * be careful to reject "the opposite 0" before it confuses the
555          * subsequent tests.  But what about where end is brought down
556          * by PMD_SIZE below? No, end can't go down to 0 there.
557          *
558          * Whereas we round start (addr) and ceiling down, by different
559          * masks at different levels, in order to test whether a table
560          * now has no other vmas using it (and so can be freed), we don't
561          * bother to round floor or end up - the tests don't need that.
562          */
563
564         addr &= PMD_MASK;
565         if (addr < floor) {
566                 addr += PMD_SIZE;
567                 if (!addr)
568                         return;
569         }
570         if (ceiling) {
571                 ceiling &= PMD_MASK;
572                 if (!ceiling)
573                         return;
574         }
575         if (end - 1 > ceiling - 1)
576                 end -= PMD_SIZE;
577         if (addr > end - 1)
578                 return;
579         /*
580          * We add page table cache pages with PAGE_SIZE (see pte_free_tlb()),
581          * so flush the TLB first if a different page size has been batched.
582          */
583         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
584         pgd = pgd_offset(tlb->mm, addr);
585         do {
586                 next = pgd_addr_end(addr, end);
587                 if (pgd_none_or_clear_bad(pgd))
588                         continue;
589                 free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
590         } while (pgd++, addr = next, addr != end);
591 }
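/*
 * Illustrative floor/ceiling values (a sketch; the real callers live in
 * mm/mmap.c): free_pgtables() below is typically invoked as
 *
 *	free_pgtables(tlb, vma,
 *		      prev ? prev->vm_end : FIRST_USER_ADDRESS,
 *		      next ? next->vm_start : USER_PGTABLES_CEILING);
 *
 * so floor and ceiling come from the neighbouring vmas (or the address
 * space limits), and a page-table page is only freed once no adjacent
 * vma can still be using any part of it.
 */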
592
593 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
594                 unsigned long floor, unsigned long ceiling)
595 {
596         while (vma) {
597                 struct vm_area_struct *next = vma->vm_next;
598                 unsigned long addr = vma->vm_start;
599
600                 /*
601                  * Hide vma from rmap and truncate_pagecache before freeing
602                  * pgtables
603                  */
604                 unlink_anon_vmas(vma);
605                 unlink_file_vma(vma);
606
607                 if (is_vm_hugetlb_page(vma)) {
608                         hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
609                                 floor, next ? next->vm_start : ceiling);
610                 } else {
611                         /*
612                          * Optimization: gather nearby vmas into one call down
613                          */
614                         while (next && next->vm_start <= vma->vm_end + PMD_SIZE
615                                && !is_vm_hugetlb_page(next)) {
616                                 vma = next;
617                                 next = vma->vm_next;
618                                 unlink_anon_vmas(vma);
619                                 unlink_file_vma(vma);
620                         }
621                         free_pgd_range(tlb, addr, vma->vm_end,
622                                 floor, next ? next->vm_start : ceiling);
623                 }
624                 vma = next;
625         }
626 }
627
628 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
629 {
630         spinlock_t *ptl;
631         pgtable_t new = pte_alloc_one(mm, address);
632         if (!new)
633                 return -ENOMEM;
634
635         /*
636          * Ensure all pte setup (eg. pte page lock and page clearing) are
637          * visible before the pte is made visible to other CPUs by being
638          * put into page tables.
639          *
640          * The other side of the story is the pointer chasing in the page
641          * table walking code (when walking the page table without locking;
642          * ie. most of the time). Fortunately, these data accesses consist
643          * of a chain of data-dependent loads, meaning most CPUs (alpha
644          * being the notable exception) will already guarantee loads are
645          * seen in-order. See the alpha page table accessors for the
646          * smp_read_barrier_depends() barriers in page table walking code.
647          */
648         smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
649
650         ptl = pmd_lock(mm, pmd);
651         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
652                 atomic_long_inc(&mm->nr_ptes);
653                 pmd_populate(mm, pmd, new);
654                 new = NULL;
655         }
656         spin_unlock(ptl);
657         if (new)
658                 pte_free(mm, new);
659         return 0;
660 }
661
662 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
663 {
664         pte_t *new = pte_alloc_one_kernel(&init_mm, address);
665         if (!new)
666                 return -ENOMEM;
667
668         smp_wmb(); /* See comment in __pte_alloc */
669
670         spin_lock(&init_mm.page_table_lock);
671         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
672                 pmd_populate_kernel(&init_mm, pmd, new);
673                 new = NULL;
674         }
675         spin_unlock(&init_mm.page_table_lock);
676         if (new)
677                 pte_free_kernel(&init_mm, new);
678         return 0;
679 }
680
681 static inline void init_rss_vec(int *rss)
682 {
683         memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
684 }
685
686 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
687 {
688         int i;
689
690         if (current->mm == mm)
691                 sync_mm_rss(mm);
692         for (i = 0; i < NR_MM_COUNTERS; i++)
693                 if (rss[i])
694                         add_mm_counter(mm, i, rss[i]);
695 }
696
697 /*
698  * This function is called to print an error when a bad pte
699  * is found. For example, we might have a PFN-mapped pte in
700  * a region that doesn't allow it.
701  *
702  * The calling function must still handle the error.
703  */
704 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
705                           pte_t pte, struct page *page)
706 {
707         pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
708         p4d_t *p4d = p4d_offset(pgd, addr);
709         pud_t *pud = pud_offset(p4d, addr);
710         pmd_t *pmd = pmd_offset(pud, addr);
711         struct address_space *mapping;
712         pgoff_t index;
713         static unsigned long resume;
714         static unsigned long nr_shown;
715         static unsigned long nr_unshown;
716
717         /*
718          * Allow a burst of 60 reports, then keep quiet for that minute;
719          * or allow a steady drip of one report per second.
720          */
721         if (nr_shown == 60) {
722                 if (time_before(jiffies, resume)) {
723                         nr_unshown++;
724                         return;
725                 }
726                 if (nr_unshown) {
727                         pr_alert("BUG: Bad page map: %lu messages suppressed\n",
728                                  nr_unshown);
729                         nr_unshown = 0;
730                 }
731                 nr_shown = 0;
732         }
733         if (nr_shown++ == 0)
734                 resume = jiffies + 60 * HZ;
735
736         mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
737         index = linear_page_index(vma, addr);
738
739         pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
740                  current->comm,
741                  (long long)pte_val(pte), (long long)pmd_val(*pmd));
742         if (page)
743                 dump_page(page, "bad pte");
744         pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
745                  (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
746         /*
747          * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
748          */
749         pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
750                  vma->vm_file,
751                  vma->vm_ops ? vma->vm_ops->fault : NULL,
752                  vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
753                  mapping ? mapping->a_ops->readpage : NULL);
754         dump_stack();
755         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
756 }
757
758 /*
759  * vm_normal_page -- This function gets the "struct page" associated with a pte.
760  *
761  * "Special" mappings do not wish to be associated with a "struct page" (either
762  * it doesn't exist, or it exists but they don't want to touch it). In this
763  * case, NULL is returned here. "Normal" mappings do have a struct page.
764  *
765  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
766  * pte bit, in which case this function is trivial. Secondly, an architecture
767  * may not have a spare pte bit, which requires a more complicated scheme,
768  * described below.
769  *
770  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
771  * special mapping (even if there are underlying and valid "struct pages").
772  * COWed pages of a VM_PFNMAP are always normal.
773  *
774  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
775  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
776  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
777  * mapping will always honor the rule
778  *
779  *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
780  *
781  * And for normal mappings this is false.
782  *
783  * This restricts such mappings to be a linear translation from virtual address
784  * to pfn. To get around this restriction, we allow arbitrary mappings so long
785  * as the vma is not a COW mapping; in that case, we know that all ptes are
786  * special (because none can have been COWed).
787  *
788  *
789  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
790  *
791  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
792  * page" backing, however the difference is that _all_ pages with a struct
793  * page (that is, those where pfn_valid is true) are refcounted and considered
794  * normal pages by the VM. The disadvantage is that pages are refcounted
795  * (which can be slower and simply not an option for some PFNMAP users). The
796  * advantage is that we don't have to follow the strict linearity rule of
797  * PFNMAP mappings in order to support COWable mappings.
798  *
799  */
800 #ifdef __HAVE_ARCH_PTE_SPECIAL
801 # define HAVE_PTE_SPECIAL 1
802 #else
803 # define HAVE_PTE_SPECIAL 0
804 #endif
805 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
806                                 pte_t pte)
807 {
808         unsigned long pfn = pte_pfn(pte);
809
810         if (HAVE_PTE_SPECIAL) {
811                 if (likely(!pte_special(pte)))
812                         goto check_pfn;
813                 if (vma->vm_ops && vma->vm_ops->find_special_page)
814                         return vma->vm_ops->find_special_page(vma, addr);
815                 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
816                         return NULL;
817                 if (!is_zero_pfn(pfn))
818                         print_bad_pte(vma, addr, pte, NULL);
819                 return NULL;
820         }
821
822         /* !HAVE_PTE_SPECIAL case follows: */
823
824         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
825                 if (vma->vm_flags & VM_MIXEDMAP) {
826                         if (!pfn_valid(pfn))
827                                 return NULL;
828                         goto out;
829                 } else {
830                         unsigned long off;
831                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
832                         if (pfn == vma->vm_pgoff + off)
833                                 return NULL;
834                         if (!is_cow_mapping(vma->vm_flags))
835                                 return NULL;
836                 }
837         }
838
839         if (is_zero_pfn(pfn))
840                 return NULL;
841 check_pfn:
842         if (unlikely(pfn > highest_memmap_pfn)) {
843                 print_bad_pte(vma, addr, pte, NULL);
844                 return NULL;
845         }
846
847         /*
848          * NOTE! We still have PageReserved() pages in the page tables.
849          * eg. VDSO mappings can cause them to exist.
850          */
851 out:
852         return pfn_to_page(pfn);
853 }
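/*
 * Worked example of the remap_pfn_range() rule described above (names
 * are illustrative): after remap_pfn_range(vma, vma->vm_start, pfn_base,
 * size, prot), vma->vm_pgoff points at the first PFN mapped (pfn_base),
 * so for an address addr inside the vma:
 *
 *	off = (addr - vma->vm_start) >> PAGE_SHIFT;
 *	pte_pfn(pte) == vma->vm_pgoff + off	-> raw PFN mapping: special
 *	pte_pfn(pte) != vma->vm_pgoff + off	-> COWed copy: normal page
 */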
854
855 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
856 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
857                                 pmd_t pmd)
858 {
859         unsigned long pfn = pmd_pfn(pmd);
860
861         /*
862          * There is no pmd_special() but there may be special pmds, e.g.
863          * in a direct-access (dax) mapping, so let's just replicate the
864          * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
865          */
866         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
867                 if (vma->vm_flags & VM_MIXEDMAP) {
868                         if (!pfn_valid(pfn))
869                                 return NULL;
870                         goto out;
871                 } else {
872                         unsigned long off;
873                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
874                         if (pfn == vma->vm_pgoff + off)
875                                 return NULL;
876                         if (!is_cow_mapping(vma->vm_flags))
877                                 return NULL;
878                 }
879         }
880
881         if (is_zero_pfn(pfn))
882                 return NULL;
883         if (unlikely(pfn > highest_memmap_pfn))
884                 return NULL;
885
886         /*
887          * NOTE! We still have PageReserved() pages in the page tables.
888          * eg. VDSO mappings can cause them to exist.
889          */
890 out:
891         return pfn_to_page(pfn);
892 }
893 #endif
894
895 /*
896  * Copy one vm_area from one task to the other. Assumes that the page
897  * tables already present in the new task are empty over the whole range
898  * covered by this vma.
899  */
900
901 static inline unsigned long
902 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
903                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
904                 unsigned long addr, int *rss)
905 {
906         unsigned long vm_flags = vma->vm_flags;
907         pte_t pte = *src_pte;
908         struct page *page;
909
910         /* pte contains position in swap or file, so copy. */
911         if (unlikely(!pte_present(pte))) {
912                 swp_entry_t entry = pte_to_swp_entry(pte);
913
914                 if (likely(!non_swap_entry(entry))) {
915                         if (swap_duplicate(entry) < 0)
916                                 return entry.val;
917
918                         /* make sure dst_mm is on swapoff's mmlist. */
919                         if (unlikely(list_empty(&dst_mm->mmlist))) {
920                                 spin_lock(&mmlist_lock);
921                                 if (list_empty(&dst_mm->mmlist))
922                                         list_add(&dst_mm->mmlist,
923                                                         &src_mm->mmlist);
924                                 spin_unlock(&mmlist_lock);
925                         }
926                         rss[MM_SWAPENTS]++;
927                 } else if (is_migration_entry(entry)) {
928                         page = migration_entry_to_page(entry);
929
930                         rss[mm_counter(page)]++;
931
932                         if (is_write_migration_entry(entry) &&
933                                         is_cow_mapping(vm_flags)) {
934                                 /*
935                                  * COW mappings require pages in both
936                                  * parent and child to be set to read-only.
937                                  */
938                                 make_migration_entry_read(&entry);
939                                 pte = swp_entry_to_pte(entry);
940                                 if (pte_swp_soft_dirty(*src_pte))
941                                         pte = pte_swp_mksoft_dirty(pte);
942                                 set_pte_at(src_mm, addr, src_pte, pte);
943                         }
944                 }
945                 goto out_set_pte;
946         }
947
948         /*
949          * If it's a COW mapping, write protect it both
950          * in the parent and the child
951          */
952         if (is_cow_mapping(vm_flags)) {
953                 ptep_set_wrprotect(src_mm, addr, src_pte);
954                 pte = pte_wrprotect(pte);
955         }
956
957         /*
958          * If it's a shared mapping, mark it clean in
959          * the child
960          */
961         if (vm_flags & VM_SHARED)
962                 pte = pte_mkclean(pte);
963         pte = pte_mkold(pte);
964
965         page = vm_normal_page(vma, addr, pte);
966         if (page) {
967                 get_page(page);
968                 page_dup_rmap(page, false);
969                 rss[mm_counter(page)]++;
970         }
971
972 out_set_pte:
973         set_pte_at(dst_mm, addr, dst_pte, pte);
974         return 0;
975 }
976
977 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
978                    pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
979                    unsigned long addr, unsigned long end)
980 {
981         pte_t *orig_src_pte, *orig_dst_pte;
982         pte_t *src_pte, *dst_pte;
983         spinlock_t *src_ptl, *dst_ptl;
984         int progress = 0;
985         int rss[NR_MM_COUNTERS];
986         swp_entry_t entry = (swp_entry_t){0};
987
988 again:
989         init_rss_vec(rss);
990
991         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
992         if (!dst_pte)
993                 return -ENOMEM;
994         src_pte = pte_offset_map(src_pmd, addr);
995         src_ptl = pte_lockptr(src_mm, src_pmd);
996         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
997         orig_src_pte = src_pte;
998         orig_dst_pte = dst_pte;
999         arch_enter_lazy_mmu_mode();
1000
1001         do {
1002                 /*
1003                  * We are holding two locks at this point - either of them
1004                  * could generate latencies in another task on another CPU.
1005                  */
1006                 if (progress >= 32) {
1007                         progress = 0;
1008                         if (need_resched() ||
1009                             spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1010                                 break;
1011                 }
1012                 if (pte_none(*src_pte)) {
1013                         progress++;
1014                         continue;
1015                 }
1016                 entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
1017                                                         vma, addr, rss);
1018                 if (entry.val)
1019                         break;
1020                 progress += 8;
1021         } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1022
1023         arch_leave_lazy_mmu_mode();
1024         spin_unlock(src_ptl);
1025         pte_unmap(orig_src_pte);
1026         add_mm_rss_vec(dst_mm, rss);
1027         pte_unmap_unlock(orig_dst_pte, dst_ptl);
1028         cond_resched();
1029
1030         if (entry.val) {
1031                 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
1032                         return -ENOMEM;
1033                 progress = 0;
1034         }
1035         if (addr != end)
1036                 goto again;
1037         return 0;
1038 }
1039
1040 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1041                 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
1042                 unsigned long addr, unsigned long end)
1043 {
1044         pmd_t *src_pmd, *dst_pmd;
1045         unsigned long next;
1046
1047         dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1048         if (!dst_pmd)
1049                 return -ENOMEM;
1050         src_pmd = pmd_offset(src_pud, addr);
1051         do {
1052                 next = pmd_addr_end(addr, end);
1053                 if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
1054                         int err;
1055                         VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
1056                         err = copy_huge_pmd(dst_mm, src_mm,
1057                                             dst_pmd, src_pmd, addr, vma);
1058                         if (err == -ENOMEM)
1059                                 return -ENOMEM;
1060                         if (!err)
1061                                 continue;
1062                         /* fall through */
1063                 }
1064                 if (pmd_none_or_clear_bad(src_pmd))
1065                         continue;
1066                 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1067                                                 vma, addr, next))
1068                         return -ENOMEM;
1069         } while (dst_pmd++, src_pmd++, addr = next, addr != end);
1070         return 0;
1071 }
1072
1073 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1074                 p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
1075                 unsigned long addr, unsigned long end)
1076 {
1077         pud_t *src_pud, *dst_pud;
1078         unsigned long next;
1079
1080         dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1081         if (!dst_pud)
1082                 return -ENOMEM;
1083         src_pud = pud_offset(src_p4d, addr);
1084         do {
1085                 next = pud_addr_end(addr, end);
1086                 if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1087                         int err;
1088
1089                         VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
1090                         err = copy_huge_pud(dst_mm, src_mm,
1091                                             dst_pud, src_pud, addr, vma);
1092                         if (err == -ENOMEM)
1093                                 return -ENOMEM;
1094                         if (!err)
1095                                 continue;
1096                         /* fall through */
1097                 }
1098                 if (pud_none_or_clear_bad(src_pud))
1099                         continue;
1100                 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1101                                                 vma, addr, next))
1102                         return -ENOMEM;
1103         } while (dst_pud++, src_pud++, addr = next, addr != end);
1104         return 0;
1105 }
1106
1107 static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1108                 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1109                 unsigned long addr, unsigned long end)
1110 {
1111         p4d_t *src_p4d, *dst_p4d;
1112         unsigned long next;
1113
1114         dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1115         if (!dst_p4d)
1116                 return -ENOMEM;
1117         src_p4d = p4d_offset(src_pgd, addr);
1118         do {
1119                 next = p4d_addr_end(addr, end);
1120                 if (p4d_none_or_clear_bad(src_p4d))
1121                         continue;
1122                 if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
1123                                                 vma, addr, next))
1124                         return -ENOMEM;
1125         } while (dst_p4d++, src_p4d++, addr = next, addr != end);
1126         return 0;
1127 }
1128
1129 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1130                 struct vm_area_struct *vma)
1131 {
1132         pgd_t *src_pgd, *dst_pgd;
1133         unsigned long next;
1134         unsigned long addr = vma->vm_start;
1135         unsigned long end = vma->vm_end;
1136         unsigned long mmun_start;       /* For mmu_notifiers */
1137         unsigned long mmun_end;         /* For mmu_notifiers */
1138         bool is_cow;
1139         int ret;
1140
1141         /*
1142          * Don't copy ptes where a page fault will fill them correctly.
1143          * Fork becomes much lighter when there are big shared or private
1144          * readonly mappings. The tradeoff is that copy_page_range is more
1145          * efficient than faulting.
1146          */
1147         if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1148                         !vma->anon_vma)
1149                 return 0;
1150
1151         if (is_vm_hugetlb_page(vma))
1152                 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1153
1154         if (unlikely(vma->vm_flags & VM_PFNMAP)) {
1155                 /*
1156                  * We do not free on error cases below as remove_vma
1157                  * gets called on error from higher level routine
1158                  */
1159                 ret = track_pfn_copy(vma);
1160                 if (ret)
1161                         return ret;
1162         }
1163
1164         /*
1165          * We need to invalidate the secondary MMU mappings only when
1166          * there could be a permission downgrade on the ptes of the
1167          * parent mm. And a permission downgrade will only happen if
1168          * is_cow_mapping() returns true.
1169          */
1170         is_cow = is_cow_mapping(vma->vm_flags);
1171         mmun_start = addr;
1172         mmun_end   = end;
1173         if (is_cow)
1174                 mmu_notifier_invalidate_range_start(src_mm, mmun_start,
1175                                                     mmun_end);
1176
1177         ret = 0;
1178         dst_pgd = pgd_offset(dst_mm, addr);
1179         src_pgd = pgd_offset(src_mm, addr);
1180         do {
1181                 next = pgd_addr_end(addr, end);
1182                 if (pgd_none_or_clear_bad(src_pgd))
1183                         continue;
1184                 if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
1185                                             vma, addr, next))) {
1186                         ret = -ENOMEM;
1187                         break;
1188                 }
1189         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
1190
1191         if (is_cow)
1192                 mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
1193         return ret;
1194 }
1195
1196 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1197                                 struct vm_area_struct *vma, pmd_t *pmd,
1198                                 unsigned long addr, unsigned long end,
1199                                 struct zap_details *details)
1200 {
1201         struct mm_struct *mm = tlb->mm;
1202         int force_flush = 0;
1203         int rss[NR_MM_COUNTERS];
1204         spinlock_t *ptl;
1205         pte_t *start_pte;
1206         pte_t *pte;
1207         swp_entry_t entry;
1208
1209         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
1210 again:
1211         init_rss_vec(rss);
1212         start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1213         pte = start_pte;
1214         flush_tlb_batched_pending(mm);
1215         arch_enter_lazy_mmu_mode();
1216         do {
1217                 pte_t ptent = *pte;
1218                 if (pte_none(ptent))
1219                         continue;
1220
1221                 if (pte_present(ptent)) {
1222                         struct page *page;
1223
1224                         page = vm_normal_page(vma, addr, ptent);
1225                         if (unlikely(details) && page) {
1226                                 /*
1227                                  * unmap_shared_mapping_pages() wants to
1228                                  * invalidate cache without truncating:
1229                                  * unmap shared but keep private pages.
1230                                  */
1231                                 if (details->check_mapping &&
1232                                     details->check_mapping != page_rmapping(page))
1233                                         continue;
1234                         }
1235                         ptent = ptep_get_and_clear_full(mm, addr, pte,
1236                                                         tlb->fullmm);
1237                         tlb_remove_tlb_entry(tlb, pte, addr);
1238                         if (unlikely(!page))
1239                                 continue;
1240
1241                         if (!PageAnon(page)) {
1242                                 if (pte_dirty(ptent)) {
1243                                         force_flush = 1;
1244                                         set_page_dirty(page);
1245                                 }
1246                                 if (pte_young(ptent) &&
1247                                     likely(!(vma->vm_flags & VM_SEQ_READ)))
1248                                         mark_page_accessed(page);
1249                         }
1250                         rss[mm_counter(page)]--;
1251                         page_remove_rmap(page, false);
1252                         if (unlikely(page_mapcount(page) < 0))
1253                                 print_bad_pte(vma, addr, ptent, page);
1254                         if (unlikely(__tlb_remove_page(tlb, page))) {
1255                                 force_flush = 1;
1256                                 addr += PAGE_SIZE;
1257                                 break;
1258                         }
1259                         continue;
1260                 }
1261                 /* If details->check_mapping, we leave swap entries. */
1262                 if (unlikely(details))
1263                         continue;
1264
1265                 entry = pte_to_swp_entry(ptent);
1266                 if (!non_swap_entry(entry))
1267                         rss[MM_SWAPENTS]--;
1268                 else if (is_migration_entry(entry)) {
1269                         struct page *page;
1270
1271                         page = migration_entry_to_page(entry);
1272                         rss[mm_counter(page)]--;
1273                 }
1274                 if (unlikely(!free_swap_and_cache(entry)))
1275                         print_bad_pte(vma, addr, ptent, NULL);
1276                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1277         } while (pte++, addr += PAGE_SIZE, addr != end);
1278
1279         add_mm_rss_vec(mm, rss);
1280         arch_leave_lazy_mmu_mode();
1281
1282         /* Do the actual TLB flush before dropping ptl */
1283         if (force_flush)
1284                 tlb_flush_mmu_tlbonly(tlb);
1285         pte_unmap_unlock(start_pte, ptl);
1286
1287         /*
1288          * If we forced a TLB flush (either due to running out of
1289          * batch buffers or because we needed to flush dirty TLB
1290          * entries before releasing the ptl), free the batched
1291          * memory too. Restart if we didn't do everything.
1292          */
1293         if (force_flush) {
1294                 force_flush = 0;
1295                 tlb_flush_mmu_free(tlb);
1296                 if (addr != end)
1297                         goto again;
1298         }
1299
1300         return addr;
1301 }
1302
1303 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1304                                 struct vm_area_struct *vma, pud_t *pud,
1305                                 unsigned long addr, unsigned long end,
1306                                 struct zap_details *details)
1307 {
1308         pmd_t *pmd;
1309         unsigned long next;
1310
1311         pmd = pmd_offset(pud, addr);
1312         do {
1313                 next = pmd_addr_end(addr, end);
1314                 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1315                         if (next - addr != HPAGE_PMD_SIZE) {
1316                                 VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
1317                                     !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1318                                 __split_huge_pmd(vma, pmd, addr, false, NULL);
1319                         } else if (zap_huge_pmd(tlb, vma, pmd, addr))
1320                                 goto next;
1321                         /* fall through */
1322                 }
1323                 /*
1324                  * Here there can be other concurrent MADV_DONTNEED or
1325                  * trans huge page faults running, and if the pmd is
1326                  * none or trans huge it can change under us. This is
1327                  * because MADV_DONTNEED holds the mmap_sem in read
1328                  * mode.
1329                  */
1330                 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1331                         goto next;
1332                 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1333 next:
1334                 cond_resched();
1335         } while (pmd++, addr = next, addr != end);
1336
1337         return addr;
1338 }
1339
1340 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1341                                 struct vm_area_struct *vma, p4d_t *p4d,
1342                                 unsigned long addr, unsigned long end,
1343                                 struct zap_details *details)
1344 {
1345         pud_t *pud;
1346         unsigned long next;
1347
1348         pud = pud_offset(p4d, addr);
1349         do {
1350                 next = pud_addr_end(addr, end);
1351                 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1352                         if (next - addr != HPAGE_PUD_SIZE) {
1353                                 VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1354                                 split_huge_pud(vma, pud, addr);
1355                         } else if (zap_huge_pud(tlb, vma, pud, addr))
1356                                 goto next;
1357                         /* fall through */
1358                 }
1359                 if (pud_none_or_clear_bad(pud))
1360                         continue;
1361                 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1362 next:
1363                 cond_resched();
1364         } while (pud++, addr = next, addr != end);
1365
1366         return addr;
1367 }
1368
1369 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1370                                 struct vm_area_struct *vma, pgd_t *pgd,
1371                                 unsigned long addr, unsigned long end,
1372                                 struct zap_details *details)
1373 {
1374         p4d_t *p4d;
1375         unsigned long next;
1376
1377         p4d = p4d_offset(pgd, addr);
1378         do {
1379                 next = p4d_addr_end(addr, end);
1380                 if (p4d_none_or_clear_bad(p4d))
1381                         continue;
1382                 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1383         } while (p4d++, addr = next, addr != end);
1384
1385         return addr;
1386 }
1387
1388 void unmap_page_range(struct mmu_gather *tlb,
1389                              struct vm_area_struct *vma,
1390                              unsigned long addr, unsigned long end,
1391                              struct zap_details *details)
1392 {
1393         pgd_t *pgd;
1394         unsigned long next;
1395
1396         BUG_ON(addr >= end);
1397         tlb_start_vma(tlb, vma);
1398         pgd = pgd_offset(vma->vm_mm, addr);
1399         do {
1400                 next = pgd_addr_end(addr, end);
1401                 if (pgd_none_or_clear_bad(pgd))
1402                         continue;
1403                 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1404         } while (pgd++, addr = next, addr != end);
1405         tlb_end_vma(tlb, vma);
1406 }
1407
1408
1409 static void unmap_single_vma(struct mmu_gather *tlb,
1410                 struct vm_area_struct *vma, unsigned long start_addr,
1411                 unsigned long end_addr,
1412                 struct zap_details *details)
1413 {
1414         unsigned long start = max(vma->vm_start, start_addr);
1415         unsigned long end;
1416
1417         if (start >= vma->vm_end)
1418                 return;
1419         end = min(vma->vm_end, end_addr);
1420         if (end <= vma->vm_start)
1421                 return;
1422
1423         if (vma->vm_file)
1424                 uprobe_munmap(vma, start, end);
1425
1426         if (unlikely(vma->vm_flags & VM_PFNMAP))
1427                 untrack_pfn(vma, 0, 0);
1428
1429         if (start != end) {
1430                 if (unlikely(is_vm_hugetlb_page(vma))) {
1431                         /*
1432                          * It is undesirable to test vma->vm_file as it
1433                          * should be non-null for valid hugetlb area.
1434                          * However, vm_file will be NULL in the error
1435                          * cleanup path of mmap_region. When
1436                          * hugetlbfs ->mmap method fails,
1437                          * mmap_region() nullifies vma->vm_file
1438                          * before calling this function to clean up.
1439                          * Since no pte has actually been setup, it is
1440                          * safe to do nothing in this case.
1441                          */
1442                         if (vma->vm_file) {
1443                                 i_mmap_lock_write(vma->vm_file->f_mapping);
1444                                 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1445                                 i_mmap_unlock_write(vma->vm_file->f_mapping);
1446                         }
1447                 } else
1448                         unmap_page_range(tlb, vma, start, end, details);
1449         }
1450 }
1451
1452 /**
1453  * unmap_vmas - unmap a range of memory covered by a list of vma's
1454  * @tlb: address of the caller's struct mmu_gather
1455  * @vma: the starting vma
1456  * @start_addr: virtual address at which to start unmapping
1457  * @end_addr: virtual address at which to end unmapping
1458  *
1459  * Unmap all pages in the vma list.
1460  *
1461  * Only addresses between @start_addr and @end_addr will be unmapped.
1462  *
1463  * The VMA list must be sorted in ascending virtual address order.
1464  *
1465  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1466  * range after unmap_vmas() returns.  So the only responsibility here is to
1467  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1468  * drops the lock and schedules.
1469  */
1470 void unmap_vmas(struct mmu_gather *tlb,
1471                 struct vm_area_struct *vma, unsigned long start_addr,
1472                 unsigned long end_addr)
1473 {
1474         struct mm_struct *mm = vma->vm_mm;
1475
1476         mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
1477         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1478                 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1479         mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1480 }
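
/*
 * Illustrative sketch, not part of this file: how a whole-address-space
 * teardown (in the spirit of the exit path) is expected to drive
 * unmap_vmas().  The caller owns the mmu_gather, performs the final flush
 * over the whole range and then frees the page tables.  Helpers outside
 * memory.c (free_pgtables() and friends) are recalled from the surrounding
 * mm code and may differ in detail.
 */
static void example_teardown_mm(struct mm_struct *mm)
{
        struct mmu_gather tlb;
        struct vm_area_struct *vma = mm->mmap;

        lru_add_drain();
        flush_cache_mm(mm);
        tlb_gather_mmu(&tlb, mm, 0, -1);        /* cover the whole address space */
        unmap_vmas(&tlb, vma, 0, -1);
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb, 0, -1);            /* final flush of the unmapped range */
}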
1481
1482 /**
1483  * zap_page_range - remove user pages in a given range
1484  * @vma: vm_area_struct holding the applicable pages
1485  * @start: starting address of pages to zap
1486  * @size: number of bytes to zap
1487  *
1488  * Caller must protect the VMA list
1489  */
1490 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1491                 unsigned long size)
1492 {
1493         struct mm_struct *mm = vma->vm_mm;
1494         struct mmu_gather tlb;
1495         unsigned long end = start + size;
1496
1497         lru_add_drain();
1498         tlb_gather_mmu(&tlb, mm, start, end);
1499         update_hiwater_rss(mm);
1500         mmu_notifier_invalidate_range_start(mm, start, end);
1501         for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
1502                 unmap_single_vma(&tlb, vma, start, end, NULL);
1503         mmu_notifier_invalidate_range_end(mm, start, end);
1504         tlb_finish_mmu(&tlb, start, end);
1505 }
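
/*
 * Illustrative sketch, not part of this file: a caller that already holds
 * mmap_sem and wants to drop every user page in [start, end), roughly what
 * an madvise(MADV_DONTNEED)-style path does.  The wrapper name is
 * hypothetical.
 */
static void example_drop_range(struct vm_area_struct *vma,
                               unsigned long start, unsigned long end)
{
        /*
         * zap_page_range() does the lru_add_drain()/mmu_gather/mmu_notifier
         * bookkeeping itself and walks vma->vm_next if the range spans
         * several vmas.
         */
        zap_page_range(vma, start, end - start);
}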
1506
1507 /**
1508  * zap_page_range_single - remove user pages in a given range
1509  * @vma: vm_area_struct holding the applicable pages
1510  * @address: starting address of pages to zap
1511  * @size: number of bytes to zap
1512  * @details: details of shared cache invalidation
1513  *
1514  * The range must fit into one VMA.
1515  */
1516 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1517                 unsigned long size, struct zap_details *details)
1518 {
1519         struct mm_struct *mm = vma->vm_mm;
1520         struct mmu_gather tlb;
1521         unsigned long end = address + size;
1522
1523         lru_add_drain();
1524         tlb_gather_mmu(&tlb, mm, address, end);
1525         update_hiwater_rss(mm);
1526         mmu_notifier_invalidate_range_start(mm, address, end);
1527         unmap_single_vma(&tlb, vma, address, end, details);
1528         mmu_notifier_invalidate_range_end(mm, address, end);
1529         tlb_finish_mmu(&tlb, address, end);
1530 }
1531
1532 /**
1533  * zap_vma_ptes - remove ptes mapping the vma
1534  * @vma: vm_area_struct holding ptes to be zapped
1535  * @address: starting address of pages to zap
1536  * @size: number of bytes to zap
1537  *
1538  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1539  *
1540  * The entire address range must be fully contained within the vma.
1541  *
1542  * Returns 0 if successful.
1543  */
1544 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1545                 unsigned long size)
1546 {
1547         if (address < vma->vm_start || address + size > vma->vm_end ||
1548                         !(vma->vm_flags & VM_PFNMAP))
1549                 return -1;
1550         zap_page_range_single(vma, address, size, NULL);
1551         return 0;
1552 }
1553 EXPORT_SYMBOL_GPL(zap_vma_ptes);
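
/*
 * Illustrative sketch, not part of this file: a driver that set up a
 * VM_PFNMAP mapping (e.g. via remap_pfn_range() or vm_insert_pfn()) and
 * later wants to revoke it, for instance before the backing device goes
 * away.  The function name is hypothetical.
 */
static int example_revoke_pfn_mapping(struct vm_area_struct *vma)
{
        /* The range must lie fully inside a VM_PFNMAP vma, else -1. */
        return zap_vma_ptes(vma, vma->vm_start,
                            vma->vm_end - vma->vm_start);
}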
1554
1555 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1556                         spinlock_t **ptl)
1557 {
1558         pgd_t *pgd;
1559         p4d_t *p4d;
1560         pud_t *pud;
1561         pmd_t *pmd;
1562
1563         pgd = pgd_offset(mm, addr);
1564         p4d = p4d_alloc(mm, pgd, addr);
1565         if (!p4d)
1566                 return NULL;
1567         pud = pud_alloc(mm, p4d, addr);
1568         if (!pud)
1569                 return NULL;
1570         pmd = pmd_alloc(mm, pud, addr);
1571         if (!pmd)
1572                 return NULL;
1573
1574         VM_BUG_ON(pmd_trans_huge(*pmd));
1575         return pte_alloc_map_lock(mm, pmd, addr, ptl);
1576 }
1577
1578 /*
1579  * This is the old fallback for page remapping.
1580  *
1581  * For historical reasons, it only allows reserved pages. Only
1582  * old drivers should use this, and they needed to mark their
1583  * pages reserved for the old functions anyway.
1584  */
1585 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1586                         struct page *page, pgprot_t prot)
1587 {
1588         struct mm_struct *mm = vma->vm_mm;
1589         int retval;
1590         pte_t *pte;
1591         spinlock_t *ptl;
1592
1593         retval = -EINVAL;
1594         if (PageAnon(page))
1595                 goto out;
1596         retval = -ENOMEM;
1597         flush_dcache_page(page);
1598         pte = get_locked_pte(mm, addr, &ptl);
1599         if (!pte)
1600                 goto out;
1601         retval = -EBUSY;
1602         if (!pte_none(*pte))
1603                 goto out_unlock;
1604
1605         /* Ok, finally just insert the thing.. */
1606         get_page(page);
1607         inc_mm_counter_fast(mm, mm_counter_file(page));
1608         page_add_file_rmap(page, false);
1609         set_pte_at(mm, addr, pte, mk_pte(page, prot));
1610
1611         retval = 0;
1612         pte_unmap_unlock(pte, ptl);
1613         return retval;
1614 out_unlock:
1615         pte_unmap_unlock(pte, ptl);
1616 out:
1617         return retval;
1618 }
1619
1620 /**
1621  * vm_insert_page - insert single page into user vma
1622  * @vma: user vma to map to
1623  * @addr: target user address of this page
1624  * @page: source kernel page
1625  *
1626  * This allows drivers to insert individual pages they've allocated
1627  * into a user vma.
1628  *
1629  * The page has to be a nice clean _individual_ kernel allocation.
1630  * If you allocate a compound page, you need to have marked it as
1631  * such (__GFP_COMP), or manually just split the page up yourself
1632  * (see split_page()).
1633  *
1634  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1635  * took an arbitrary page protection parameter. This doesn't allow
1636  * that. Your vma protection will have to be set up correctly, which
1637  * means that if you want a shared writable mapping, you'd better
1638  * ask for a shared writable mapping!
1639  *
1640  * The page does not need to be reserved.
1641  *
1642  * Usually this function is called from f_op->mmap() handler
1643  * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
1644  * Caller must set VM_MIXEDMAP on vma if it wants to call this
1645  * function from other places, for example from page-fault handler.
1646  */
1647 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1648                         struct page *page)
1649 {
1650         if (addr < vma->vm_start || addr >= vma->vm_end)
1651                 return -EFAULT;
1652         if (!page_count(page))
1653                 return -EINVAL;
1654         if (!(vma->vm_flags & VM_MIXEDMAP)) {
1655                 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
1656                 BUG_ON(vma->vm_flags & VM_PFNMAP);
1657                 vma->vm_flags |= VM_MIXEDMAP;
1658         }
1659         return insert_page(vma, addr, page, vma->vm_page_prot);
1660 }
1661 EXPORT_SYMBOL(vm_insert_page);
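
/*
 * Illustrative sketch, not part of this file: the f_op->mmap() usage the
 * comment above describes.  A driver with an array of pre-allocated,
 * non-compound kernel pages inserts them one by one; "struct example_dev"
 * and its fields are hypothetical, and vm_pgoff handling is elided.
 */
struct example_dev {
        struct page **pages;
        unsigned long npages;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_dev *dev = file->private_data;
        unsigned long npages = vma_pages(vma);
        unsigned long i;
        int err;

        if (npages > dev->npages)
                return -EINVAL;

        /* Called under mmap_sem held for write, so vm_flags may be updated. */
        for (i = 0; i < npages; i++) {
                err = vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
                                     dev->pages[i]);
                if (err)
                        return err;
        }
        return 0;
}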
1662
1663 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1664                         pfn_t pfn, pgprot_t prot)
1665 {
1666         struct mm_struct *mm = vma->vm_mm;
1667         int retval;
1668         pte_t *pte, entry;
1669         spinlock_t *ptl;
1670
1671         retval = -ENOMEM;
1672         pte = get_locked_pte(mm, addr, &ptl);
1673         if (!pte)
1674                 goto out;
1675         retval = -EBUSY;
1676         if (!pte_none(*pte))
1677                 goto out_unlock;
1678
1679         /* Ok, finally just insert the thing.. */
1680         if (pfn_t_devmap(pfn))
1681                 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1682         else
1683                 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
1684         set_pte_at(mm, addr, pte, entry);
1685         update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
1686
1687         retval = 0;
1688 out_unlock:
1689         pte_unmap_unlock(pte, ptl);
1690 out:
1691         return retval;
1692 }
1693
1694 /**
1695  * vm_insert_pfn - insert single pfn into user vma
1696  * @vma: user vma to map to
1697  * @addr: target user address of this page
1698  * @pfn: source kernel pfn
1699  *
1700  * Similar to vm_insert_page, this allows drivers to insert individual pages
1701  * they've allocated into a user vma. Same comments apply.
1702  *
1703  * This function should only be called from a vm_ops->fault handler, and
1704  * in that case the handler should return VM_FAULT_NOPAGE on success.
1705  *
1706  * vma cannot be a COW mapping.
1707  *
1708  * As this is called only for pages that do not currently exist, we
1709  * do not need to flush old virtual caches or the TLB.
1710  */
1711 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1712                         unsigned long pfn)
1713 {
1714         return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
1715 }
1716 EXPORT_SYMBOL(vm_insert_pfn);
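
/*
 * Illustrative sketch, not part of this file: a vm_ops->fault handler for a
 * VM_PFNMAP vma backed by device memory.  example_dev_pfn() is a
 * hypothetical helper translating the faulting offset into a pfn; on
 * success the handler reports VM_FAULT_NOPAGE because the pte has already
 * been installed.
 */
static int example_pfn_fault(struct vm_fault *vmf)
{
        unsigned long pfn = example_dev_pfn(vmf->vma, vmf->pgoff); /* hypothetical */
        int err;

        err = vm_insert_pfn(vmf->vma, vmf->address, pfn);
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err < 0 && err != -EBUSY)   /* -EBUSY: raced, pte already present */
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;
}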
1717
1718 /**
1719  * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1720  * @vma: user vma to map to
1721  * @addr: target user address of this page
1722  * @pfn: source kernel pfn
1723  * @pgprot: pgprot flags for the inserted page
1724  *
1725  * This is exactly like vm_insert_pfn, except that it allows drivers to
1726  * override pgprot on a per-page basis.
1727  *
1728  * This only makes sense for IO mappings, and it makes no sense for
1729  * cow mappings.  In general, using multiple vmas is preferable;
1730  * vm_insert_pfn_prot should only be used if using multiple VMAs is
1731  * impractical.
1732  */
1733 int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
1734                         unsigned long pfn, pgprot_t pgprot)
1735 {
1736         int ret;
1737         /*
1738          * Technically, architectures with pte_special can avoid all these
1739          * restrictions (same for remap_pfn_range).  However we would like
1740          * consistency in testing and feature parity among all, so we should
1741          * try to keep these invariants in place for everybody.
1742          */
1743         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1744         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1745                                                 (VM_PFNMAP|VM_MIXEDMAP));
1746         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1747         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1748
1749         if (addr < vma->vm_start || addr >= vma->vm_end)
1750                 return -EFAULT;
1751
1752         track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
1753
1754         ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);
1755
1756         return ret;
1757 }
1758 EXPORT_SYMBOL(vm_insert_pfn_prot);
1759
1760 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1761                         pfn_t pfn)
1762 {
1763         pgprot_t pgprot = vma->vm_page_prot;
1764
1765         BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
1766
1767         if (addr < vma->vm_start || addr >= vma->vm_end)
1768                 return -EFAULT;
1769
1770         track_pfn_insert(vma, &pgprot, pfn);
1771
1772         /*
1773          * If we don't have pte special, then we have to use the pfn_valid()
1774          * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
1775          * refcount the page if pfn_valid is true (hence insert_page rather
1776          * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
1777  * without pte special, it would then be refcounted as a normal page.
1778          */
1779         if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
1780                 struct page *page;
1781
1782                 /*
1783                  * At this point we are committed to insert_page()
1784                  * regardless of whether the caller specified flags that
1785                  * result in pfn_t_has_page() == false.
1786                  */
1787                 page = pfn_to_page(pfn_t_to_pfn(pfn));
1788                 return insert_page(vma, addr, page, pgprot);
1789         }
1790         return insert_pfn(vma, addr, pfn, pgprot);
1791 }
1792 EXPORT_SYMBOL(vm_insert_mixed);
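
/*
 * Illustrative sketch, not part of this file: a device-dax style fault
 * handler for a VM_MIXEDMAP vma, inserting a pfn_t that may or may not have
 * a struct page behind it.  example_phys_for() is a hypothetical helper
 * returning the physical address backing the faulting offset.
 */
static int example_mixed_fault(struct vm_fault *vmf)
{
        phys_addr_t phys = example_phys_for(vmf->vma, vmf->pgoff); /* hypothetical */
        pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);
        int err;

        err = vm_insert_mixed(vmf->vma, vmf->address, pfn);
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err < 0 && err != -EBUSY)
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;
}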
1793
1794 /*
1795  * Maps a range of physical memory into the requested pages. The old
1796  * mappings are removed. Any references to nonexistent pages result
1797  * in null mappings (currently treated as "copy-on-access").
1798  */
1799 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1800                         unsigned long addr, unsigned long end,
1801                         unsigned long pfn, pgprot_t prot)
1802 {
1803         pte_t *pte;
1804         spinlock_t *ptl;
1805
1806         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1807         if (!pte)
1808                 return -ENOMEM;
1809         arch_enter_lazy_mmu_mode();
1810         do {
1811                 BUG_ON(!pte_none(*pte));
1812                 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1813                 pfn++;
1814         } while (pte++, addr += PAGE_SIZE, addr != end);
1815         arch_leave_lazy_mmu_mode();
1816         pte_unmap_unlock(pte - 1, ptl);
1817         return 0;
1818 }
1819
1820 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1821                         unsigned long addr, unsigned long end,
1822                         unsigned long pfn, pgprot_t prot)
1823 {
1824         pmd_t *pmd;
1825         unsigned long next;
1826
1827         pfn -= addr >> PAGE_SHIFT;
1828         pmd = pmd_alloc(mm, pud, addr);
1829         if (!pmd)
1830                 return -ENOMEM;
1831         VM_BUG_ON(pmd_trans_huge(*pmd));
1832         do {
1833                 next = pmd_addr_end(addr, end);
1834                 if (remap_pte_range(mm, pmd, addr, next,
1835                                 pfn + (addr >> PAGE_SHIFT), prot))
1836                         return -ENOMEM;
1837         } while (pmd++, addr = next, addr != end);
1838         return 0;
1839 }
1840
1841 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
1842                         unsigned long addr, unsigned long end,
1843                         unsigned long pfn, pgprot_t prot)
1844 {
1845         pud_t *pud;
1846         unsigned long next;
1847
1848         pfn -= addr >> PAGE_SHIFT;
1849         pud = pud_alloc(mm, p4d, addr);
1850         if (!pud)
1851                 return -ENOMEM;
1852         do {
1853                 next = pud_addr_end(addr, end);
1854                 if (remap_pmd_range(mm, pud, addr, next,
1855                                 pfn + (addr >> PAGE_SHIFT), prot))
1856                         return -ENOMEM;
1857         } while (pud++, addr = next, addr != end);
1858         return 0;
1859 }
1860
1861 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
1862                         unsigned long addr, unsigned long end,
1863                         unsigned long pfn, pgprot_t prot)
1864 {
1865         p4d_t *p4d;
1866         unsigned long next;
1867
1868         pfn -= addr >> PAGE_SHIFT;
1869         p4d = p4d_alloc(mm, pgd, addr);
1870         if (!p4d)
1871                 return -ENOMEM;
1872         do {
1873                 next = p4d_addr_end(addr, end);
1874                 if (remap_pud_range(mm, p4d, addr, next,
1875                                 pfn + (addr >> PAGE_SHIFT), prot))
1876                         return -ENOMEM;
1877         } while (p4d++, addr = next, addr != end);
1878         return 0;
1879 }
1880
1881 /**
1882  * remap_pfn_range - remap kernel memory to userspace
1883  * @vma: user vma to map to
1884  * @addr: target user address to start at
1885  * @pfn: physical address of kernel memory
1886  * @size: size of map area
1887  * @prot: page protection flags for this mapping
1888  *
1889  *  Note: this is only safe if the mm semaphore is held when called.
1890  */
1891 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1892                     unsigned long pfn, unsigned long size, pgprot_t prot)
1893 {
1894         pgd_t *pgd;
1895         unsigned long next;
1896         unsigned long end = addr + PAGE_ALIGN(size);
1897         struct mm_struct *mm = vma->vm_mm;
1898         unsigned long remap_pfn = pfn;
1899         int err;
1900
1901         /*
1902          * Physically remapped pages are special. Tell the
1903          * rest of the world about it:
1904          *   VM_IO tells people not to look at these pages
1905          *      (accesses can have side effects).
1906          *   VM_PFNMAP tells the core MM that the base pages are just
1907          *      raw PFN mappings, and do not have a "struct page" associated
1908          *      with them.
1909          *   VM_DONTEXPAND
1910          *      Disable vma merging and expanding with mremap().
1911          *   VM_DONTDUMP
1912  *      Omit vma from core dump, even when VM_IO is turned off.
1913          *
1914          * There's a horrible special case to handle copy-on-write
1915          * behaviour that some programs depend on. We mark the "original"
1916          * un-COW'ed pages by matching them up with "vma->vm_pgoff".
1917          * See vm_normal_page() for details.
1918          */
1919         if (is_cow_mapping(vma->vm_flags)) {
1920                 if (addr != vma->vm_start || end != vma->vm_end)
1921                         return -EINVAL;
1922                 vma->vm_pgoff = pfn;
1923         }
1924
1925         err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
1926         if (err)
1927                 return -EINVAL;
1928
1929         vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1930
1931         BUG_ON(addr >= end);
1932         pfn -= addr >> PAGE_SHIFT;
1933         pgd = pgd_offset(mm, addr);
1934         flush_cache_range(vma, addr, end);
1935         do {
1936                 next = pgd_addr_end(addr, end);
1937                 err = remap_p4d_range(mm, pgd, addr, next,
1938                                 pfn + (addr >> PAGE_SHIFT), prot);
1939                 if (err)
1940                         break;
1941         } while (pgd++, addr = next, addr != end);
1942
1943         if (err)
1944                 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
1945
1946         return err;
1947 }
1948 EXPORT_SYMBOL(remap_pfn_range);
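
/*
 * Illustrative sketch, not part of this file: the classic driver mmap()
 * pattern that maps a device MMIO region in one go.  mmio_phys/mmio_len are
 * assumed to describe the physical region owned by a hypothetical device.
 */
static int example_mmio_mmap(struct vm_area_struct *vma,
                             phys_addr_t mmio_phys, unsigned long mmio_len)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size > mmio_len)
                return -EINVAL;

        /* Register-style memory usually wants an uncached mapping. */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start, mmio_phys >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}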
1949
1950 /**
1951  * vm_iomap_memory - remap memory to userspace
1952  * @vma: user vma to map to
1953  * @start: start of area
1954  * @len: size of area
1955  *
1956  * This is a simplified io_remap_pfn_range() for common driver use. The
1957  * driver just needs to give us the physical memory range to be mapped,
1958  * we'll figure out the rest from the vma information.
1959  *
1960  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
1961  * whatever write-combining behaviour or similar they need.
1962  */
1963 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1964 {
1965         unsigned long vm_len, pfn, pages;
1966
1967         /* Check that the physical memory area passed in looks valid */
1968         if (start + len < start)
1969                 return -EINVAL;
1970         /*
1971          * You *really* shouldn't map things that aren't page-aligned,
1972          * but we've historically allowed it because IO memory might
1973          * just have smaller alignment.
1974          */
1975         len += start & ~PAGE_MASK;
1976         pfn = start >> PAGE_SHIFT;
1977         pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
1978         if (pfn + pages < pfn)
1979                 return -EINVAL;
1980
1981         /* We start the mapping 'vm_pgoff' pages into the area */
1982         if (vma->vm_pgoff > pages)
1983                 return -EINVAL;
1984         pfn += vma->vm_pgoff;
1985         pages -= vma->vm_pgoff;
1986
1987         /* Can we fit all of the mapping? */
1988         vm_len = vma->vm_end - vma->vm_start;
1989         if (vm_len >> PAGE_SHIFT > pages)
1990                 return -EINVAL;
1991
1992         /* Ok, let it rip */
1993         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1994 }
1995 EXPORT_SYMBOL(vm_iomap_memory);
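
/*
 * Illustrative sketch, not part of this file: the simplified form of the
 * mmap handler above, as a framebuffer-style driver might use it.  Compared
 * with remap_pfn_range(), the caller only supplies the physical range; the
 * size and offset checks are derived from the vma itself.
 */
static int example_iomap_mmap(struct vm_area_struct *vma,
                              phys_addr_t fb_phys, unsigned long fb_len)
{
        /* Optionally ask for write-combining before mapping. */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        return vm_iomap_memory(vma, fb_phys, fb_len);
}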
1996
1997 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1998                                      unsigned long addr, unsigned long end,
1999                                      pte_fn_t fn, void *data)
2000 {
2001         pte_t *pte;
2002         int err;
2003         pgtable_t token;
2004         spinlock_t *uninitialized_var(ptl);
2005
2006         pte = (mm == &init_mm) ?
2007                 pte_alloc_kernel(pmd, addr) :
2008                 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2009         if (!pte)
2010                 return -ENOMEM;
2011
2012         BUG_ON(pmd_huge(*pmd));
2013
2014         arch_enter_lazy_mmu_mode();
2015
2016         token = pmd_pgtable(*pmd);
2017
2018         do {
2019                 err = fn(pte++, token, addr, data);
2020                 if (err)
2021                         break;
2022         } while (addr += PAGE_SIZE, addr != end);
2023
2024         arch_leave_lazy_mmu_mode();
2025
2026         if (mm != &init_mm)
2027                 pte_unmap_unlock(pte-1, ptl);
2028         return err;
2029 }
2030
2031 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2032                                      unsigned long addr, unsigned long end,
2033                                      pte_fn_t fn, void *data)
2034 {
2035         pmd_t *pmd;
2036         unsigned long next;
2037         int err;
2038
2039         BUG_ON(pud_huge(*pud));
2040
2041         pmd = pmd_alloc(mm, pud, addr);
2042         if (!pmd)
2043                 return -ENOMEM;
2044         do {
2045                 next = pmd_addr_end(addr, end);
2046                 err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
2047                 if (err)
2048                         break;
2049         } while (pmd++, addr = next, addr != end);
2050         return err;
2051 }
2052
2053 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2054                                      unsigned long addr, unsigned long end,
2055                                      pte_fn_t fn, void *data)
2056 {
2057         pud_t *pud;
2058         unsigned long next;
2059         int err;
2060
2061         pud = pud_alloc(mm, p4d, addr);
2062         if (!pud)
2063                 return -ENOMEM;
2064         do {
2065                 next = pud_addr_end(addr, end);
2066                 err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
2067                 if (err)
2068                         break;
2069         } while (pud++, addr = next, addr != end);
2070         return err;
2071 }
2072
2073 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2074                                      unsigned long addr, unsigned long end,
2075                                      pte_fn_t fn, void *data)
2076 {
2077         p4d_t *p4d;
2078         unsigned long next;
2079         int err;
2080
2081         p4d = p4d_alloc(mm, pgd, addr);
2082         if (!p4d)
2083                 return -ENOMEM;
2084         do {
2085                 next = p4d_addr_end(addr, end);
2086                 err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
2087                 if (err)
2088                         break;
2089         } while (p4d++, addr = next, addr != end);
2090         return err;
2091 }
2092
2093 /*
2094  * Scan a region of virtual memory, filling in page tables as necessary
2095  * and calling a provided function on each leaf page table.
2096  */
2097 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2098                         unsigned long size, pte_fn_t fn, void *data)
2099 {
2100         pgd_t *pgd;
2101         unsigned long next;
2102         unsigned long end = addr + size;
2103         int err;
2104
2105         if (WARN_ON(addr >= end))
2106                 return -EINVAL;
2107
2108         pgd = pgd_offset(mm, addr);
2109         do {
2110                 next = pgd_addr_end(addr, end);
2111                 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
2112                 if (err)
2113                         break;
2114         } while (pgd++, addr = next, addr != end);
2115
2116         return err;
2117 }
2118 EXPORT_SYMBOL_GPL(apply_to_page_range);
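
/*
 * Illustrative sketch, not part of this file: a pte_fn_t callback matching
 * the signature used by apply_to_pte_range() above.  It merely counts
 * populated ptes in a range; note that apply_to_page_range() allocates any
 * missing intermediate page tables while it walks.
 *
 * Usage: apply_to_page_range(mm, addr, size, example_count_pte, &count);
 */
static int example_count_pte(pte_t *pte, pgtable_t token,
                             unsigned long addr, void *data)
{
        unsigned long *count = data;

        if (!pte_none(*pte))
                (*count)++;
        return 0;
}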
2119
2120 /*
2121  * handle_pte_fault chooses page fault handler according to an entry which was
2122  * read non-atomically.  Before making any commitment, on those architectures
2123  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2124  * parts, do_swap_page must check under lock before unmapping the pte and
2125  * proceeding (but do_wp_page is only called after already making such a check;
2126  * and do_anonymous_page can safely check later on).
2127  */
2128 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2129                                 pte_t *page_table, pte_t orig_pte)
2130 {
2131         int same = 1;
2132 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2133         if (sizeof(pte_t) > sizeof(unsigned long)) {
2134                 spinlock_t *ptl = pte_lockptr(mm, pmd);
2135                 spin_lock(ptl);
2136                 same = pte_same(*page_table, orig_pte);
2137                 spin_unlock(ptl);
2138         }
2139 #endif
2140         pte_unmap(page_table);
2141         return same;
2142 }
2143
2144 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
2145 {
2146         debug_dma_assert_idle(src);
2147
2148         /*
2149          * If the source page was a PFN mapping, we don't have
2150          * a "struct page" for it. We do a best-effort copy by
2151          * just copying from the original user address. If that
2152          * fails, we just zero-fill it. Live with it.
2153          */
2154         if (unlikely(!src)) {
2155                 void *kaddr = kmap_atomic(dst);
2156                 void __user *uaddr = (void __user *)(va & PAGE_MASK);
2157
2158                 /*
2159                  * This really shouldn't fail, because the page is there
2160                  * in the page tables. But it might just be unreadable,
2161                  * in which case we just give up and fill the result with
2162                  * zeroes.
2163                  */
2164                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
2165                         clear_page(kaddr);
2166                 kunmap_atomic(kaddr);
2167                 flush_dcache_page(dst);
2168         } else
2169                 copy_user_highpage(dst, src, va, vma);
2170 }
2171
2172 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2173 {
2174         struct file *vm_file = vma->vm_file;
2175
2176         if (vm_file)
2177                 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2178
2179         /*
2180          * Special mappings (e.g. VDSO) do not have any file so fake
2181          * a default GFP_KERNEL for them.
2182          */
2183         return GFP_KERNEL;
2184 }
2185
2186 /*
2187  * Notify the address space that the page is about to become writable so that
2188  * it can prohibit this or wait for the page to get into an appropriate state.
2189  *
2190  * We do this without the lock held, so that it can sleep if it needs to.
2191  */
2192 static int do_page_mkwrite(struct vm_fault *vmf)
2193 {
2194         int ret;
2195         struct page *page = vmf->page;
2196         unsigned int old_flags = vmf->flags;
2197
2198         vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2199
2200         ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2201         /* Restore original flags so that caller is not surprised */
2202         vmf->flags = old_flags;
2203         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2204                 return ret;
2205         if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2206                 lock_page(page);
2207                 if (!page->mapping) {
2208                         unlock_page(page);
2209                         return 0; /* retry */
2210                 }
2211                 ret |= VM_FAULT_LOCKED;
2212         } else
2213                 VM_BUG_ON_PAGE(!PageLocked(page), page);
2214         return ret;
2215 }
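
/*
 * Illustrative sketch, not part of this file: the shape of a filesystem
 * ->page_mkwrite() handler that satisfies the contract checked above.  It
 * returns with the page locked (VM_FAULT_LOCKED) unless the page was
 * truncated away in the meantime; block allocation and journalling details
 * are elided.
 */
static int example_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);

        lock_page(page);
        if (page->mapping != inode->i_mapping) {
                /* Raced with truncate; make the caller retry the fault. */
                unlock_page(page);
                return VM_FAULT_NOPAGE;
        }
        /* ... ensure backing blocks exist, set_page_dirty(), etc. ... */
        return VM_FAULT_LOCKED;
}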
2216
2217 /*
2218  * Handle dirtying of a page in shared file mapping on a write fault.
2219  *
2220  * The function expects the page to be locked and unlocks it.
2221  */
2222 static void fault_dirty_shared_page(struct vm_area_struct *vma,
2223                                     struct page *page)
2224 {
2225         struct address_space *mapping;
2226         bool dirtied;
2227         bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2228
2229         dirtied = set_page_dirty(page);
2230         VM_BUG_ON_PAGE(PageAnon(page), page);
2231         /*
2232          * Take a local copy of the address_space - page.mapping may be zeroed
2233          * by truncate after unlock_page().   The address_space itself remains
2234          * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
2235          * release semantics to prevent the compiler from undoing this copying.
2236          */
2237         mapping = page_rmapping(page);
2238         unlock_page(page);
2239
2240         if ((dirtied || page_mkwrite) && mapping) {
2241                 /*
2242                  * Some device drivers do not set page.mapping
2243                  * but still dirty their pages
2244                  */
2245                 balance_dirty_pages_ratelimited(mapping);
2246         }
2247
2248         if (!page_mkwrite)
2249                 file_update_time(vma->vm_file);
2250 }
2251
2252 /*
2253  * Handle write page faults for pages that can be reused in the current vma
2254  *
2255  * This can happen either due to the mapping being with the VM_SHARED flag,
2256  * or due to us being the last reference standing to the page. In either
2257  * case, all we need to do here is to mark the page as writable and update
2258  * any related book-keeping.
2259  */
2260 static inline void wp_page_reuse(struct vm_fault *vmf)
2261         __releases(vmf->ptl)
2262 {
2263         struct vm_area_struct *vma = vmf->vma;
2264         struct page *page = vmf->page;
2265         pte_t entry;
2266         /*
2267          * Clear the page's cpupid information as the existing
2268          * information potentially belongs to a now completely
2269          * unrelated process.
2270          */
2271         if (page)
2272                 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2273
2274         flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2275         entry = pte_mkyoung(vmf->orig_pte);
2276         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2277         if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2278                 update_mmu_cache(vma, vmf->address, vmf->pte);
2279         pte_unmap_unlock(vmf->pte, vmf->ptl);
2280 }
2281
2282 /*
2283  * Handle the case of a page which we actually need to copy to a new page.
2284  *
2285  * Called with mmap_sem locked and the old page referenced, but
2286  * without the ptl held.
2287  *
2288  * High level logic flow:
2289  *
2290  * - Allocate a page, copy the content of the old page to the new one.
2291  * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
2292  * - Take the PTL. If the pte changed, bail out and release the allocated page
2293  * - If the pte is still the way we remember it, update the page table and all
2294  *   relevant references. This includes dropping the reference the page-table
2295  *   held to the old page, as well as updating the rmap.
2296  * - In any case, unlock the PTL and drop the reference we took to the old page.
2297  */
2298 static int wp_page_copy(struct vm_fault *vmf)
2299 {
2300         struct vm_area_struct *vma = vmf->vma;
2301         struct mm_struct *mm = vma->vm_mm;
2302         struct page *old_page = vmf->page;
2303         struct page *new_page = NULL;
2304         pte_t entry;
2305         int page_copied = 0;
2306         const unsigned long mmun_start = vmf->address & PAGE_MASK;
2307         const unsigned long mmun_end = mmun_start + PAGE_SIZE;
2308         struct mem_cgroup *memcg;
2309
2310         if (unlikely(anon_vma_prepare(vma)))
2311                 goto oom;
2312
2313         if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
2314                 new_page = alloc_zeroed_user_highpage_movable(vma,
2315                                                               vmf->address);
2316                 if (!new_page)
2317                         goto oom;
2318         } else {
2319                 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
2320                                 vmf->address);
2321                 if (!new_page)
2322                         goto oom;
2323                 cow_user_page(new_page, old_page, vmf->address, vma);
2324         }
2325
2326         if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
2327                 goto oom_free_new;
2328
2329         __SetPageUptodate(new_page);
2330
2331         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2332
2333         /*
2334          * Re-check the pte - we dropped the lock
2335          */
2336         vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
2337         if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2338                 if (old_page) {
2339                         if (!PageAnon(old_page)) {
2340                                 dec_mm_counter_fast(mm,
2341                                                 mm_counter_file(old_page));
2342                                 inc_mm_counter_fast(mm, MM_ANONPAGES);
2343                         }
2344                 } else {
2345                         inc_mm_counter_fast(mm, MM_ANONPAGES);
2346                 }
2347                 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2348                 entry = mk_pte(new_page, vma->vm_page_prot);
2349                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2350                 /*
2351                  * Clear the pte entry and flush it first, before updating the
2352                  * pte with the new entry. This will avoid a race condition
2353                  * seen in the presence of one thread doing SMC and another
2354                  * thread doing COW.
2355                  */
2356                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2357                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
2358                 mem_cgroup_commit_charge(new_page, memcg, false, false);
2359                 lru_cache_add_active_or_unevictable(new_page, vma);
2360                 /*
2361                  * We call the notify macro here because, when using secondary
2362                  * mmu page tables (such as kvm shadow page tables), we want the
2363                  * new page to be mapped directly into the secondary page table.
2364                  */
2365                 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2366                 update_mmu_cache(vma, vmf->address, vmf->pte);
2367                 if (old_page) {
2368                         /*
2369                          * Only after switching the pte to the new page may
2370                          * we remove the mapcount here. Otherwise another
2371                          * process may come and find the rmap count decremented
2372                          * before the pte is switched to the new page, and
2373                          * "reuse" the old page writing into it while our pte
2374                          * here still points into it and can be read by other
2375                          * threads.
2376                          *
2377                          * The critical issue is to order this
2378                          * page_remove_rmap with the ptep_clear_flush above.
2379                          * Those stores are ordered by (if nothing else,)
2380                          * the barrier present in the atomic_add_negative
2381                          * in page_remove_rmap.
2382                          *
2383                          * Then the TLB flush in ptep_clear_flush ensures that
2384                          * no process can access the old page before the
2385                          * decremented mapcount is visible. And the old page
2386                          * cannot be reused until after the decremented
2387                          * mapcount is visible. So transitively, TLBs to
2388                          * old page will be flushed before it can be reused.
2389                          */
2390                         page_remove_rmap(old_page, false);
2391                 }
2392
2393                 /* Free the old page.. */
2394                 new_page = old_page;
2395                 page_copied = 1;
2396         } else {
2397                 mem_cgroup_cancel_charge(new_page, memcg, false);
2398         }
2399
2400         if (new_page)
2401                 put_page(new_page);
2402
2403         pte_unmap_unlock(vmf->pte, vmf->ptl);
2404         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2405         if (old_page) {
2406                 /*
2407                  * Don't let another task, with possibly unlocked vma,
2408                  * keep the mlocked page.
2409                  */
2410                 if (page_copied && (vma->vm_flags & VM_LOCKED)) {
2411                         lock_page(old_page);    /* LRU manipulation */
2412                         if (PageMlocked(old_page))
2413                                 munlock_vma_page(old_page);
2414                         unlock_page(old_page);
2415                 }
2416                 put_page(old_page);
2417         }
2418         return page_copied ? VM_FAULT_WRITE : 0;
2419 oom_free_new:
2420         put_page(new_page);
2421 oom:
2422         if (old_page)
2423                 put_page(old_page);
2424         return VM_FAULT_OOM;
2425 }
2426
2427 /**
2428  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
2429  *                        writeable once the page is prepared
2430  *
2431  * @vmf: structure describing the fault
2432  *
2433  * This function handles all that is needed to finish a write page fault in a
2434  * shared mapping due to PTE being read-only once the mapped page is prepared.
2435  * It handles locking of the PTE and modifying it. The function returns 0
2436  * on success and VM_FAULT_NOPAGE when the PTE changed before we acquired
2437  * the PTE lock.
2438  *
2439  * The function expects the page to be locked or other protection against
2440  * concurrent faults / writeback (such as DAX radix tree locks).
2441  */
2442 int finish_mkwrite_fault(struct vm_fault *vmf)
2443 {
2444         WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
2445         vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
2446                                        &vmf->ptl);
2447         /*
2448          * We might have raced with another page fault while we released the
2449          * pte_offset_map_lock.
2450          */
2451         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2452                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2453                 return VM_FAULT_NOPAGE;
2454         }
2455         wp_page_reuse(vmf);
2456         return 0;
2457 }
2458
2459 /*
2460  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
2461  * mapping
2462  */
2463 static int wp_pfn_shared(struct vm_fault *vmf)
2464 {
2465         struct vm_area_struct *vma = vmf->vma;
2466
2467         if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
2468                 int ret;
2469
2470                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2471                 vmf->flags |= FAULT_FLAG_MKWRITE;
2472                 ret = vma->vm_ops->pfn_mkwrite(vmf);
2473                 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
2474                         return ret;
2475                 return finish_mkwrite_fault(vmf);
2476         }
2477         wp_page_reuse(vmf);
2478         return VM_FAULT_WRITE;
2479 }
2480
2481 static int wp_page_shared(struct vm_fault *vmf)
2482         __releases(vmf->ptl)
2483 {
2484         struct vm_area_struct *vma = vmf->vma;
2485
2486         get_page(vmf->page);
2487
2488         if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2489                 int tmp;
2490
2491                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2492                 tmp = do_page_mkwrite(vmf);
2493                 if (unlikely(!tmp || (tmp &
2494                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
2495                         put_page(vmf->page);
2496                         return tmp;
2497                 }
2498                 tmp = finish_mkwrite_fault(vmf);
2499                 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2500                         unlock_page(vmf->page);
2501                         put_page(vmf->page);
2502                         return tmp;
2503                 }
2504         } else {
2505                 wp_page_reuse(vmf);
2506                 lock_page(vmf->page);
2507         }
2508         fault_dirty_shared_page(vma, vmf->page);
2509         put_page(vmf->page);
2510
2511         return VM_FAULT_WRITE;
2512 }
2513
2514 /*
2515  * This routine handles present pages, when users try to write
2516  * to a shared page. It is done by copying the page to a new address
2517  * and decrementing the shared-page counter for the old page.
2518  *
2519  * Note that this routine assumes that the protection checks have been
2520  * done by the caller (the low-level page fault routine in most cases).
2521  * Thus we can safely just mark it writable once we've done any necessary
2522  * COW.
2523  *
2524  * We also mark the page dirty at this point even though the page will
2525  * change only once the write actually happens. This avoids a few races,
2526  * and potentially makes it more efficient.
2527  *
2528  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2529  * but allow concurrent faults), with pte both mapped and locked.
2530  * We return with mmap_sem still held, but pte unmapped and unlocked.
2531  */
2532 static int do_wp_page(struct vm_fault *vmf)
2533         __releases(vmf->ptl)
2534 {
2535         struct vm_area_struct *vma = vmf->vma;
2536
2537         vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
2538         if (!vmf->page) {
2539                 /*
2540                  * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
2541                  * VM_PFNMAP VMA.
2542                  *
2543                  * We should not cow pages in a shared writeable mapping.
2544                  * Just mark the pages writable and/or call ops->pfn_mkwrite.
2545                  */
2546                 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2547                                      (VM_WRITE|VM_SHARED))
2548                         return wp_pfn_shared(vmf);
2549
2550                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2551                 return wp_page_copy(vmf);
2552         }
2553
2554         /*
2555          * Take out anonymous pages first, anonymous shared vmas are
2556          * not dirty accountable.
2557          */
2558         if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
2559                 int total_mapcount;
2560                 if (!trylock_page(vmf->page)) {
2561                         get_page(vmf->page);
2562                         pte_unmap_unlock(vmf->pte, vmf->ptl);
2563                         lock_page(vmf->page);
2564                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
2565                                         vmf->address, &vmf->ptl);
2566                         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2567                                 unlock_page(vmf->page);
2568                                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2569                                 put_page(vmf->page);
2570                                 return 0;
2571                         }
2572                         put_page(vmf->page);
2573                 }
2574                 if (reuse_swap_page(vmf->page, &total_mapcount)) {
2575                         if (total_mapcount == 1) {
2576                                 /*
2577                                  * The page is all ours. Move it to
2578                                  * our anon_vma so the rmap code will
2579                                  * not search our parent or siblings.
2580                                  * Protected against the rmap code by
2581                                  * the page lock.
2582                                  */
2583                                 page_move_anon_rmap(vmf->page, vma);
2584                         }
2585                         unlock_page(vmf->page);
2586                         wp_page_reuse(vmf);
2587                         return VM_FAULT_WRITE;
2588                 }
2589                 unlock_page(vmf->page);
2590         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2591                                         (VM_WRITE|VM_SHARED))) {
2592                 return wp_page_shared(vmf);
2593         }
2594
2595         /*
2596          * Ok, we need to copy. Oh, well..
2597          */
2598         get_page(vmf->page);
2599
2600         pte_unmap_unlock(vmf->pte, vmf->ptl);
2601         return wp_page_copy(vmf);
2602 }
2603
2604 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
2605                 unsigned long start_addr, unsigned long end_addr,
2606                 struct zap_details *details)
2607 {
2608         zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
2609 }
2610
2611 static inline void unmap_mapping_range_tree(struct rb_root *root,
2612                                             struct zap_details *details)
2613 {
2614         struct vm_area_struct *vma;
2615         pgoff_t vba, vea, zba, zea;
2616
2617         vma_interval_tree_foreach(vma, root,
2618                         details->first_index, details->last_index) {
2619
2620                 vba = vma->vm_pgoff;
2621                 vea = vba + vma_pages(vma) - 1;
2622                 zba = details->first_index;
2623                 if (zba < vba)
2624                         zba = vba;
2625                 zea = details->last_index;
2626                 if (zea > vea)
2627                         zea = vea;
2628
2629                 unmap_mapping_range_vma(vma,
2630                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2631                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2632                                 details);
2633         }
2634 }
2635
2636 /**
2637  * unmap_mapping_range - unmap the portion of all mmaps in the specified
2638  * address_space corresponding to the specified page range in the underlying
2639  * file.
2640  *
2641  * @mapping: the address space containing mmaps to be unmapped.
2642  * @holebegin: byte in first page to unmap, relative to the start of
2643  * the underlying file.  This will be rounded down to a PAGE_SIZE
2644  * boundary.  Note that this is different from truncate_pagecache(), which
2645  * must keep the partial page.  In contrast, we must get rid of
2646  * partial pages.
2647  * @holelen: size of prospective hole in bytes.  This will be rounded
2648  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
2649  * end of the file.
2650  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
2651  * but 0 when invalidating pagecache, don't throw away private data.
2652  */
2653 void unmap_mapping_range(struct address_space *mapping,
2654                 loff_t const holebegin, loff_t const holelen, int even_cows)
2655 {
2656         struct zap_details details = { };
2657         pgoff_t hba = holebegin >> PAGE_SHIFT;
2658         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2659
2660         /* Check for overflow. */
2661         if (sizeof(holelen) > sizeof(hlen)) {
2662                 long long holeend =
2663                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2664                 if (holeend & ~(long long)ULONG_MAX)
2665                         hlen = ULONG_MAX - hba + 1;
2666         }
2667
2668         details.check_mapping = even_cows ? NULL : mapping;
2669         details.first_index = hba;
2670         details.last_index = hba + hlen - 1;
2671         if (details.last_index < details.first_index)
2672                 details.last_index = ULONG_MAX;
2673
2674         i_mmap_lock_write(mapping);
2675         if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
2676                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
2677         i_mmap_unlock_write(mapping);
2678 }
2679 EXPORT_SYMBOL(unmap_mapping_range);
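
/*
 * Illustrative sketch, not part of this file: the truncate-style caller
 * mentioned in the comment above.  The pattern follows truncate_pagecache()
 * in mm/truncate.c (recalled from memory, details may differ): the first
 * unmap keeps truncate_inode_pages() from doing lots of single-page unmaps,
 * and the second catches pages (re)faulted or COWed in the meantime.
 */
static void example_truncate_mappings(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        unmap_mapping_range(mapping, holebegin, 0, 1);  /* even_cows = 1 */
        truncate_inode_pages(mapping, newsize);
        unmap_mapping_range(mapping, holebegin, 0, 1);
}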
2680
2681 /*
2682  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2683  * but allow concurrent faults), and pte mapped but not yet locked.
2684  * We return with pte unmapped and unlocked.
2685  *
2686  * We return with the mmap_sem locked or unlocked in the same cases
2687  * as does filemap_fault().
2688  */
2689 int do_swap_page(struct vm_fault *vmf)
2690 {
2691         struct vm_area_struct *vma = vmf->vma;
2692         struct page *page, *swapcache;
2693         struct mem_cgroup *memcg;
2694         swp_entry_t entry;
2695         pte_t pte;
2696         int locked;
2697         int exclusive = 0;
2698         int ret = 0;
2699
2700         if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
2701                 goto out;
2702
2703         entry = pte_to_swp_entry(vmf->orig_pte);
2704         if (unlikely(non_swap_entry(entry))) {
2705                 if (is_migration_entry(entry)) {
2706                         migration_entry_wait(vma->vm_mm, vmf->pmd,
2707                                              vmf->address);
2708                 } else if (is_hwpoison_entry(entry)) {
2709                         ret = VM_FAULT_HWPOISON;
2710                 } else {
2711                         print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
2712                         ret = VM_FAULT_SIGBUS;
2713                 }
2714                 goto out;
2715         }
2716         delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2717         page = lookup_swap_cache(entry);
2718         if (!page) {
2719                 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma,
2720                                         vmf->address);
2721                 if (!page) {
2722                         /*
2723                          * Back out if somebody else faulted in this pte
2724                          * while we released the pte lock.
2725                          */
2726                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
2727                                         vmf->address, &vmf->ptl);
2728                         if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
2729                                 ret = VM_FAULT_OOM;
2730                         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2731                         goto unlock;
2732                 }
2733
2734                 /* Had to read the page from swap area: Major fault */
2735                 ret = VM_FAULT_MAJOR;
2736                 count_vm_event(PGMAJFAULT);
2737                 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
2738         } else if (PageHWPoison(page)) {
2739                 /*
2740                  * hwpoisoned dirty swapcache pages are kept for killing
2741                  * owner processes (which may be unknown at hwpoison time)
2742                  */
2743                 ret = VM_FAULT_HWPOISON;
2744                 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2745                 swapcache = page;
2746                 goto out_release;
2747         }
2748
2749         swapcache = page;
2750         locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
2751
2752         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2753         if (!locked) {
2754                 ret |= VM_FAULT_RETRY;
2755                 goto out_release;
2756         }
2757
2758         /*
2759          * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
2760          * release the swapcache from under us.  The page pin, and pte_same
2761          * test below, are not enough to exclude that.  Even if it is still
2762          * swapcache, we need to check that the page's swap has not changed.
2763          */
2764         if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
2765                 goto out_page;
2766
2767         page = ksm_might_need_to_copy(page, vma, vmf->address);
2768         if (unlikely(!page)) {
2769                 ret = VM_FAULT_OOM;
2770                 page = swapcache;
2771                 goto out_page;
2772         }
2773
2774         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
2775                                 &memcg, false)) {
2776                 ret = VM_FAULT_OOM;
2777                 goto out_page;
2778         }
2779
2780         /*
2781          * Back out if somebody else already faulted in this pte.
2782          */
2783         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
2784                         &vmf->ptl);
2785         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
2786                 goto out_nomap;
2787
2788         if (unlikely(!PageUptodate(page))) {
2789                 ret = VM_FAULT_SIGBUS;
2790                 goto out_nomap;
2791         }
2792
2793         /*
2794          * The page isn't present yet, go ahead with the fault.
2795          *
2796          * Be careful about the sequence of operations here.
2797          * To get its accounting right, reuse_swap_page() must be called
2798          * while the page is counted on swap but not yet in mapcount i.e.
2799          * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
2800          * must be called after the swap_free(), or it will never succeed.
2801          */
2802
2803         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
2804         dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
2805         pte = mk_pte(page, vma->vm_page_prot);
2806         if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
2807                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2808                 vmf->flags &= ~FAULT_FLAG_WRITE;
2809                 ret |= VM_FAULT_WRITE;
2810                 exclusive = RMAP_EXCLUSIVE;
2811         }
2812         flush_icache_page(vma, page);
2813         if (pte_swp_soft_dirty(vmf->orig_pte))
2814                 pte = pte_mksoft_dirty(pte);
2815         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
2816         vmf->orig_pte = pte;
2817         if (page == swapcache) {
2818                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
2819                 mem_cgroup_commit_charge(page, memcg, true, false);
2820                 activate_page(page);
2821         } else { /* ksm created a completely new copy */
2822                 page_add_new_anon_rmap(page, vma, vmf->address, false);
2823                 mem_cgroup_commit_charge(page, memcg, false, false);
2824                 lru_cache_add_active_or_unevictable(page, vma);
2825         }
2826
2827         swap_free(entry);
2828         if (mem_cgroup_swap_full(page) ||
2829             (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
2830                 try_to_free_swap(page);
2831         unlock_page(page);
2832         if (page != swapcache) {
2833                 /*
2834                  * Hold the lock to prevent the swap entry from being reused
2835                  * until we take the PT lock for the pte_same() check
2836                  * (to avoid false positives from pte_same). For
2837                  * further safety release the lock after the swap_free
2838                  * so that the swap count won't change under a
2839                  * parallel locked swapcache.
2840                  */
2841                 unlock_page(swapcache);
2842                 put_page(swapcache);
2843         }
2844
2845         if (vmf->flags & FAULT_FLAG_WRITE) {
2846                 ret |= do_wp_page(vmf);
2847                 if (ret & VM_FAULT_ERROR)
2848                         ret &= VM_FAULT_ERROR;
2849                 goto out;
2850         }
2851
2852         /* No need to invalidate - it was non-present before */
2853         update_mmu_cache(vma, vmf->address, vmf->pte);
2854 unlock:
2855         pte_unmap_unlock(vmf->pte, vmf->ptl);
2856 out:
2857         return ret;
2858 out_nomap:
2859         mem_cgroup_cancel_charge(page, memcg, false);
2860         pte_unmap_unlock(vmf->pte, vmf->ptl);
2861 out_page:
2862         unlock_page(page);
2863 out_release:
2864         put_page(page);
2865         if (page != swapcache) {
2866                 unlock_page(swapcache);
2867                 put_page(swapcache);
2868         }
2869         return ret;
2870 }
2871
2872 /*
2873  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2874  * but allow concurrent faults), and pte mapped but not yet locked.
2875  * We return with mmap_sem still held, but pte unmapped and unlocked.
2876  */
2877 static int do_anonymous_page(struct vm_fault *vmf)
2878 {
2879         struct vm_area_struct *vma = vmf->vma;
2880         struct mem_cgroup *memcg;
2881         struct page *page;
2882         pte_t entry;
2883
2884         /* File mapping without ->vm_ops ? */
2885         if (vma->vm_flags & VM_SHARED)
2886                 return VM_FAULT_SIGBUS;
2887
2888         /*
2889          * Use pte_alloc() instead of pte_alloc_map().  We can't run
2890          * pte_offset_map() on pmds where a huge pmd might be created
2891          * from a different thread.
2892          *
2893          * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2894          * parallel threads are excluded by other means.
2895          *
2896          * Here we only have down_read(mmap_sem).
2897          */
2898         if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
2899                 return VM_FAULT_OOM;
2900
2901         /* See the comment in pte_alloc_one_map() */
2902         if (unlikely(pmd_trans_unstable(vmf->pmd)))
2903                 return 0;
2904
2905         /* Use the zero-page for reads */
2906         if (!(vmf->flags & FAULT_FLAG_WRITE) &&
2907                         !mm_forbids_zeropage(vma->vm_mm)) {
2908                 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
2909                                                 vma->vm_page_prot));
2910                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
2911                                 vmf->address, &vmf->ptl);
2912                 if (!pte_none(*vmf->pte))
2913                         goto unlock;
2914                 /* Deliver the page fault to userland, check inside PT lock */
2915                 if (userfaultfd_missing(vma)) {
2916                         pte_unmap_unlock(vmf->pte, vmf->ptl);
2917                         return handle_userfault(vmf, VM_UFFD_MISSING);
2918                 }
2919                 goto setpte;
2920         }
2921
2922         /* Allocate our own private page. */
2923         if (unlikely(anon_vma_prepare(vma)))
2924                 goto oom;
2925         page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
2926         if (!page)
2927                 goto oom;
2928
2929         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2930                 goto oom_free_page;
2931
2932         /*
2933          * The memory barrier inside __SetPageUptodate makes sure that
2934          * preceding stores to the page contents become visible before
2935          * the set_pte_at() write.
2936          */
2937         __SetPageUptodate(page);
2938
2939         entry = mk_pte(page, vma->vm_page_prot);
2940         if (vma->vm_flags & VM_WRITE)
2941                 entry = pte_mkwrite(pte_mkdirty(entry));
2942
2943         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
2944                         &vmf->ptl);
2945         if (!pte_none(*vmf->pte))
2946                 goto release;
2947
2948         /* Deliver the page fault to userland, check inside PT lock */
2949         if (userfaultfd_missing(vma)) {
2950                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2951                 mem_cgroup_cancel_charge(page, memcg, false);
2952                 put_page(page);
2953                 return handle_userfault(vmf, VM_UFFD_MISSING);
2954         }
2955
2956         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
2957         page_add_new_anon_rmap(page, vma, vmf->address, false);
2958         mem_cgroup_commit_charge(page, memcg, false, false);
2959         lru_cache_add_active_or_unevictable(page, vma);
2960 setpte:
2961         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
2962
2963         /* No need to invalidate - it was non-present before */
2964         update_mmu_cache(vma, vmf->address, vmf->pte);
2965 unlock:
2966         pte_unmap_unlock(vmf->pte, vmf->ptl);
2967         return 0;
2968 release:
2969         mem_cgroup_cancel_charge(page, memcg, false);
2970         put_page(page);
2971         goto unlock;
2972 oom_free_page:
2973         put_page(page);
2974 oom:
2975         return VM_FAULT_OOM;
2976 }
2977
2978 /*
2979  * The mmap_sem must have been held on entry, and may have been
2980  * released depending on flags and vma->vm_ops->fault() return value.
2981  * See filemap_fault() and __lock_page_retry().
2982  */
2983 static int __do_fault(struct vm_fault *vmf)
2984 {
2985         struct vm_area_struct *vma = vmf->vma;
2986         int ret;
2987
2988         ret = vma->vm_ops->fault(vmf);
2989         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
2990                             VM_FAULT_DONE_COW)))
2991                 return ret;
2992
2993         if (unlikely(PageHWPoison(vmf->page))) {
2994                 if (ret & VM_FAULT_LOCKED)
2995                         unlock_page(vmf->page);
2996                 put_page(vmf->page);
2997                 vmf->page = NULL;
2998                 return VM_FAULT_HWPOISON;
2999         }
3000
3001         if (unlikely(!(ret & VM_FAULT_LOCKED)))
3002                 lock_page(vmf->page);
3003         else
3004                 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3005
3006         return ret;
3007 }
3008
3009 /*
3010  * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
3011  * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
3012  * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
3013  * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
3014  */
3015 static int pmd_devmap_trans_unstable(pmd_t *pmd)
3016 {
3017         return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
3018 }
3019
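/*
 * Make sure the fault address is covered by a page table and map + lock
 * its pte: populate the pmd from vmf->prealloc_pte (or allocate a fresh
 * page table) if the pmd is still empty, bail out with VM_FAULT_NOPAGE
 * if a huge or devmap pmd raced in, and otherwise return 0 with vmf->pte
 * mapped and vmf->ptl held.
 */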
3020 static int pte_alloc_one_map(struct vm_fault *vmf)
3021 {
3022         struct vm_area_struct *vma = vmf->vma;
3023
3024         if (!pmd_none(*vmf->pmd))
3025                 goto map_pte;
3026         if (vmf->prealloc_pte) {
3027                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3028                 if (unlikely(!pmd_none(*vmf->pmd))) {
3029                         spin_unlock(vmf->ptl);
3030                         goto map_pte;
3031                 }
3032
3033                 atomic_long_inc(&vma->vm_mm->nr_ptes);
3034                 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3035                 spin_unlock(vmf->ptl);
3036                 vmf->prealloc_pte = NULL;
3037         } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
3038                 return VM_FAULT_OOM;
3039         }
3040 map_pte:
3041         /*
3042          * If a huge pmd materialized under us, just retry later.  Use
3043          * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
3044          * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
3045          * under us and then back to pmd_none, as a result of MADV_DONTNEED
3046          * running immediately after a huge pmd fault in a different thread of
3047          * this mm, in turn leading to a misleading pmd_trans_huge() retval.
3048          * All we have to ensure is that it is a regular pmd that we can walk
3049          * with pte_offset_map() and we can do that through an atomic read in
3050          * C, which is what pmd_trans_unstable() provides.
3051          */
3052         if (pmd_devmap_trans_unstable(vmf->pmd))
3053                 return VM_FAULT_NOPAGE;
3054
3055         /*
3056          * At this point we know that our vmf->pmd points to a page of ptes
3057          * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
3058          * for the duration of the fault.  If a racing MADV_DONTNEED runs and
3059          * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3060          * be valid and we will re-check to make sure the vmf->pte isn't
3061          * pte_none() under vmf->ptl protection when we return to
3062          * alloc_set_pte().
3063          */
3064         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3065                         &vmf->ptl);
3066         return 0;
3067 }
3068
3069 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3070
3071 #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
3072 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
3073                 unsigned long haddr)
3074 {
3075         if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
3076                         (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
3077                 return false;
3078         if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
3079                 return false;
3080         return true;
3081 }
3082
3083 static void deposit_prealloc_pte(struct vm_fault *vmf)
3084 {
3085         struct vm_area_struct *vma = vmf->vma;
3086
3087         pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3088         /*
3089          * We are going to consume the prealloc table,
3090          * count that as nr_ptes.
3091          */
3092         atomic_long_inc(&vma->vm_mm->nr_ptes);
3093         vmf->prealloc_pte = NULL;
3094 }
3095
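/*
 * Map a compound page-cache page with a huge pmd at the fault address.
 * Returns VM_FAULT_FALLBACK if the vma or the pmd can't take a huge
 * mapping, in which case the caller falls back to mapping base pages.
 */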
3096 static int do_set_pmd(struct vm_fault *vmf, struct page *page)
3097 {
3098         struct vm_area_struct *vma = vmf->vma;
3099         bool write = vmf->flags & FAULT_FLAG_WRITE;
3100         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3101         pmd_t entry;
3102         int i, ret;
3103
3104         if (!transhuge_vma_suitable(vma, haddr))
3105                 return VM_FAULT_FALLBACK;
3106
3107         ret = VM_FAULT_FALLBACK;
3108         page = compound_head(page);
3109
3110         /*
3111          * Archs like ppc64 need additional space to store information
3112          * related to the pte entry. Use the preallocated table for that.
3113          */
3114         if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3115                 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
3116                 if (!vmf->prealloc_pte)
3117                         return VM_FAULT_OOM;
3118                 smp_wmb(); /* See comment in __pte_alloc() */
3119         }
3120
3121         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3122         if (unlikely(!pmd_none(*vmf->pmd)))
3123                 goto out;
3124
3125         for (i = 0; i < HPAGE_PMD_NR; i++)
3126                 flush_icache_page(vma, page + i);
3127
3128         entry = mk_huge_pmd(page, vma->vm_page_prot);
3129         if (write)
3130                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3131
3132         add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
3133         page_add_file_rmap(page, true);
3134         /*
3135          * deposit and withdraw with pmd lock held
3136          */
3137         if (arch_needs_pgtable_deposit())
3138                 deposit_prealloc_pte(vmf);
3139
3140         set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3141
3142         update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3143
3144         /* fault is handled */
3145         ret = 0;
3146         count_vm_event(THP_FILE_MAPPED);
3147 out:
3148         spin_unlock(vmf->ptl);
3149         return ret;
3150 }
3151 #else
3152 static int do_set_pmd(struct vm_fault *vmf, struct page *page)
3153 {
3154         BUILD_BUG();
3155         return 0;
3156 }
3157 #endif
3158
3159 /**
3160  * alloc_set_pte - set up a new PTE entry for the given page and add reverse page
3161  * mapping. If needed, the function allocates a page table or uses a pre-allocated one.
3162  *
3163  * @vmf: fault environment
3164  * @memcg: memcg to charge page (only for private mappings)
3165  * @page: page to map
3166  *
3167  * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3168  * return.
3169  *
3170  * Target users are page handler itself and implementations of
3171  * vm_ops->map_pages.
3172  */
3173 int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
3174                 struct page *page)
3175 {
3176         struct vm_area_struct *vma = vmf->vma;
3177         bool write = vmf->flags & FAULT_FLAG_WRITE;
3178         pte_t entry;
3179         int ret;
3180
3181         if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
3182                         IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
3183                 /* THP on COW? */
3184                 VM_BUG_ON_PAGE(memcg, page);
3185
3186                 ret = do_set_pmd(vmf, page);
3187                 if (ret != VM_FAULT_FALLBACK)
3188                         return ret;
3189         }
3190
3191         if (!vmf->pte) {
3192                 ret = pte_alloc_one_map(vmf);
3193                 if (ret)
3194                         return ret;
3195         }
3196
3197         /* Re-check under ptl */
3198         if (unlikely(!pte_none(*vmf->pte)))
3199                 return VM_FAULT_NOPAGE;
3200
3201         flush_icache_page(vma, page);
3202         entry = mk_pte(page, vma->vm_page_prot);
3203         if (write)
3204                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3205         /* copy-on-write page */
3206         if (write && !(vma->vm_flags & VM_SHARED)) {
3207                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3208                 page_add_new_anon_rmap(page, vma, vmf->address, false);
3209                 mem_cgroup_commit_charge(page, memcg, false, false);
3210                 lru_cache_add_active_or_unevictable(page, vma);
3211         } else {
3212                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
3213                 page_add_file_rmap(page, false);
3214         }
3215         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3216
3217         /* no need to invalidate: a not-present page won't be cached */
3218         update_mmu_cache(vma, vmf->address, vmf->pte);
3219
3220         return 0;
3221 }
3222
3223
3224 /**
3225  * finish_fault - finish page fault once we have prepared the page to fault
3226  *
3227  * @vmf: structure describing the fault
3228  *
3229  * This function handles all that is needed to finish a page fault once the
3230  * page to fault in is prepared. It handles locking of PTEs, inserts the PTE
3231  * for the given page, adds the reverse page mapping, and handles memcg charges
3232  * and LRU addition. The function returns 0 on success or a VM_FAULT_ code in
3233  * case of error.
3234  *
3235  * The function expects the page to be locked and on success it consumes a
3236  * reference to the page being mapped (for the PTE which maps it).
3237  */
3238 int finish_fault(struct vm_fault *vmf)
3239 {
3240         struct page *page;
3241         int ret;
3242
3243         /* Did we COW the page? */
3244         if ((vmf->flags & FAULT_FLAG_WRITE) &&
3245             !(vmf->vma->vm_flags & VM_SHARED))
3246                 page = vmf->cow_page;
3247         else
3248                 page = vmf->page;
3249         ret = alloc_set_pte(vmf, vmf->memcg, page);
3250         if (vmf->pte)
3251                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3252         return ret;
3253 }
3254
3255 static unsigned long fault_around_bytes __read_mostly =
3256         rounddown_pow_of_two(65536);
3257
3258 #ifdef CONFIG_DEBUG_FS
3259 static int fault_around_bytes_get(void *data, u64 *val)
3260 {
3261         *val = fault_around_bytes;
3262         return 0;
3263 }
3264
3265 /*
3266  * fault_around_pages() and fault_around_mask() expect fault_around_bytes
3267  * rounded down to the nearest page order. It's what do_fault_around() expects
3268  * to see.
3269  */
3270 static int fault_around_bytes_set(void *data, u64 val)
3271 {
3272         if (val / PAGE_SIZE > PTRS_PER_PTE)
3273                 return -EINVAL;
3274         if (val > PAGE_SIZE)
3275                 fault_around_bytes = rounddown_pow_of_two(val);
3276         else
3277                 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
3278         return 0;
3279 }
3280 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
3281                 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
3282
3283 static int __init fault_around_debugfs(void)
3284 {
3285         void *ret;
3286
3287         ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
3288                         &fault_around_bytes_fops);
3289         if (!ret)
3290                 pr_warn("Failed to create fault_around_bytes in debugfs");
3291         return 0;
3292 }
3293 late_initcall(fault_around_debugfs);
3294 #endif
3295
3296 /*
3297  * do_fault_around() tries to map a few pages around the fault address. The hope
3298  * is that the pages will be needed soon and this will lower the number of
3299  * faults to handle.
3300  *
3301  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
3302  * not ready to be mapped: not up-to-date, locked, etc.
3303  *
3304  * This function is called with the page table lock taken. In the split ptlock
3305  * case the page table lock only protects those entries which belong to
3306  * the page table corresponding to the fault address.
3307  *
3308  * This function doesn't cross the VMA boundaries, in order to call map_pages()
3309  * only once.
3310  *
3311  * fault_around_pages() defines how many pages we'll try to map.
3312  * do_fault_around() expects it to return a power of two less than or equal to
3313  * PTRS_PER_PTE.
3314  *
3315  * The virtual address of the area that we map is naturally aligned to the
3316  * fault_around_pages() value (and therefore to page order).  This way it's
3317  * easier to guarantee that we don't cross page table boundaries.
3318  */
3319 static int do_fault_around(struct vm_fault *vmf)
3320 {
3321         unsigned long address = vmf->address, nr_pages, mask;
3322         pgoff_t start_pgoff = vmf->pgoff;
3323         pgoff_t end_pgoff;
3324         int off, ret = 0;
3325
3326         nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
3327         mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
3328
3329         vmf->address = max(address & mask, vmf->vma->vm_start);
3330         off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
3331         start_pgoff -= off;
3332
3333         /*
3334          *  end_pgoff is either the end of the page table or the end of the vma,
3335          *  or fault_around_pages() from start_pgoff, depending on what is nearest.
3336          */
3337         end_pgoff = start_pgoff -
3338                 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
3339                 PTRS_PER_PTE - 1;
3340         end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
3341                         start_pgoff + nr_pages - 1);
3342
3343         if (pmd_none(*vmf->pmd)) {
3344                 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
3345                                                   vmf->address);
3346                 if (!vmf->prealloc_pte)
3347                         goto out;
3348                 smp_wmb(); /* See comment in __pte_alloc() */
3349         }
3350
3351         vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
3352
3353         /* Huge page is mapped? Page fault is solved */
3354         if (pmd_trans_huge(*vmf->pmd)) {
3355                 ret = VM_FAULT_NOPAGE;
3356                 goto out;
3357         }
3358
3359         /* ->map_pages() hasn't done anything useful. Cold page cache? */
3360         if (!vmf->pte)
3361                 goto out;
3362
3363         /* check if the page fault is solved */
3364         vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
3365         if (!pte_none(*vmf->pte))
3366                 ret = VM_FAULT_NOPAGE;
3367         pte_unmap_unlock(vmf->pte, vmf->ptl);
3368 out:
3369         vmf->address = address;
3370         vmf->pte = NULL;
3371         return ret;
3372 }
3373
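/*
 * Read fault on a file-backed vma: try the cheap ->map_pages() fault-around
 * path first and fall back to a full ->fault() plus finish_fault() if the
 * page was not ready to be mapped.
 */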
3374 static int do_read_fault(struct vm_fault *vmf)
3375 {
3376         struct vm_area_struct *vma = vmf->vma;
3377         int ret = 0;
3378
3379         /*
3380          * Let's call ->map_pages() first and use ->fault() as fallback
3381          * if page by the offset is not ready to be mapped (cold cache or
3382          * something).
3383          */
3384         if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
3385                 ret = do_fault_around(vmf);
3386                 if (ret)
3387                         return ret;
3388         }
3389
3390         ret = __do_fault(vmf);
3391         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3392                 return ret;
3393
3394         ret |= finish_fault(vmf);
3395         unlock_page(vmf->page);
3396         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3397                 put_page(vmf->page);
3398         return ret;
3399 }
3400
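/*
 * Write fault on a private file mapping: allocate and charge a new
 * anonymous page, read the backing page via ->fault(), copy its contents
 * into the private copy and map that copy instead.
 */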
3401 static int do_cow_fault(struct vm_fault *vmf)
3402 {
3403         struct vm_area_struct *vma = vmf->vma;
3404         int ret;
3405
3406         if (unlikely(anon_vma_prepare(vma)))
3407                 return VM_FAULT_OOM;
3408
3409         vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
3410         if (!vmf->cow_page)
3411                 return VM_FAULT_OOM;
3412
3413         if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
3414                                 &vmf->memcg, false)) {
3415                 put_page(vmf->cow_page);
3416                 return VM_FAULT_OOM;
3417         }
3418
3419         ret = __do_fault(vmf);
3420         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3421                 goto uncharge_out;
3422         if (ret & VM_FAULT_DONE_COW)
3423                 return ret;
3424
3425         copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
3426         __SetPageUptodate(vmf->cow_page);
3427
3428         ret |= finish_fault(vmf);
3429         unlock_page(vmf->page);
3430         put_page(vmf->page);
3431         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3432                 goto uncharge_out;
3433         return ret;
3434 uncharge_out:
3435         mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
3436         put_page(vmf->cow_page);
3437         return ret;
3438 }
3439
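/*
 * Write fault on a shared file mapping: fault the page in, let the
 * filesystem prepare it for writing via ->page_mkwrite(), map it
 * writable and mark it dirty.
 */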
3440 static int do_shared_fault(struct vm_fault *vmf)
3441 {
3442         struct vm_area_struct *vma = vmf->vma;
3443         int ret, tmp;
3444
3445         ret = __do_fault(vmf);
3446         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3447                 return ret;
3448
3449         /*
3450          * Check if the backing address space wants to know that the page is
3451          * about to become writable
3452          */
3453         if (vma->vm_ops->page_mkwrite) {
3454                 unlock_page(vmf->page);
3455                 tmp = do_page_mkwrite(vmf);
3456                 if (unlikely(!tmp ||
3457                                 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3458                         put_page(vmf->page);
3459                         return tmp;
3460                 }
3461         }
3462
3463         ret |= finish_fault(vmf);
3464         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
3465                                         VM_FAULT_RETRY))) {
3466                 unlock_page(vmf->page);
3467                 put_page(vmf->page);
3468                 return ret;
3469         }
3470
3471         fault_dirty_shared_page(vma, vmf->page);
3472         return ret;
3473 }
3474
3475 /*
3476  * We enter with non-exclusive mmap_sem (to exclude vma changes,
3477  * but allow concurrent faults).
3478  * The mmap_sem may have been released depending on flags and our
3479  * return value.  See filemap_fault() and __lock_page_or_retry().
3480  */
3481 static int do_fault(struct vm_fault *vmf)
3482 {
3483         struct vm_area_struct *vma = vmf->vma;
3484         int ret;
3485
3486         /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
3487         if (!vma->vm_ops->fault)
3488                 ret = VM_FAULT_SIGBUS;
3489         else if (!(vmf->flags & FAULT_FLAG_WRITE))
3490                 ret = do_read_fault(vmf);
3491         else if (!(vma->vm_flags & VM_SHARED))
3492                 ret = do_cow_fault(vmf);
3493         else
3494                 ret = do_shared_fault(vmf);
3495
3496         /* preallocated pagetable is unused: free it */
3497         if (vmf->prealloc_pte) {
3498                 pte_free(vma->vm_mm, vmf->prealloc_pte);
3499                 vmf->prealloc_pte = NULL;
3500         }
3501         return ret;
3502 }
3503
3504 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
3505                                 unsigned long addr, int page_nid,
3506                                 int *flags)
3507 {
3508         get_page(page);
3509
3510         count_vm_numa_event(NUMA_HINT_FAULTS);
3511         if (page_nid == numa_node_id()) {
3512                 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
3513                 *flags |= TNF_FAULT_LOCAL;
3514         }
3515
3516         return mpol_misplaced(page, vma, addr);
3517 }
3518
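/*
 * NUMA hinting fault: make the PROT_NONE pte present again, then ask the
 * NUMA policy whether the page should be migrated to the faulting node
 * and record the fault for task placement decisions.
 */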
3519 static int do_numa_page(struct vm_fault *vmf)
3520 {
3521         struct vm_area_struct *vma = vmf->vma;
3522         struct page *page = NULL;
3523         int page_nid = -1;
3524         int last_cpupid;
3525         int target_nid;
3526         bool migrated = false;
3527         pte_t pte;
3528         bool was_writable = pte_savedwrite(vmf->orig_pte);
3529         int flags = 0;
3530
3531         /*
3532          * The "pte" at this point cannot be used safely without
3533          * validation through pte_unmap_same(). It's of NUMA type but
3534          * the pfn may be screwed if the read is non-atomic.
3535          */
3536         vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
3537         spin_lock(vmf->ptl);
3538         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
3539                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3540                 goto out;
3541         }
3542
3543         /*
3544          * Make it present again. Depending on how the arch implements
3545          * non-accessible ptes, some can allow access by kernel mode.
3546          */
3547         pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
3548         pte = pte_modify(pte, vma->vm_page_prot);
3549         pte = pte_mkyoung(pte);
3550         if (was_writable)
3551                 pte = pte_mkwrite(pte);
3552         ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
3553         update_mmu_cache(vma, vmf->address, vmf->pte);
3554
3555         page = vm_normal_page(vma, vmf->address, pte);
3556         if (!page) {
3557                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3558                 return 0;
3559         }
3560
3561         /* TODO: handle PTE-mapped THP */
3562         if (PageCompound(page)) {
3563                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3564                 return 0;
3565         }
3566
3567         /*
3568          * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
3569          * much anyway since they can be in shared cache state. This misses
3570          * the case where a mapping is writable but the process never writes
3571          * to it but pte_write gets cleared during protection updates and
3572          * pte_dirty has unpredictable behaviour between PTE scan updates,
3573          * background writeback, dirty balancing and application behaviour.
3574          */
3575         if (!pte_write(pte))
3576                 flags |= TNF_NO_GROUP;
3577
3578         /*
3579          * Flag if the page is shared between multiple address spaces. This
3580          * is later used when determining whether to group tasks together
3581          */
3582         if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
3583                 flags |= TNF_SHARED;
3584
3585         last_cpupid = page_cpupid_last(page);
3586         page_nid = page_to_nid(page);
3587         target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
3588                         &flags);
3589         pte_unmap_unlock(vmf->pte, vmf->ptl);
3590         if (target_nid == -1) {
3591                 put_page(page);
3592                 goto out;
3593         }
3594
3595         /* Migrate to the requested node */
3596         migrated = migrate_misplaced_page(page, vma, target_nid);
3597         if (migrated) {
3598                 page_nid = target_nid;
3599                 flags |= TNF_MIGRATED;
3600         } else
3601                 flags |= TNF_MIGRATE_FAIL;
3602
3603 out:
3604         if (page_nid != -1)
3605                 task_numa_fault(last_cpupid, page_nid, 1, flags);
3606         return 0;
3607 }
3608
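/* Install a huge pmd for the fault: anonymous THP, or vm_ops->huge_fault(). */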
3609 static inline int create_huge_pmd(struct vm_fault *vmf)
3610 {
3611         if (vma_is_anonymous(vmf->vma))
3612                 return do_huge_pmd_anonymous_page(vmf);
3613         if (vmf->vma->vm_ops->huge_fault)
3614                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
3615         return VM_FAULT_FALLBACK;
3616 }
3617
3618 static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
3619 {
3620         if (vma_is_anonymous(vmf->vma))
3621                 return do_huge_pmd_wp_page(vmf, orig_pmd);
3622         if (vmf->vma->vm_ops->huge_fault)
3623                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
3624
3625         /* COW handled on pte level: split pmd */
3626         VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
3627         __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
3628
3629         return VM_FAULT_FALLBACK;
3630 }
3631
3632 static inline bool vma_is_accessible(struct vm_area_struct *vma)
3633 {
3634         return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
3635 }
3636
3637 static int create_huge_pud(struct vm_fault *vmf)
3638 {
3639 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3640         /* No support for anonymous transparent PUD pages yet */
3641         if (vma_is_anonymous(vmf->vma))
3642                 return VM_FAULT_FALLBACK;
3643         if (vmf->vma->vm_ops->huge_fault)
3644                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
3645 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3646         return VM_FAULT_FALLBACK;
3647 }
3648
3649 static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
3650 {
3651 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3652         /* No support for anonymous transparent PUD pages yet */
3653         if (vma_is_anonymous(vmf->vma))
3654                 return VM_FAULT_FALLBACK;
3655         if (vmf->vma->vm_ops->huge_fault)
3656                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
3657 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3658         return VM_FAULT_FALLBACK;
3659 }
3660
3661 /*
3662  * These routines also need to handle stuff like marking pages dirty
3663  * and/or accessed for architectures that don't do it in hardware (most
3664  * RISC architectures).  The early dirtying is also good on the i386.
3665  *
3666  * There is also a hook called "update_mmu_cache()" that architectures
3667  * with external mmu caches can use to update those (ie the Sparc or
3668  * PowerPC hashed page tables that act as extended TLBs).
3669  *
3670  * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
3671  * concurrent faults).
3672  *
3673  * The mmap_sem may have been released depending on flags and our return value.
3674  * See filemap_fault() and __lock_page_or_retry().
3675  */
3676 static int handle_pte_fault(struct vm_fault *vmf)
3677 {
3678         pte_t entry;
3679
3680         if (unlikely(pmd_none(*vmf->pmd))) {
3681                 /*
3682                  * Leave __pte_alloc() until later: because vm_ops->fault may
3683                  * want to allocate a huge page, and if we expose the page table
3684                  * for an instant, it will be difficult to retract from
3685                  * concurrent faults and from rmap lookups.
3686                  */
3687                 vmf->pte = NULL;
3688         } else {
3689                 /* See comment in pte_alloc_one_map() */
3690                 if (pmd_devmap_trans_unstable(vmf->pmd))
3691                         return 0;
3692                 /*
3693                  * A regular pmd is established and it can't morph into a huge
3694                  * pmd from under us anymore at this point because we hold the
3695                  * mmap_sem read mode and khugepaged takes it in write mode.
3696                  * So now it's safe to run pte_offset_map().
3697                  */
3698                 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
3699                 vmf->orig_pte = *vmf->pte;
3700
3701                 /*
3702                  * Some architectures can have larger ptes than the word size,
3703                  * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
3704                  * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
3705                  * atomic accesses.  The code below just needs a consistent
3706                  * view for the ifs and we later double check anyway with the
3707                  * ptl lock held. So here a barrier will do.
3708                  */
3709                 barrier();
3710                 if (pte_none(vmf->orig_pte)) {
3711                         pte_unmap(vmf->pte);
3712                         vmf->pte = NULL;
3713                 }
3714         }
3715
3716         if (!vmf->pte) {
3717                 if (vma_is_anonymous(vmf->vma))
3718                         return do_anonymous_page(vmf);
3719                 else
3720                         return do_fault(vmf);
3721         }
3722
3723         if (!pte_present(vmf->orig_pte))
3724                 return do_swap_page(vmf);
3725
3726         if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
3727                 return do_numa_page(vmf);
3728
3729         vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
3730         spin_lock(vmf->ptl);
3731         entry = vmf->orig_pte;
3732         if (unlikely(!pte_same(*vmf->pte, entry)))
3733                 goto unlock;
3734         if (vmf->flags & FAULT_FLAG_WRITE) {
3735                 if (!pte_write(entry))
3736                         return do_wp_page(vmf);
3737                 entry = pte_mkdirty(entry);
3738         }
3739         entry = pte_mkyoung(entry);
3740         if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
3741                                 vmf->flags & FAULT_FLAG_WRITE)) {
3742                 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
3743         } else {
3744                 /*
3745                  * This is needed only for protection faults but the arch code
3746                  * is not yet telling us if this is a protection fault or not.
3747                  * This still avoids useless tlb flushes for .text page faults
3748                  * with threads.
3749                  */
3750                 if (vmf->flags & FAULT_FLAG_WRITE)
3751                         flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
3752         }
3753 unlock:
3754         pte_unmap_unlock(vmf->pte, vmf->ptl);
3755         return 0;
3756 }
3757
3758 /*
3759  * By the time we get here, we already hold the mm semaphore
3760  *
3761  * The mmap_sem may have been released depending on flags and our
3762  * return value.  See filemap_fault() and __lock_page_or_retry().
3763  */
3764 static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
3765                 unsigned int flags)
3766 {
3767         struct vm_fault vmf = {
3768                 .vma = vma,
3769                 .address = address & PAGE_MASK,
3770                 .flags = flags,
3771                 .pgoff = linear_page_index(vma, address),
3772                 .gfp_mask = __get_fault_gfp_mask(vma),
3773         };
3774         struct mm_struct *mm = vma->vm_mm;
3775         pgd_t *pgd;
3776         p4d_t *p4d;
3777         int ret;
3778
3779         pgd = pgd_offset(mm, address);
3780         p4d = p4d_alloc(mm, pgd, address);
3781         if (!p4d)
3782                 return VM_FAULT_OOM;
3783
3784         vmf.pud = pud_alloc(mm, p4d, address);
3785         if (!vmf.pud)
3786                 return VM_FAULT_OOM;
3787         if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
3788                 ret = create_huge_pud(&vmf);
3789                 if (!(ret & VM_FAULT_FALLBACK))
3790                         return ret;
3791         } else {
3792                 pud_t orig_pud = *vmf.pud;
3793
3794                 barrier();
3795                 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
3796                         unsigned int dirty = flags & FAULT_FLAG_WRITE;
3797
3798                         /* NUMA case for anonymous PUDs would go here */
3799
3800                         if (dirty && !pud_write(orig_pud)) {
3801                                 ret = wp_huge_pud(&vmf, orig_pud);
3802                                 if (!(ret & VM_FAULT_FALLBACK))
3803                                         return ret;
3804                         } else {
3805                                 huge_pud_set_accessed(&vmf, orig_pud);
3806                                 return 0;
3807                         }
3808                 }
3809         }
3810
3811         vmf.pmd = pmd_alloc(mm, vmf.pud, address);
3812         if (!vmf.pmd)
3813                 return VM_FAULT_OOM;
3814         if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
3815                 ret = create_huge_pmd(&vmf);
3816                 if (!(ret & VM_FAULT_FALLBACK))
3817                         return ret;
3818         } else {
3819                 pmd_t orig_pmd = *vmf.pmd;
3820
3821                 barrier();
3822                 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
3823                         if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
3824                                 return do_huge_pmd_numa_page(&vmf, orig_pmd);
3825
3826                         if ((vmf.flags & FAULT_FLAG_WRITE) &&
3827                                         !pmd_write(orig_pmd)) {
3828                                 ret = wp_huge_pmd(&vmf, orig_pmd);
3829                                 if (!(ret & VM_FAULT_FALLBACK))
3830                                         return ret;
3831                         } else {
3832                                 huge_pmd_set_accessed(&vmf, orig_pmd);
3833                                 return 0;
3834                         }
3835                 }
3836         }
3837
3838         return handle_pte_fault(&vmf);
3839 }
3840
3841 /*
3842  * By the time we get here, we already hold the mm semaphore
3843  *
3844  * The mmap_sem may have been released depending on flags and our
3845  * return value.  See filemap_fault() and __lock_page_or_retry().
3846  */
3847 int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
3848                 unsigned int flags)
3849 {
3850         int ret;
3851
3852         __set_current_state(TASK_RUNNING);
3853
3854         count_vm_event(PGFAULT);
3855         count_memcg_event_mm(vma->vm_mm, PGFAULT);
3856
3857         /* do counter updates before entering the really critical section. */
3858         check_sync_rss_stat(current);
3859
3860         /*
3861          * Enable the memcg OOM handling for faults triggered in user
3862          * space.  Kernel faults are handled more gracefully.
3863          */
3864         if (flags & FAULT_FLAG_USER)
3865                 mem_cgroup_oom_enable();
3866
3867         if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
3868                                             flags & FAULT_FLAG_INSTRUCTION,
3869                                             flags & FAULT_FLAG_REMOTE))
3870                 return VM_FAULT_SIGSEGV;
3871
3872         if (unlikely(is_vm_hugetlb_page(vma)))
3873                 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
3874         else
3875                 ret = __handle_mm_fault(vma, address, flags);
3876
3877         if (flags & FAULT_FLAG_USER) {
3878                 mem_cgroup_oom_disable();
3879                 /*
3880                  * The task may have entered a memcg OOM situation but
3881                  * if the allocation error was handled gracefully (no
3882                  * VM_FAULT_OOM), there is no need to kill anything.
3883                  * Just clean up the OOM state peacefully.
3884                  */
3885                 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
3886                         mem_cgroup_oom_synchronize(false);
3887         }
3888
3889         /*
3890          * This mm has already been reaped by the oom reaper and so the
3891          * refault cannot be trusted in general. Anonymous refaults would
3892          * lose data and e.g. get a zero page instead. This is especially a
3893          * problem for use_mm() because regular tasks will just die and
3894          * the corrupted data will not be visible anywhere, while a kthread
3895          * will outlive the oom victim and potentially propagate the data
3896          * further.
3897          */
3898         if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
3899                                 && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
3900                 ret = VM_FAULT_SIGBUS;
3901
3902         return ret;
3903 }
3904 EXPORT_SYMBOL_GPL(handle_mm_fault);
3905
3906 #ifndef __PAGETABLE_P4D_FOLDED
3907 /*
3908  * Allocate p4d page table.
3909  * We've already handled the fast-path in-line.
3910  */
3911 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
3912 {
3913         p4d_t *new = p4d_alloc_one(mm, address);
3914         if (!new)
3915                 return -ENOMEM;
3916
3917         smp_wmb(); /* See comment in __pte_alloc */
3918
3919         spin_lock(&mm->page_table_lock);
3920         if (pgd_present(*pgd))          /* Another has populated it */
3921                 p4d_free(mm, new);
3922         else
3923                 pgd_populate(mm, pgd, new);
3924         spin_unlock(&mm->page_table_lock);
3925         return 0;
3926 }
3927 #endif /* __PAGETABLE_P4D_FOLDED */
3928
3929 #ifndef __PAGETABLE_PUD_FOLDED
3930 /*
3931  * Allocate page upper directory.
3932  * We've already handled the fast-path in-line.
3933  */
3934 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
3935 {
3936         pud_t *new = pud_alloc_one(mm, address);
3937         if (!new)
3938                 return -ENOMEM;
3939
3940         smp_wmb(); /* See comment in __pte_alloc */
3941
3942         spin_lock(&mm->page_table_lock);
3943 #ifndef __ARCH_HAS_5LEVEL_HACK
3944         if (p4d_present(*p4d))          /* Another has populated it */
3945                 pud_free(mm, new);
3946         else
3947                 p4d_populate(mm, p4d, new);
3948 #else
3949         if (pgd_present(*p4d))          /* Another has populated it */
3950                 pud_free(mm, new);
3951         else
3952                 pgd_populate(mm, p4d, new);
3953 #endif /* __ARCH_HAS_5LEVEL_HACK */
3954         spin_unlock(&mm->page_table_lock);
3955         return 0;
3956 }
3957 #endif /* __PAGETABLE_PUD_FOLDED */
3958
3959 #ifndef __PAGETABLE_PMD_FOLDED
3960 /*
3961  * Allocate page middle directory.
3962  * We've already handled the fast-path in-line.
3963  */
3964 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3965 {
3966         spinlock_t *ptl;
3967         pmd_t *new = pmd_alloc_one(mm, address);
3968         if (!new)
3969                 return -ENOMEM;
3970
3971         smp_wmb(); /* See comment in __pte_alloc */
3972
3973         ptl = pud_lock(mm, pud);
3974 #ifndef __ARCH_HAS_4LEVEL_HACK
3975         if (!pud_present(*pud)) {
3976                 mm_inc_nr_pmds(mm);
3977                 pud_populate(mm, pud, new);
3978         } else  /* Another has populated it */
3979                 pmd_free(mm, new);
3980 #else
3981         if (!pgd_present(*pud)) {
3982                 mm_inc_nr_pmds(mm);
3983                 pgd_populate(mm, pud, new);
3984         } else /* Another has populated it */
3985                 pmd_free(mm, new);
3986 #endif /* __ARCH_HAS_4LEVEL_HACK */
3987         spin_unlock(ptl);
3988         return 0;
3989 }
3990 #endif /* __PAGETABLE_PMD_FOLDED */
3991
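/*
 * Walk the page tables down to the entry mapping @address and return it
 * locked: *ptepp (or *pmdpp for a huge pmd) is set and *ptlp holds the
 * lock on success, -EINVAL if nothing suitable is mapped there.
 */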
3992 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
3993                 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
3994 {
3995         pgd_t *pgd;
3996         p4d_t *p4d;
3997         pud_t *pud;
3998         pmd_t *pmd;
3999         pte_t *ptep;
4000
4001         pgd = pgd_offset(mm, address);
4002         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4003                 goto out;
4004
4005         p4d = p4d_offset(pgd, address);
4006         if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4007                 goto out;
4008
4009         pud = pud_offset(p4d, address);
4010         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4011                 goto out;
4012
4013         pmd = pmd_offset(pud, address);
4014         VM_BUG_ON(pmd_trans_huge(*pmd));
4015
4016         if (pmd_huge(*pmd)) {
4017                 if (!pmdpp)
4018                         goto out;
4019
4020                 *ptlp = pmd_lock(mm, pmd);
4021                 if (pmd_huge(*pmd)) {
4022                         *pmdpp = pmd;
4023                         return 0;
4024                 }
4025                 spin_unlock(*ptlp);
4026         }
4027
4028         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4029                 goto out;
4030
4031         ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4032         if (!pte_present(*ptep))
4033                 goto unlock;
4034         *ptepp = ptep;
4035         return 0;
4036 unlock:
4037         pte_unmap_unlock(ptep, *ptlp);
4038 out:
4039         return -EINVAL;
4040 }
4041
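/* Pte-only variant of follow_pte_pmd() below: never returns a huge pmd. */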
4042 static inline int follow_pte(struct mm_struct *mm, unsigned long address,
4043                              pte_t **ptepp, spinlock_t **ptlp)
4044 {
4045         int res;
4046
4047         /* (void) is needed to make gcc happy */
4048         (void) __cond_lock(*ptlp,
4049                            !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
4050                                            ptlp)));
4051         return res;
4052 }
4053
4054 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4055                              pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4056 {
4057         int res;
4058
4059         /* (void) is needed to make gcc happy */
4060         (void) __cond_lock(*ptlp,
4061                            !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
4062                                            ptlp)));
4063         return res;
4064 }
4065 EXPORT_SYMBOL(follow_pte_pmd);
4066
4067 /**
4068  * follow_pfn - look up PFN at a user virtual address
4069  * @vma: memory mapping
4070  * @address: user virtual address
4071  * @pfn: location to store found PFN
4072  *
4073  * Only IO mappings and raw PFN mappings are allowed.
4074  *
4075  * Returns zero and the pfn at @pfn on success, -ve otherwise.
4076  */
4077 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4078         unsigned long *pfn)
4079 {
4080         int ret = -EINVAL;
4081         spinlock_t *ptl;
4082         pte_t *ptep;
4083
4084         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4085                 return ret;
4086
4087         ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
4088         if (ret)
4089                 return ret;
4090         *pfn = pte_pfn(*ptep);
4091         pte_unmap_unlock(ptep, ptl);
4092         return 0;
4093 }
4094 EXPORT_SYMBOL(follow_pfn);
4095
4096 #ifdef CONFIG_HAVE_IOREMAP_PROT
4097 int follow_phys(struct vm_area_struct *vma,
4098                 unsigned long address, unsigned int flags,
4099                 unsigned long *prot, resource_size_t *phys)
4100 {
4101         int ret = -EINVAL;
4102         pte_t *ptep, pte;
4103         spinlock_t *ptl;
4104
4105         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4106                 goto out;
4107
4108         if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
4109                 goto out;
4110         pte = *ptep;
4111
4112         if ((flags & FOLL_WRITE) && !pte_write(pte))
4113                 goto unlock;
4114
4115         *prot = pgprot_val(pte_pgprot(pte));
4116         *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
4117
4118         ret = 0;
4119 unlock:
4120         pte_unmap_unlock(ptep, ptl);
4121 out:
4122         return ret;
4123 }
4124
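/*
 * Generic ->access() implementation for VM_IO / VM_PFNMAP mappings:
 * resolve the physical address behind @addr with follow_phys() and copy
 * the data through a temporary ioremap_prot() mapping.
 */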
4125 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
4126                         void *buf, int len, int write)
4127 {
4128         resource_size_t phys_addr;
4129         unsigned long prot = 0;
4130         void __iomem *maddr;
4131         int offset = addr & (PAGE_SIZE-1);
4132
4133         if (follow_phys(vma, addr, write, &prot, &phys_addr))
4134                 return -EINVAL;
4135
4136         maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
4137         if (write)
4138                 memcpy_toio(maddr + offset, buf, len);
4139         else
4140                 memcpy_fromio(buf, maddr + offset, len);
4141         iounmap(maddr);
4142
4143         return len;
4144 }
4145 EXPORT_SYMBOL_GPL(generic_access_phys);
4146 #endif
4147
4148 /*
4149  * Access another process' address space as given in mm.  If non-NULL, use the
4150  * given task for page fault accounting.
4151  */
4152 int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
4153                 unsigned long addr, void *buf, int len, unsigned int gup_flags)
4154 {
4155         struct vm_area_struct *vma;
4156         void *old_buf = buf;
4157         int write = gup_flags & FOLL_WRITE;
4158
4159         down_read(&mm->mmap_sem);
4160         /* ignore errors, just check how much was successfully transferred */
4161         while (len) {
4162                 int bytes, ret, offset;
4163                 void *maddr;
4164                 struct page *page = NULL;
4165
4166                 ret = get_user_pages_remote(tsk, mm, addr, 1,
4167                                 gup_flags, &page, &vma, NULL);
4168                 if (ret <= 0) {
4169 #ifndef CONFIG_HAVE_IOREMAP_PROT
4170                         break;
4171 #else
4172                         /*
4173                          * Check if this is a VM_IO | VM_PFNMAP VMA, which
4174                          * we can access using slightly different code.
4175                          */
4176                         vma = find_vma(mm, addr);
4177                         if (!vma || vma->vm_start > addr)
4178                                 break;
4179                         if (vma->vm_ops && vma->vm_ops->access)
4180                                 ret = vma->vm_ops->access(vma, addr, buf,
4181                                                           len, write);
4182                         if (ret <= 0)
4183                                 break;
4184                         bytes = ret;
4185 #endif
4186                 } else {
4187                         bytes = len;
4188                         offset = addr & (PAGE_SIZE-1);
4189                         if (bytes > PAGE_SIZE-offset)
4190                                 bytes = PAGE_SIZE-offset;
4191
4192                         maddr = kmap(page);
4193                         if (write) {
4194                                 copy_to_user_page(vma, page, addr,
4195                                                   maddr + offset, buf, bytes);
4196                                 set_page_dirty_lock(page);
4197                         } else {
4198                                 copy_from_user_page(vma, page, addr,
4199                                                     buf, maddr + offset, bytes);
4200                         }
4201                         kunmap(page);
4202                         put_page(page);
4203                 }
4204                 len -= bytes;
4205                 buf += bytes;
4206                 addr += bytes;
4207         }
4208         up_read(&mm->mmap_sem);
4209
4210         return buf - old_buf;
4211 }
4212
4213 /**
4214  * access_remote_vm - access another process' address space
4215  * @mm:         the mm_struct of the target address space
4216  * @addr:       start address to access
4217  * @buf:        source or destination buffer
4218  * @len:        number of bytes to transfer
4219  * @gup_flags:  flags modifying lookup behaviour
4220  *
4221  * The caller must hold a reference on @mm.
4222  */
4223 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
4224                 void *buf, int len, unsigned int gup_flags)
4225 {
4226         return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
4227 }
4228
4229 /*
4230  * Access another process' address space.
4231  * Source/target buffer must be in kernel space.
4232  * Do not walk the page table directly; use get_user_pages().
4233  */
4234 int access_process_vm(struct task_struct *tsk, unsigned long addr,
4235                 void *buf, int len, unsigned int gup_flags)
4236 {
4237         struct mm_struct *mm;
4238         int ret;
4239
4240         mm = get_task_mm(tsk);
4241         if (!mm)
4242                 return 0;
4243
4244         ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
4245
4246         mmput(mm);
4247
4248         return ret;
4249 }
4250 EXPORT_SYMBOL_GPL(access_process_vm);
4251
4252 /*
4253  * Print the name of a VMA.
4254  */
4255 void print_vma_addr(char *prefix, unsigned long ip)
4256 {
4257         struct mm_struct *mm = current->mm;
4258         struct vm_area_struct *vma;
4259
4260         /*
4261          * Do not print if we are in an atomic
4262          * context (in exception stacks, etc.):
4263          */
4264         if (preempt_count())
4265                 return;
4266
4267         down_read(&mm->mmap_sem);
4268         vma = find_vma(mm, ip);
4269         if (vma && vma->vm_file) {
4270                 struct file *f = vma->vm_file;
4271                 char *buf = (char *)__get_free_page(GFP_KERNEL);
4272                 if (buf) {
4273                         char *p;
4274
4275                         p = file_path(f, buf, PAGE_SIZE);
4276                         if (IS_ERR(p))
4277                                 p = "?";
4278                         printk("%s%s[%lx+%lx]", prefix, kbasename(p),
4279                                         vma->vm_start,
4280                                         vma->vm_end - vma->vm_start);
4281                         free_page((unsigned long)buf);
4282                 }
4283         }
4284         up_read(&mm->mmap_sem);
4285 }
4286
4287 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
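/*
 * Annotate code that may fault on user memory: warn if we are in a
 * context where sleeping or taking mmap_sem for read would be invalid,
 * unless faults are impossible (kernel uaccess or pagefaults disabled).
 */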
4288 void __might_fault(const char *file, int line)
4289 {
4290         /*
4291          * Some code (nfs/sunrpc) uses socket ops on kernel memory while
4292          * holding the mmap_sem; this is safe because kernel memory doesn't
4293          * get paged out, therefore we'll never actually fault, and the
4294          * annotations below would generate false positives.
4295          */
4296         if (uaccess_kernel())
4297                 return;
4298         if (pagefault_disabled())
4299                 return;
4300         __might_sleep(file, line, 0);
4301 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
4302         if (current->mm)
4303                 might_lock_read(&current->mm->mmap_sem);
4304 #endif
4305 }
4306 EXPORT_SYMBOL(__might_fault);
4307 #endif
4308
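/*
 * Illustrative sketch, not part of memory.c: might_fault() (which expands to
 * __might_fault() under the configs above) annotates code that can take a
 * user-space fault, so calling it from atomic context, or with locks that
 * conflict with mmap_sem, is flagged on every run rather than only when a
 * fault actually happens.  The helper below is hypothetical, and the explicit
 * might_fault() is redundant next to copy_to_user() (which already contains
 * one); it is shown only to illustrate the annotation.
 */
static int example_read_value_to_user(u32 __user *uptr)
{
        u32 val = 42;

        might_fault();          /* annotate before the possibly-faulting copy */

        return copy_to_user(uptr, &val, sizeof(val)) ? -EFAULT : 0;
}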
4309 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
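/*
 * For "gigantic" huge pages (more than MAX_ORDER_NR_PAGES sub-pages) the
 * struct pages are not guaranteed to be contiguous in mem_map, so the
 * *_gigantic_page() variants below walk them with mem_map_next() instead of
 * plain pointer arithmetic.
 */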
4310 static void clear_gigantic_page(struct page *page,
4311                                 unsigned long addr,
4312                                 unsigned int pages_per_huge_page)
4313 {
4314         int i;
4315         struct page *p = page;
4316
4317         might_sleep();
4318         for (i = 0; i < pages_per_huge_page;
4319              i++, p = mem_map_next(p, page, i)) {
4320                 cond_resched();
4321                 clear_user_highpage(p, addr + i * PAGE_SIZE);
4322         }
4323 }
4324 void clear_huge_page(struct page *page,
4325                      unsigned long addr, unsigned int pages_per_huge_page)
4326 {
4327         int i;
4328
4329         if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
4330                 clear_gigantic_page(page, addr, pages_per_huge_page);
4331                 return;
4332         }
4333
4334         might_sleep();
4335         for (i = 0; i < pages_per_huge_page; i++) {
4336                 cond_resched();
4337                 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
4338         }
4339 }
4340
4341 static void copy_user_gigantic_page(struct page *dst, struct page *src,
4342                                     unsigned long addr,
4343                                     struct vm_area_struct *vma,
4344                                     unsigned int pages_per_huge_page)
4345 {
4346         int i;
4347         struct page *dst_base = dst;
4348         struct page *src_base = src;
4349
4350         for (i = 0; i < pages_per_huge_page; ) {
4351                 cond_resched();
4352                 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
4353
4354                 i++;
4355                 dst = mem_map_next(dst, dst_base, i);
4356                 src = mem_map_next(src, src_base, i);
4357         }
4358 }
4359
4360 void copy_user_huge_page(struct page *dst, struct page *src,
4361                          unsigned long addr, struct vm_area_struct *vma,
4362                          unsigned int pages_per_huge_page)
4363 {
4364         int i;
4365
4366         if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
4367                 copy_user_gigantic_page(dst, src, addr, vma,
4368                                         pages_per_huge_page);
4369                 return;
4370         }
4371
4372         might_sleep();
4373         for (i = 0; i < pages_per_huge_page; i++) {
4374                 cond_resched();
4375                 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
4376         }
4377 }
4378
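/*
 * Copy pages_per_huge_page * PAGE_SIZE bytes from user space into the
 * sub-pages of @dst_page.  Returns the number of bytes that could not be
 * copied (0 on success), mirroring copy_from_user().  With @allow_pagefault
 * false the sub-pages are mapped with kmap_atomic(), so the copy runs with
 * page faults disabled and may fail on memory that is not already faulted in.
 */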
4379 long copy_huge_page_from_user(struct page *dst_page,
4380                                 const void __user *usr_src,
4381                                 unsigned int pages_per_huge_page,
4382                                 bool allow_pagefault)
4383 {
4384         void *src = (void *)usr_src;
4385         void *page_kaddr;
4386         unsigned long i, rc = 0;
4387         unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
4388
4389         for (i = 0; i < pages_per_huge_page; i++) {
4390                 if (allow_pagefault)
4391                         page_kaddr = kmap(dst_page + i);
4392                 else
4393                         page_kaddr = kmap_atomic(dst_page + i);
4394                 rc = copy_from_user(page_kaddr,
4395                                 (const void __user *)(src + i * PAGE_SIZE),
4396                                 PAGE_SIZE);
4397                 if (allow_pagefault)
4398                         kunmap(dst_page + i);
4399                 else
4400                         kunmap_atomic(page_kaddr);
4401
4402                 ret_val -= (PAGE_SIZE - rc);
4403                 if (rc)
4404                         break;
4405
4406                 cond_resched();
4407         }
4408         return ret_val;
4409 }
4410 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4411
4412 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
4413
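/*
 * ALLOC_SPLIT_PTLOCKS means spinlock_t is too big to embed directly in
 * struct page (typically when spinlock debugging/lockdep is enabled), so
 * each split page-table lock is allocated from this dedicated cache and
 * reached through page->ptl.
 */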
4414 static struct kmem_cache *page_ptl_cachep;
4415
4416 void __init ptlock_cache_init(void)
4417 {
4418         page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
4419                         SLAB_PANIC, NULL);
4420 }
4421
4422 bool ptlock_alloc(struct page *page)
4423 {
4424         spinlock_t *ptl;
4425
4426         ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
4427         if (!ptl)
4428                 return false;
4429         page->ptl = ptl;
4430         return true;
4431 }
4432
4433 void ptlock_free(struct page *page)
4434 {
4435         kmem_cache_free(page_ptl_cachep, page->ptl);
4436 }
4437 #endif