[karo-tx-linux.git] / arch/x86/mm/pageattr.c
1 /*
2  * Copyright 2002 Andi Kleen, SuSE Labs.
3  * Thanks to Ben LaHaise for precious feedback.
4  */
5 #include <linux/highmem.h>
6 #include <linux/bootmem.h>
7 #include <linux/sched.h>
8 #include <linux/mm.h>
9 #include <linux/interrupt.h>
10 #include <linux/seq_file.h>
11 #include <linux/debugfs.h>
12 #include <linux/pfn.h>
13 #include <linux/percpu.h>
14 #include <linux/gfp.h>
15 #include <linux/pci.h>
16 #include <linux/vmalloc.h>
17
18 #include <asm/e820.h>
19 #include <asm/processor.h>
20 #include <asm/tlbflush.h>
21 #include <asm/sections.h>
22 #include <asm/setup.h>
23 #include <asm/uaccess.h>
24 #include <asm/pgalloc.h>
25 #include <asm/proto.h>
26 #include <asm/pat.h>
27
28 /*
29  * The current flushing context - we pass it instead of 5 arguments:
30  */
31 struct cpa_data {
32         unsigned long   *vaddr;
33         pgd_t           *pgd;
34         pgprot_t        mask_set;
35         pgprot_t        mask_clr;
36         unsigned long   numpages;
37         int             flags;
38         unsigned long   pfn;
39         unsigned        force_split : 1;
40         int             curpage;
41         struct page     **pages;
42 };
43
44 /*
45  * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
46  * using cpa_lock, so that we don't allow any other CPU, with stale large TLB
47  * entries, to change the page attribute in parallel while some other CPU is
48  * splitting a large page entry and changing the attribute.
49  */
50 static DEFINE_SPINLOCK(cpa_lock);
51
52 #define CPA_FLUSHTLB 1
53 #define CPA_ARRAY 2
54 #define CPA_PAGES_ARRAY 4
55
56 #ifdef CONFIG_PROC_FS
57 static unsigned long direct_pages_count[PG_LEVEL_NUM];
58
59 void update_page_count(int level, unsigned long pages)
60 {
61         /* Protect against CPA */
62         spin_lock(&pgd_lock);
63         direct_pages_count[level] += pages;
64         spin_unlock(&pgd_lock);
65 }
66
67 static void split_page_count(int level)
68 {
69         if (direct_pages_count[level] == 0)
70                 return;
71
72         direct_pages_count[level]--;
73         direct_pages_count[level - 1] += PTRS_PER_PTE;
74 }
75
76 void arch_report_meminfo(struct seq_file *m)
77 {
78         seq_printf(m, "DirectMap4k:    %8lu kB\n",
79                         direct_pages_count[PG_LEVEL_4K] << 2);
80 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
81         seq_printf(m, "DirectMap2M:    %8lu kB\n",
82                         direct_pages_count[PG_LEVEL_2M] << 11);
83 #else
84         seq_printf(m, "DirectMap4M:    %8lu kB\n",
85                         direct_pages_count[PG_LEVEL_2M] << 12);
86 #endif
87         if (direct_gbpages)
88                 seq_printf(m, "DirectMap1G:    %8lu kB\n",
89                         direct_pages_count[PG_LEVEL_1G] << 20);
90 }
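/*
 * For reference, the shifts above convert page counts to kB in place:
 * 4 KiB pages: count << 2 (x 4 kB), 2 MiB pages: count << 11 (x 2048 kB),
 * 4 MiB pages: count << 12 (x 4096 kB), 1 GiB pages: count << 20 (x 1048576 kB).
 */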
91 #else
92 static inline void split_page_count(int level) { }
93 #endif
94
95 #ifdef CONFIG_X86_64
96
97 static inline unsigned long highmap_start_pfn(void)
98 {
99         return __pa_symbol(_text) >> PAGE_SHIFT;
100 }
101
102 static inline unsigned long highmap_end_pfn(void)
103 {
104         return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
105 }
106
107 #endif
108
109 #ifdef CONFIG_DEBUG_PAGEALLOC
110 # define debug_pagealloc 1
111 #else
112 # define debug_pagealloc 0
113 #endif
114
115 static inline int
116 within(unsigned long addr, unsigned long start, unsigned long end)
117 {
118         return addr >= start && addr < end;
119 }
120
121 /*
122  * Flushing functions
123  */
124
125 /**
126  * clflush_cache_range - flush a cache range with clflush
127  * @vaddr:      virtual start address
128  * @size:       number of bytes to flush
129  *
130  * clflushopt is an unordered instruction which needs fencing with mfence or
131  * sfence to avoid ordering issues.
132  */
133 void clflush_cache_range(void *vaddr, unsigned int size)
134 {
135         const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
136         void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
137         void *vend = vaddr + size;
138
139         if (p >= vend)
140                 return;
141
142         mb();
143
144         for (; p < vend; p += clflush_size)
145                 clflushopt(p);
146
147         mb();
148 }
149 EXPORT_SYMBOL_GPL(clflush_cache_range);
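/*
 * A typical (hypothetical) caller flushes a buffer it has just written before
 * some other agent reads it from memory, e.g.:
 *
 *      memcpy(dst, src, len);
 *      clflush_cache_range(dst, len);
 *
 * The mb() fences above order the flush with respect to the preceding writes.
 */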
150
151 static void __cpa_flush_all(void *arg)
152 {
153         unsigned long cache = (unsigned long)arg;
154
155         /*
156          * Flush all to work around errata in early Athlons regarding
157          * large page flushing.
158          */
159         __flush_tlb_all();
160
161         if (cache && boot_cpu_data.x86 >= 4)
162                 wbinvd();
163 }
164
165 static void cpa_flush_all(unsigned long cache)
166 {
167         BUG_ON(irqs_disabled());
168
169         on_each_cpu(__cpa_flush_all, (void *) cache, 1);
170 }
171
172 static void __cpa_flush_range(void *arg)
173 {
174         /*
175          * We could optimize that further and do individual per page
176          * tlb invalidates for a low number of pages. Caveat: we must
177          * flush the high aliases on 64bit as well.
178          */
179         __flush_tlb_all();
180 }
181
182 static void cpa_flush_range(unsigned long start, int numpages, int cache)
183 {
184         unsigned int i, level;
185         unsigned long addr;
186
187         BUG_ON(irqs_disabled());
188         WARN_ON(PAGE_ALIGN(start) != start);
189
190         on_each_cpu(__cpa_flush_range, NULL, 1);
191
192         if (!cache)
193                 return;
194
195         /*
196          * We only need to flush on one CPU;
197          * clflush is a MESI-coherent instruction that
198          * will cause all other CPUs to flush the same
199          * cachelines:
200          */
201         for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
202                 pte_t *pte = lookup_address(addr, &level);
203
204                 /*
205                  * Only flush present addresses:
206                  */
207                 if (pte && (pte_val(*pte) & _PAGE_PRESENT))
208                         clflush_cache_range((void *) addr, PAGE_SIZE);
209         }
210 }
211
212 static void cpa_flush_array(unsigned long *start, int numpages, int cache,
213                             int in_flags, struct page **pages)
214 {
215         unsigned int i, level;
216         unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
217
218         BUG_ON(irqs_disabled());
219
220         on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
221
222         if (!cache || do_wbinvd)
223                 return;
224
225         /*
226          * We only need to flush on one CPU;
227          * clflush is a MESI-coherent instruction that
228          * will cause all other CPUs to flush the same
229          * cachelines:
230          */
231         for (i = 0; i < numpages; i++) {
232                 unsigned long addr;
233                 pte_t *pte;
234
235                 if (in_flags & CPA_PAGES_ARRAY)
236                         addr = (unsigned long)page_address(pages[i]);
237                 else
238                         addr = start[i];
239
240                 pte = lookup_address(addr, &level);
241
242                 /*
243                  * Only flush present addresses:
244                  */
245                 if (pte && (pte_val(*pte) & _PAGE_PRESENT))
246                         clflush_cache_range((void *)addr, PAGE_SIZE);
247         }
248 }
249
250 /*
251  * Certain areas of memory on x86 require very specific protection flags,
252  * for example the BIOS area or kernel text. Callers don't always get this
253  * right (again, ioremap() on BIOS memory is not uncommon) so this function
254  * checks and fixes these known static required protection bits.
255  */
256 static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
257                                    unsigned long pfn)
258 {
259         pgprot_t forbidden = __pgprot(0);
260
261         /*
262          * The BIOS area between 640k and 1MB needs to be executable for
263          * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
264          */
265 #ifdef CONFIG_PCI_BIOS
266         if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
267                 pgprot_val(forbidden) |= _PAGE_NX;
268 #endif
269
270         /*
271          * The kernel text needs to be executable for obvious reasons.
272          * This does not cover __inittext since that is gone later on. On
273          * 64-bit we do not enforce !NX on the low mapping.
274          */
275         if (within(address, (unsigned long)_text, (unsigned long)_etext))
276                 pgprot_val(forbidden) |= _PAGE_NX;
277
278         /*
279          * The .rodata section needs to be read-only. Using the pfn
280          * catches all aliases.
281          */
282         if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
283                    __pa_symbol(__end_rodata) >> PAGE_SHIFT))
284                 pgprot_val(forbidden) |= _PAGE_RW;
285
286 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
287         /*
288          * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
289          * kernel text mappings for the large page aligned text and rodata
290          * sections will always be read-only. The kernel identity mappings
291          * covering the holes caused by this alignment can be anything the user asks for.
292          *
293          * This will preserve the large page mappings for kernel text/data
294          * at no extra cost.
295          */
296         if (kernel_set_to_readonly &&
297             within(address, (unsigned long)_text,
298                    (unsigned long)__end_rodata_hpage_align)) {
299                 unsigned int level;
300
301                 /*
302                  * Don't enforce the !RW mapping for the kernel text mapping,
303                  * if the current mapping is already using small page mapping.
304                  * No need to work hard to preserve large page mappings in this
305                  * case.
306                  *
307                  * This also fixes the Linux Xen paravirt guest boot failure
308                  * (because of unexpected read-only mappings for kernel identity
309                  * mappings). In this paravirt guest case, the kernel text
310                  * mapping and the kernel identity mapping share the same
311                  * page-table pages. Thus we can't really use different
312                  * protections for the kernel text and identity mappings. Also,
313                  * these shared mappings are made of small page mappings.
314                  * Thus, not enforcing the !RW mapping for small page kernel
315                  * text mappings also helps the Linux Xen paravirt guest boot
316                  * case.
317                  */
318                 if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
319                         pgprot_val(forbidden) |= _PAGE_RW;
320         }
321 #endif
322
323         prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
324
325         return prot;
326 }
327
328 /*
329  * Lookup the page table entry for a virtual address in a specific pgd.
330  * Return a pointer to the entry and the level of the mapping.
331  */
332 pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
333                              unsigned int *level)
334 {
335         pud_t *pud;
336         pmd_t *pmd;
337
338         *level = PG_LEVEL_NONE;
339
340         if (pgd_none(*pgd))
341                 return NULL;
342
343         pud = pud_offset(pgd, address);
344         if (pud_none(*pud))
345                 return NULL;
346
347         *level = PG_LEVEL_1G;
348         if (pud_large(*pud) || !pud_present(*pud))
349                 return (pte_t *)pud;
350
351         pmd = pmd_offset(pud, address);
352         if (pmd_none(*pmd))
353                 return NULL;
354
355         *level = PG_LEVEL_2M;
356         if (pmd_large(*pmd) || !pmd_present(*pmd))
357                 return (pte_t *)pmd;
358
359         *level = PG_LEVEL_4K;
360
361         return pte_offset_kernel(pmd, address);
362 }
363
364 /*
365  * Lookup the page table entry for a virtual address. Return a pointer
366  * to the entry and the level of the mapping.
367  *
368  * Note: We return pud and pmd either when the entry is marked large
369  * or when the present bit is not set. Otherwise we would return a
370  * pointer to a nonexistent mapping.
371  */
372 pte_t *lookup_address(unsigned long address, unsigned int *level)
373 {
374         return lookup_address_in_pgd(pgd_offset_k(address), address, level);
375 }
376 EXPORT_SYMBOL_GPL(lookup_address);
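/*
 * A typical caller pattern, as used in cpa_flush_range() above: check that
 * the returned entry is non-NULL and present before dereferencing it:
 *
 *      unsigned int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *
 *      if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *              (pte and level are valid here)
 */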
377
378 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
379                                   unsigned int *level)
380 {
381         if (cpa->pgd)
382                 return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
383                                                address, level);
384
385         return lookup_address(address, level);
386 }
387
388 /*
389  * Lookup the PMD entry for a virtual address. Return a pointer to the entry
390  * or NULL if not present.
391  */
392 pmd_t *lookup_pmd_address(unsigned long address)
393 {
394         pgd_t *pgd;
395         pud_t *pud;
396
397         pgd = pgd_offset_k(address);
398         if (pgd_none(*pgd))
399                 return NULL;
400
401         pud = pud_offset(pgd, address);
402         if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
403                 return NULL;
404
405         return pmd_offset(pud, address);
406 }
407
408 /*
409  * This is necessary because __pa() does not work on some
410  * kinds of memory, like vmalloc() or the alloc_remap()
411  * areas on 32-bit NUMA systems.  The percpu areas can
412  * end up in this kind of memory, for instance.
413  *
414  * This could be optimized, but it is only intended to be
415  * used at initialization time, and keeping it
416  * unoptimized should increase the testing coverage for
417  * the more obscure platforms.
418  */
419 phys_addr_t slow_virt_to_phys(void *__virt_addr)
420 {
421         unsigned long virt_addr = (unsigned long)__virt_addr;
422         phys_addr_t phys_addr;
423         unsigned long offset;
424         enum pg_level level;
425         pte_t *pte;
426
427         pte = lookup_address(virt_addr, &level);
428         BUG_ON(!pte);
429
430         /*
431          * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
432          * before being left-shifted PAGE_SHIFT bits -- this trick is to
433          * make the 32-bit PAE kernel work correctly.
434          */
435         switch (level) {
436         case PG_LEVEL_1G:
437                 phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
438                 offset = virt_addr & ~PUD_PAGE_MASK;
439                 break;
440         case PG_LEVEL_2M:
441                 phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
442                 offset = virt_addr & ~PMD_PAGE_MASK;
443                 break;
444         default:
445                 phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
446                 offset = virt_addr & ~PAGE_MASK;
447         }
448
449         return (phys_addr_t)(phys_addr | offset);
450 }
451 EXPORT_SYMBOL_GPL(slow_virt_to_phys);
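/*
 * Example (sketch): for a vmalloc() or percpu pointer, where __pa() is not
 * valid, the physical address can be obtained with:
 *
 *      phys_addr_t pa = slow_virt_to_phys(ptr);
 */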
452
453 /*
454  * Set the new pmd in all the pgds we know about:
455  */
456 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
457 {
458         /* change init_mm */
459         set_pte_atomic(kpte, pte);
460 #ifdef CONFIG_X86_32
461         if (!SHARED_KERNEL_PMD) {
462                 struct page *page;
463
464                 list_for_each_entry(page, &pgd_list, lru) {
465                         pgd_t *pgd;
466                         pud_t *pud;
467                         pmd_t *pmd;
468
469                         pgd = (pgd_t *)page_address(page) + pgd_index(address);
470                         pud = pud_offset(pgd, address);
471                         pmd = pmd_offset(pud, address);
472                         set_pte_atomic((pte_t *)pmd, pte);
473                 }
474         }
475 #endif
476 }
477
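/*
 * Return convention of try_preserve_large_page() below: 1 means the caller
 * must split the large page, 0 means the requested change was applied (or was
 * a no-op) while keeping the large mapping intact, and a negative value is an
 * error (-EINVAL for an unexpected page table level).
 */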
478 static int
479 try_preserve_large_page(pte_t *kpte, unsigned long address,
480                         struct cpa_data *cpa)
481 {
482         unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn;
483         pte_t new_pte, old_pte, *tmp;
484         pgprot_t old_prot, new_prot, req_prot;
485         int i, do_split = 1;
486         enum pg_level level;
487
488         if (cpa->force_split)
489                 return 1;
490
491         spin_lock(&pgd_lock);
492         /*
493          * Check for races, another CPU might have split this page
494          * up already:
495          */
496         tmp = _lookup_address_cpa(cpa, address, &level);
497         if (tmp != kpte)
498                 goto out_unlock;
499
500         switch (level) {
501         case PG_LEVEL_2M:
502                 old_prot = pmd_pgprot(*(pmd_t *)kpte);
503                 old_pfn = pmd_pfn(*(pmd_t *)kpte);
504                 break;
505         case PG_LEVEL_1G:
506                 old_prot = pud_pgprot(*(pud_t *)kpte);
507                 old_pfn = pud_pfn(*(pud_t *)kpte);
508                 break;
509         default:
510                 do_split = -EINVAL;
511                 goto out_unlock;
512         }
513
514         psize = page_level_size(level);
515         pmask = page_level_mask(level);
516
517         /*
518          * Calculate the number of pages, which fit into this large
519          * page starting at address:
520          */
521         nextpage_addr = (address + psize) & pmask;
522         numpages = (nextpage_addr - address) >> PAGE_SHIFT;
523         if (numpages < cpa->numpages)
524                 cpa->numpages = numpages;
525
526         /*
527          * We are safe now. Check whether the new pgprot is the same:
528          * Convert protection attributes to 4k-format, as cpa->mask* are set
529          * up accordingly.
530          */
531         old_pte = *kpte;
532         req_prot = pgprot_large_2_4k(old_prot);
533
534         pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
535         pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
536
537         /*
538          * req_prot is in format of 4k pages. It must be converted to large
539          * page format: the caching mode includes the PAT bit located at
540          * different bit positions in the two formats.
541          */
542         req_prot = pgprot_4k_2_large(req_prot);
543
544         /*
545          * Set the PSE and GLOBAL flags only if the PRESENT flag is
546          * set, otherwise pmd_present/pmd_huge will return true even on
547          * a non-present pmd. canon_pgprot() will clear _PAGE_GLOBAL
548          * for the ancient hardware that doesn't support it.
549          */
550         if (pgprot_val(req_prot) & _PAGE_PRESENT)
551                 pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
552         else
553                 pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
554
555         req_prot = canon_pgprot(req_prot);
556
557         /*
558          * old_pfn points to the large page base pfn. So we need
559          * to add the offset of the virtual address:
560          */
561         pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
562         cpa->pfn = pfn;
563
564         new_prot = static_protections(req_prot, address, pfn);
565
566         /*
567          * We need to check the full range, whether
568          * static_protection() requires a different pgprot for one of
569          * the pages in the range we try to preserve:
570          */
571         addr = address & pmask;
572         pfn = old_pfn;
573         for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
574                 pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
575
576                 if (pgprot_val(chk_prot) != pgprot_val(new_prot))
577                         goto out_unlock;
578         }
579
580         /*
581          * If there are no changes, return. cpa->numpages has been updated
582          * above:
583          */
584         if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
585                 do_split = 0;
586                 goto out_unlock;
587         }
588
589         /*
590          * We need to change the attributes. Check, whether we can
591          * change the large page in one go. We request a split, when
592          * the address is not aligned and the number of pages is
593          * smaller than the number of pages in the large page. Note
594          * that we limited the number of possible pages already to
595          * the number of pages in the large page.
596          */
597         if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
598                 /*
599                  * The address is aligned and the number of pages
600                  * covers the full page.
601                  */
602                 new_pte = pfn_pte(old_pfn, new_prot);
603                 __set_pmd_pte(kpte, address, new_pte);
604                 cpa->flags |= CPA_FLUSHTLB;
605                 do_split = 0;
606         }
607
608 out_unlock:
609         spin_unlock(&pgd_lock);
610
611         return do_split;
612 }
613
614 static int
615 __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
616                    struct page *base)
617 {
618         pte_t *pbase = (pte_t *)page_address(base);
619         unsigned long ref_pfn, pfn, pfninc = 1;
620         unsigned int i, level;
621         pte_t *tmp;
622         pgprot_t ref_prot;
623
624         spin_lock(&pgd_lock);
625         /*
626          * Check for races, another CPU might have split this page
627          * up for us already:
628          */
629         tmp = _lookup_address_cpa(cpa, address, &level);
630         if (tmp != kpte) {
631                 spin_unlock(&pgd_lock);
632                 return 1;
633         }
634
635         paravirt_alloc_pte(&init_mm, page_to_pfn(base));
636
637         switch (level) {
638         case PG_LEVEL_2M:
639                 ref_prot = pmd_pgprot(*(pmd_t *)kpte);
640                 /* clear PSE and promote PAT bit to correct position */
641                 ref_prot = pgprot_large_2_4k(ref_prot);
642                 ref_pfn = pmd_pfn(*(pmd_t *)kpte);
643                 break;
644
645         case PG_LEVEL_1G:
646                 ref_prot = pud_pgprot(*(pud_t *)kpte);
647                 ref_pfn = pud_pfn(*(pud_t *)kpte);
648                 pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
649
650                 /*
651                  * Clear the PSE flags if the PRESENT flag is not set
652                  * otherwise pmd_present/pmd_huge will return true
653                  * even on a non present pmd.
654                  */
655                 if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
656                         pgprot_val(ref_prot) &= ~_PAGE_PSE;
657                 break;
658
659         default:
660                 spin_unlock(&pgd_lock);
661                 return 1;
662         }
663
664         /*
665          * Set the GLOBAL flag only if the PRESENT flag is set,
666          * otherwise pmd/pte_present will return true even on a
667          * non-present pmd/pte. canon_pgprot() will clear _PAGE_GLOBAL
668          * for the ancient hardware that doesn't support it.
669          */
670         if (pgprot_val(ref_prot) & _PAGE_PRESENT)
671                 pgprot_val(ref_prot) |= _PAGE_GLOBAL;
672         else
673                 pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
674
675         /*
676          * Get the target pfn from the original entry:
677          */
678         pfn = ref_pfn;
679         for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
680                 set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
681
682         if (virt_addr_valid(address)) {
683                 unsigned long pfn = PFN_DOWN(__pa(address));
684
685                 if (pfn_range_is_mapped(pfn, pfn + 1))
686                         split_page_count(level);
687         }
688
689         /*
690          * Install the new, split up pagetable.
691          *
692          * We use the standard kernel pagetable protections for the new
693          * pagetable protections, the actual ptes set above control the
694          * primary protection behavior:
695          */
696         __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
697
698         /*
699          * Intel Atom erratum AAH41 workaround.
700          *
701          * The real fix should be in hw or in a microcode update, but
702          * we also probabilistically try to reduce the window of having
703          * a large TLB mixed with 4K TLBs while instruction fetches are
704          * going on.
705          */
706         __flush_tlb_all();
707         spin_unlock(&pgd_lock);
708
709         return 0;
710 }
711
712 static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
713                             unsigned long address)
714 {
715         struct page *base;
716
717         if (!debug_pagealloc)
718                 spin_unlock(&cpa_lock);
719         base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
720         if (!debug_pagealloc)
721                 spin_lock(&cpa_lock);
722         if (!base)
723                 return -ENOMEM;
724
725         if (__split_large_page(cpa, kpte, address, base))
726                 __free_page(base);
727
728         return 0;
729 }
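/*
 * Note: a nonzero return from __split_large_page() above means the split was
 * not performed (e.g. another CPU already split the page); the preallocated
 * page is freed and 0 is returned, so the caller simply re-walks the tables.
 */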
730
731 static bool try_to_free_pte_page(pte_t *pte)
732 {
733         int i;
734
735         for (i = 0; i < PTRS_PER_PTE; i++)
736                 if (!pte_none(pte[i]))
737                         return false;
738
739         free_page((unsigned long)pte);
740         return true;
741 }
742
743 static bool try_to_free_pmd_page(pmd_t *pmd)
744 {
745         int i;
746
747         for (i = 0; i < PTRS_PER_PMD; i++)
748                 if (!pmd_none(pmd[i]))
749                         return false;
750
751         free_page((unsigned long)pmd);
752         return true;
753 }
754
755 static bool try_to_free_pud_page(pud_t *pud)
756 {
757         int i;
758
759         for (i = 0; i < PTRS_PER_PUD; i++)
760                 if (!pud_none(pud[i]))
761                         return false;
762
763         free_page((unsigned long)pud);
764         return true;
765 }
766
767 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
768 {
769         pte_t *pte = pte_offset_kernel(pmd, start);
770
771         while (start < end) {
772                 set_pte(pte, __pte(0));
773
774                 start += PAGE_SIZE;
775                 pte++;
776         }
777
778         if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
779                 pmd_clear(pmd);
780                 return true;
781         }
782         return false;
783 }
784
785 static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
786                               unsigned long start, unsigned long end)
787 {
788         if (unmap_pte_range(pmd, start, end))
789                 if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
790                         pud_clear(pud);
791 }
792
793 static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
794 {
795         pmd_t *pmd = pmd_offset(pud, start);
796
797         /*
798          * Not on a 2MB page boundary?
799          */
800         if (start & (PMD_SIZE - 1)) {
801                 unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
802                 unsigned long pre_end = min_t(unsigned long, end, next_page);
803
804                 __unmap_pmd_range(pud, pmd, start, pre_end);
805
806                 start = pre_end;
807                 pmd++;
808         }
809
810         /*
811          * Try to unmap in 2M chunks.
812          */
813         while (end - start >= PMD_SIZE) {
814                 if (pmd_large(*pmd))
815                         pmd_clear(pmd);
816                 else
817                         __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
818
819                 start += PMD_SIZE;
820                 pmd++;
821         }
822
823         /*
824          * 4K leftovers?
825          */
826         if (start < end)
827                 return __unmap_pmd_range(pud, pmd, start, end);
828
829         /*
830          * Try again to free the PMD page if we haven't succeeded above.
831          */
832         if (!pud_none(*pud))
833                 if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
834                         pud_clear(pud);
835 }
836
837 static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
838 {
839         pud_t *pud = pud_offset(pgd, start);
840
841         /*
842          * Not on a GB page boundary?
843          */
844         if (start & (PUD_SIZE - 1)) {
845                 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
846                 unsigned long pre_end   = min_t(unsigned long, end, next_page);
847
848                 unmap_pmd_range(pud, start, pre_end);
849
850                 start = pre_end;
851                 pud++;
852         }
853
854         /*
855          * Try to unmap in 1G chunks.
856          */
857         while (end - start >= PUD_SIZE) {
858
859                 if (pud_large(*pud))
860                         pud_clear(pud);
861                 else
862                         unmap_pmd_range(pud, start, start + PUD_SIZE);
863
864                 start += PUD_SIZE;
865                 pud++;
866         }
867
868         /*
869          * 2M leftovers?
870          */
871         if (start < end)
872                 unmap_pmd_range(pud, start, end);
873
874         /*
875          * No need to try to free the PUD page because we'll free it in
876          * populate_pgd's error path
877          */
878 }
879
880 static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
881 {
882         pgd_t *pgd_entry = root + pgd_index(addr);
883
884         unmap_pud_range(pgd_entry, addr, end);
885
886         if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
887                 pgd_clear(pgd_entry);
888 }
889
890 static int alloc_pte_page(pmd_t *pmd)
891 {
892         pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
893         if (!pte)
894                 return -1;
895
896         set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
897         return 0;
898 }
899
900 static int alloc_pmd_page(pud_t *pud)
901 {
902         pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
903         if (!pmd)
904                 return -1;
905
906         set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
907         return 0;
908 }
909
910 static void populate_pte(struct cpa_data *cpa,
911                          unsigned long start, unsigned long end,
912                          unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
913 {
914         pte_t *pte;
915
916         pte = pte_offset_kernel(pmd, start);
917
918         while (num_pages-- && start < end) {
919
920                 /* deal with the NX bit */
921                 if (!(pgprot_val(pgprot) & _PAGE_NX))
922                         cpa->pfn &= ~_PAGE_NX;
923
924                 set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
925
926                 start    += PAGE_SIZE;
927                 cpa->pfn += PAGE_SIZE;
928                 pte++;
929         }
930 }
931
932 static int populate_pmd(struct cpa_data *cpa,
933                         unsigned long start, unsigned long end,
934                         unsigned num_pages, pud_t *pud, pgprot_t pgprot)
935 {
936         unsigned int cur_pages = 0;
937         pmd_t *pmd;
938         pgprot_t pmd_pgprot;
939
940         /*
941          * Not on a 2M boundary?
942          */
943         if (start & (PMD_SIZE - 1)) {
944                 unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
945                 unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
946
947                 pre_end   = min_t(unsigned long, pre_end, next_page);
948                 cur_pages = (pre_end - start) >> PAGE_SHIFT;
949                 cur_pages = min_t(unsigned int, num_pages, cur_pages);
950
951                 /*
952                  * Need a PTE page?
953                  */
954                 pmd = pmd_offset(pud, start);
955                 if (pmd_none(*pmd))
956                         if (alloc_pte_page(pmd))
957                                 return -1;
958
959                 populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
960
961                 start = pre_end;
962         }
963
964         /*
965          * We mapped them all?
966          */
967         if (num_pages == cur_pages)
968                 return cur_pages;
969
970         pmd_pgprot = pgprot_4k_2_large(pgprot);
971
972         while (end - start >= PMD_SIZE) {
973
974                 /*
975                  * We cannot use a 1G page so allocate a PMD page if needed.
976                  */
977                 if (pud_none(*pud))
978                         if (alloc_pmd_page(pud))
979                                 return -1;
980
981                 pmd = pmd_offset(pud, start);
982
983                 set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
984                                    massage_pgprot(pmd_pgprot)));
985
986                 start     += PMD_SIZE;
987                 cpa->pfn  += PMD_SIZE;
988                 cur_pages += PMD_SIZE >> PAGE_SHIFT;
989         }
990
991         /*
992          * Map trailing 4K pages.
993          */
994         if (start < end) {
995                 pmd = pmd_offset(pud, start);
996                 if (pmd_none(*pmd))
997                         if (alloc_pte_page(pmd))
998                                 return -1;
999
1000                 populate_pte(cpa, start, end, num_pages - cur_pages,
1001                              pmd, pgprot);
1002         }
1003         return num_pages;
1004 }
1005
1006 static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
1007                         pgprot_t pgprot)
1008 {
1009         pud_t *pud;
1010         unsigned long end;
1011         int cur_pages = 0;
1012         pgprot_t pud_pgprot;
1013
1014         end = start + (cpa->numpages << PAGE_SHIFT);
1015
1016         /*
1017          * Not on a Gb page boundary? => map everything up to it with
1018          * smaller pages.
1019          */
1020         if (start & (PUD_SIZE - 1)) {
1021                 unsigned long pre_end;
1022                 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
1023
1024                 pre_end   = min_t(unsigned long, end, next_page);
1025                 cur_pages = (pre_end - start) >> PAGE_SHIFT;
1026                 cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
1027
1028                 pud = pud_offset(pgd, start);
1029
1030                 /*
1031                  * Need a PMD page?
1032                  */
1033                 if (pud_none(*pud))
1034                         if (alloc_pmd_page(pud))
1035                                 return -1;
1036
1037                 cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
1038                                          pud, pgprot);
1039                 if (cur_pages < 0)
1040                         return cur_pages;
1041
1042                 start = pre_end;
1043         }
1044
1045         /* We mapped them all? */
1046         if (cpa->numpages == cur_pages)
1047                 return cur_pages;
1048
1049         pud = pud_offset(pgd, start);
1050         pud_pgprot = pgprot_4k_2_large(pgprot);
1051
1052         /*
1053          * Map everything starting from the Gb boundary, possibly with 1G pages
1054          */
1055         while (end - start >= PUD_SIZE) {
1056                 set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
1057                                    massage_pgprot(pud_pgprot)));
1058
1059                 start     += PUD_SIZE;
1060                 cpa->pfn  += PUD_SIZE;
1061                 cur_pages += PUD_SIZE >> PAGE_SHIFT;
1062                 pud++;
1063         }
1064
1065         /* Map trailing leftover */
1066         if (start < end) {
1067                 int tmp;
1068
1069                 pud = pud_offset(pgd, start);
1070                 if (pud_none(*pud))
1071                         if (alloc_pmd_page(pud))
1072                                 return -1;
1073
1074                 tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
1075                                    pud, pgprot);
1076                 if (tmp < 0)
1077                         return cur_pages;
1078
1079                 cur_pages += tmp;
1080         }
1081         return cur_pages;
1082 }
1083
1084 /*
1085  * Restrictions for kernel page table do not necessarily apply when mapping in
1086  * an alternate PGD.
1087  */
1088 static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
1089 {
1090         pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
1091         pud_t *pud = NULL;      /* shut up gcc */
1092         pgd_t *pgd_entry;
1093         int ret;
1094
1095         pgd_entry = cpa->pgd + pgd_index(addr);
1096
1097         /*
1098          * Allocate a PUD page and hand it down for mapping.
1099          */
1100         if (pgd_none(*pgd_entry)) {
1101                 pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
1102                 if (!pud)
1103                         return -1;
1104
1105                 set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
1106         }
1107
1108         pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
1109         pgprot_val(pgprot) |=  pgprot_val(cpa->mask_set);
1110
1111         ret = populate_pud(cpa, addr, pgd_entry, pgprot);
1112         if (ret < 0) {
1113                 unmap_pgd_range(cpa->pgd, addr,
1114                                 addr + (cpa->numpages << PAGE_SHIFT));
1115                 return ret;
1116         }
1117
1118         cpa->numpages = ret;
1119         return 0;
1120 }
1121
1122 static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1123                                int primary)
1124 {
1125         if (cpa->pgd)
1126                 return populate_pgd(cpa, vaddr);
1127
1128         /*
1129          * Ignore all non primary paths.
1130          */
1131         if (!primary)
1132                 return 0;
1133
1134         /*
1135          * Ignore the NULL PTE for kernel identity mapping, as it is expected
1136          * to have holes.
1137          * Also set numpages to '1' indicating that we processed cpa req for
1138          * one virtual address page and its pfn. TBD: numpages can be set based
1139          * on the initial value and the level returned by lookup_address().
1140          */
1141         if (within(vaddr, PAGE_OFFSET,
1142                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
1143                 cpa->numpages = 1;
1144                 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1145                 return 0;
1146         } else {
1147                 WARN(1, KERN_WARNING "CPA: called for zero pte. "
1148                         "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
1149                         *cpa->vaddr);
1150
1151                 return -EFAULT;
1152         }
1153 }
1154
1155 static int __change_page_attr(struct cpa_data *cpa, int primary)
1156 {
1157         unsigned long address;
1158         int do_split, err;
1159         unsigned int level;
1160         pte_t *kpte, old_pte;
1161
1162         if (cpa->flags & CPA_PAGES_ARRAY) {
1163                 struct page *page = cpa->pages[cpa->curpage];
1164                 if (unlikely(PageHighMem(page)))
1165                         return 0;
1166                 address = (unsigned long)page_address(page);
1167         } else if (cpa->flags & CPA_ARRAY)
1168                 address = cpa->vaddr[cpa->curpage];
1169         else
1170                 address = *cpa->vaddr;
1171 repeat:
1172         kpte = _lookup_address_cpa(cpa, address, &level);
1173         if (!kpte)
1174                 return __cpa_process_fault(cpa, address, primary);
1175
1176         old_pte = *kpte;
1177         if (!pte_val(old_pte))
1178                 return __cpa_process_fault(cpa, address, primary);
1179
1180         if (level == PG_LEVEL_4K) {
1181                 pte_t new_pte;
1182                 pgprot_t new_prot = pte_pgprot(old_pte);
1183                 unsigned long pfn = pte_pfn(old_pte);
1184
1185                 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
1186                 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
1187
1188                 new_prot = static_protections(new_prot, address, pfn);
1189
1190                 /*
1191                  * Set the GLOBAL flags only if the PRESENT flag is
1192                  * set otherwise pte_present will return true even on
1193                  * a non present pte. The canon_pgprot will clear
1194                  * _PAGE_GLOBAL for the ancient hardware that doesn't
1195                  * support it.
1196                  */
1197                 if (pgprot_val(new_prot) & _PAGE_PRESENT)
1198                         pgprot_val(new_prot) |= _PAGE_GLOBAL;
1199                 else
1200                         pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
1201
1202                 /*
1203                  * We need to keep the pfn from the existing PTE,
1204                  * after all we're only going to change its attributes,
1205                  * not the memory it points to.
1206                  */
1207                 new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
1208                 cpa->pfn = pfn;
1209                 /*
1210                  * Do we really change anything ?
1211                  */
1212                 if (pte_val(old_pte) != pte_val(new_pte)) {
1213                         set_pte_atomic(kpte, new_pte);
1214                         cpa->flags |= CPA_FLUSHTLB;
1215                 }
1216                 cpa->numpages = 1;
1217                 return 0;
1218         }
1219
1220         /*
1221          * Check, whether we can keep the large page intact
1222          * and just change the pte:
1223          */
1224         do_split = try_preserve_large_page(kpte, address, cpa);
1225         /*
1226          * When the range fits into the existing large page,
1227          * return. cpa->numpages and the CPA_FLUSHTLB flag have been updated
1228          * in try_preserve_large_page():
1229          */
1230         if (do_split <= 0)
1231                 return do_split;
1232
1233         /*
1234          * We have to split the large page:
1235          */
1236         err = split_large_page(cpa, kpte, address);
1237         if (!err) {
1238                 /*
1239                  * Do a global flush tlb after splitting the large page
1240                  * and before we do the actual change page attribute in the PTE.
1241                  *
1242                  * Without this, we violate the TLB application note, which says
1243                  * "The TLBs may contain both ordinary and large-page
1244                  *  translations for a 4-KByte range of linear addresses. This
1245                  *  may occur if software modifies the paging structures so that
1246                  *  the page size used for the address range changes. If the two
1247                  *  translations differ with respect to page frame or attributes
1248                  *  (e.g., permissions), processor behavior is undefined and may
1249                  *  be implementation-specific."
1250                  *
1251                  * We do this global tlb flush inside the cpa_lock, so that we
1252                  * don't allow any other cpu, with stale tlb entries, to change
1253                  * the page attributes in parallel for a range that also falls into
1254                  * the just split large page entry.
1255                  */
1256                 flush_tlb_all();
1257                 goto repeat;
1258         }
1259
1260         return err;
1261 }
1262
1263 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
1264
1265 static int cpa_process_alias(struct cpa_data *cpa)
1266 {
1267         struct cpa_data alias_cpa;
1268         unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
1269         unsigned long vaddr;
1270         int ret;
1271
1272         if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
1273                 return 0;
1274
1275         /*
1276          * No need to redo, when the primary call touched the direct
1277          * mapping already:
1278          */
1279         if (cpa->flags & CPA_PAGES_ARRAY) {
1280                 struct page *page = cpa->pages[cpa->curpage];
1281                 if (unlikely(PageHighMem(page)))
1282                         return 0;
1283                 vaddr = (unsigned long)page_address(page);
1284         } else if (cpa->flags & CPA_ARRAY)
1285                 vaddr = cpa->vaddr[cpa->curpage];
1286         else
1287                 vaddr = *cpa->vaddr;
1288
1289         if (!(within(vaddr, PAGE_OFFSET,
1290                     PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
1291
1292                 alias_cpa = *cpa;
1293                 alias_cpa.vaddr = &laddr;
1294                 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1295
1296                 ret = __change_page_attr_set_clr(&alias_cpa, 0);
1297                 if (ret)
1298                         return ret;
1299         }
1300
1301 #ifdef CONFIG_X86_64
1302         /*
1303          * If the primary call didn't touch the high mapping already
1304          * and the physical address is inside the kernel map, we need
1305          * to touch the high mapped kernel as well:
1306          */
1307         if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1308             within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
1309                 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1310                                                __START_KERNEL_map - phys_base;
1311                 alias_cpa = *cpa;
1312                 alias_cpa.vaddr = &temp_cpa_vaddr;
1313                 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1314
1315                 /*
1316                  * The high mapping range is imprecise, so ignore the
1317                  * return value.
1318                  */
1319                 __change_page_attr_set_clr(&alias_cpa, 0);
1320         }
1321 #endif
1322
1323         return 0;
1324 }
1325
1326 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1327 {
1328         int ret, numpages = cpa->numpages;
1329
1330         while (numpages) {
1331                 /*
1332                  * Store the remaining nr of pages for the large page
1333                  * preservation check.
1334                  */
1335                 cpa->numpages = numpages;
1336                 /* for array changes, we can't use large page */
1337                 if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
1338                         cpa->numpages = 1;
1339
1340                 if (!debug_pagealloc)
1341                         spin_lock(&cpa_lock);
1342                 ret = __change_page_attr(cpa, checkalias);
1343                 if (!debug_pagealloc)
1344                         spin_unlock(&cpa_lock);
1345                 if (ret)
1346                         return ret;
1347
1348                 if (checkalias) {
1349                         ret = cpa_process_alias(cpa);
1350                         if (ret)
1351                                 return ret;
1352                 }
1353
1354                 /*
1355                  * Adjust the number of pages with the result of the
1356                  * CPA operation. Either a large page has been
1357                  * preserved or a single page update happened.
1358                  */
1359                 BUG_ON(cpa->numpages > numpages || !cpa->numpages);
1360                 numpages -= cpa->numpages;
1361                 if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
1362                         cpa->curpage++;
1363                 else
1364                         *cpa->vaddr += cpa->numpages * PAGE_SIZE;
1365
1366         }
1367         return 0;
1368 }
1369
1370 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1371                                     pgprot_t mask_set, pgprot_t mask_clr,
1372                                     int force_split, int in_flag,
1373                                     struct page **pages)
1374 {
1375         struct cpa_data cpa;
1376         int ret, cache, checkalias;
1377         unsigned long baddr = 0;
1378
1379         memset(&cpa, 0, sizeof(cpa));
1380
1381         /*
1382          * Check whether we are requested to change an unsupported
1383          * feature:
1384          */
1385         mask_set = canon_pgprot(mask_set);
1386         mask_clr = canon_pgprot(mask_clr);
1387         if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
1388                 return 0;
1389
1390         /* Ensure we are PAGE_SIZE aligned */
1391         if (in_flag & CPA_ARRAY) {
1392                 int i;
1393                 for (i = 0; i < numpages; i++) {
1394                         if (addr[i] & ~PAGE_MASK) {
1395                                 addr[i] &= PAGE_MASK;
1396                                 WARN_ON_ONCE(1);
1397                         }
1398                 }
1399         } else if (!(in_flag & CPA_PAGES_ARRAY)) {
1400                 /*
1401                  * in_flag of CPA_PAGES_ARRAY implies it is aligned.
1402                  * No need to check in that case.
1403                  */
1404                 if (*addr & ~PAGE_MASK) {
1405                         *addr &= PAGE_MASK;
1406                         /*
1407                          * People should not be passing in unaligned addresses:
1408                          */
1409                         WARN_ON_ONCE(1);
1410                 }
1411                 /*
1412                  * Save address for cache flush. *addr is modified in the call
1413                  * to __change_page_attr_set_clr() below.
1414                  */
1415                 baddr = *addr;
1416         }
1417
1418         /* Must avoid aliasing mappings in the highmem code */
1419         kmap_flush_unused();
1420
1421         vm_unmap_aliases();
1422
1423         cpa.vaddr = addr;
1424         cpa.pages = pages;
1425         cpa.numpages = numpages;
1426         cpa.mask_set = mask_set;
1427         cpa.mask_clr = mask_clr;
1428         cpa.flags = 0;
1429         cpa.curpage = 0;
1430         cpa.force_split = force_split;
1431
1432         if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
1433                 cpa.flags |= in_flag;
1434
1435         /* No alias checking for _NX bit modifications */
1436         checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
1437
1438         ret = __change_page_attr_set_clr(&cpa, checkalias);
1439
1440         /*
1441          * Check whether we really changed something:
1442          */
1443         if (!(cpa.flags & CPA_FLUSHTLB))
1444                 goto out;
1445
1446         /*
1447          * No need to flush, when we did not set any of the caching
1448          * attributes:
1449          */
1450         cache = !!pgprot2cachemode(mask_set);
1451
1452         /*
1453          * On success we use CLFLUSH, when the CPU supports it, to
1454          * avoid the WBINVD. If the CPU does not support it, and in the
1455          * error case, we fall back to cpa_flush_all() (which uses
1456          * WBINVD):
1457          */
1458         if (!ret && cpu_has_clflush) {
1459                 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
1460                         cpa_flush_array(addr, numpages, cache,
1461                                         cpa.flags, pages);
1462                 } else
1463                         cpa_flush_range(baddr, numpages, cache);
1464         } else
1465                 cpa_flush_all(cache);
1466
1467 out:
1468         return ret;
1469 }
1470
1471 static inline int change_page_attr_set(unsigned long *addr, int numpages,
1472                                        pgprot_t mask, int array)
1473 {
1474         return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
1475                 (array ? CPA_ARRAY : 0), NULL);
1476 }
1477
1478 static inline int change_page_attr_clear(unsigned long *addr, int numpages,
1479                                          pgprot_t mask, int array)
1480 {
1481         return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
1482                 (array ? CPA_ARRAY : 0), NULL);
1483 }
1484
1485 static inline int cpa_set_pages_array(struct page **pages, int numpages,
1486                                        pgprot_t mask)
1487 {
1488         return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
1489                 CPA_PAGES_ARRAY, pages);
1490 }
1491
1492 static inline int cpa_clear_pages_array(struct page **pages, int numpages,
1493                                          pgprot_t mask)
1494 {
1495         return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
1496                 CPA_PAGES_ARRAY, pages);
1497 }
1498
1499 int _set_memory_uc(unsigned long addr, int numpages)
1500 {
1501         /*
1502          * for now UC MINUS. see comments in ioremap_nocache()
1503          * If you really need strong UC use ioremap_uc(), but note
1504          * that you cannot override IO areas with set_memory_*() as
1505          * these helpers cannot work with IO memory.
1506          */
1507         return change_page_attr_set(&addr, numpages,
1508                                     cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1509                                     0);
1510 }
1511
1512 int set_memory_uc(unsigned long addr, int numpages)
1513 {
1514         int ret;
1515
1516         /*
1517          * for now UC MINUS. see comments in ioremap_nocache()
1518          */
1519         ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1520                               _PAGE_CACHE_MODE_UC_MINUS, NULL);
1521         if (ret)
1522                 goto out_err;
1523
1524         ret = _set_memory_uc(addr, numpages);
1525         if (ret)
1526                 goto out_free;
1527
1528         return 0;
1529
1530 out_free:
1531         free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1532 out_err:
1533         return ret;
1534 }
1535 EXPORT_SYMBOL(set_memory_uc);
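/*
 * Usage sketch (hypothetical caller): mark a page-aligned, direct-mapped
 * kernel buffer uncached and restore it later:
 *
 *      ret = set_memory_uc((unsigned long)buf, nr_pages);
 *      if (!ret) {
 *              ...use the UC- mapping...
 *              set_memory_wb((unsigned long)buf, nr_pages);
 *      }
 */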
1536
1537 static int _set_memory_array(unsigned long *addr, int addrinarray,
1538                 enum page_cache_mode new_type)
1539 {
1540         enum page_cache_mode set_type;
1541         int i, j;
1542         int ret;
1543
1544         for (i = 0; i < addrinarray; i++) {
1545                 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
1546                                         new_type, NULL);
1547                 if (ret)
1548                         goto out_free;
1549         }
1550
1551         /* If WC, set to UC- first and then WC */
1552         set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
1553                                 _PAGE_CACHE_MODE_UC_MINUS : new_type;
1554
1555         ret = change_page_attr_set(addr, addrinarray,
1556                                    cachemode2pgprot(set_type), 1);
1557
1558         if (!ret && new_type == _PAGE_CACHE_MODE_WC)
1559                 ret = change_page_attr_set_clr(addr, addrinarray,
1560                                                cachemode2pgprot(
1561                                                 _PAGE_CACHE_MODE_WC),
1562                                                __pgprot(_PAGE_CACHE_MASK),
1563                                                0, CPA_ARRAY, NULL);
1564         if (ret)
1565                 goto out_free;
1566
1567         return 0;
1568
1569 out_free:
1570         for (j = 0; j < i; j++)
1571                 free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
1572
1573         return ret;
1574 }
1575
1576 int set_memory_array_uc(unsigned long *addr, int addrinarray)
1577 {
1578         return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
1579 }
1580 EXPORT_SYMBOL(set_memory_array_uc);
1581
1582 int set_memory_array_wc(unsigned long *addr, int addrinarray)
1583 {
1584         return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
1585 }
1586 EXPORT_SYMBOL(set_memory_array_wc);
1587
1588 int set_memory_array_wt(unsigned long *addr, int addrinarray)
1589 {
1590         return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT);
1591 }
1592 EXPORT_SYMBOL_GPL(set_memory_array_wt);
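/*
 * An illustrative sketch (hypothetical caller, not part of this file) of
 * the *_array variants: many scattered page-aligned lowmem addresses are
 * converted in a single CPA pass with batched flushing, instead of one
 * call per page.  Prototypes assumed to come from <asm/cacheflush.h>.
 */
#if 0	/* illustrative sketch only, not compiled */
#include <asm/cacheflush.h>

static int example_buffers_to_wc(unsigned long *addrs, int count)
{
	/* One reserve_memtype() per page, one batched attribute change */
	return set_memory_array_wc(addrs, count);
}

static void example_buffers_to_wb(unsigned long *addrs, int count)
{
	/* Undo: clear the cache attribute bits and free the memtypes */
	set_memory_array_wb(addrs, count);
}
#endif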
1593
1594 int _set_memory_wc(unsigned long addr, int numpages)
1595 {
1596         int ret;
1597         unsigned long addr_copy = addr;
1598
1599         ret = change_page_attr_set(&addr, numpages,
1600                                    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1601                                    0);
1602         if (!ret) {
1603                 ret = change_page_attr_set_clr(&addr_copy, numpages,
1604                                                cachemode2pgprot(
1605                                                 _PAGE_CACHE_MODE_WC),
1606                                                __pgprot(_PAGE_CACHE_MASK),
1607                                                0, 0, NULL);
1608         }
1609         return ret;
1610 }
1611
1612 int set_memory_wc(unsigned long addr, int numpages)
1613 {
1614         int ret;
1615
1616         ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1617                 _PAGE_CACHE_MODE_WC, NULL);
1618         if (ret)
1619                 return ret;
1620
1621         ret = _set_memory_wc(addr, numpages);
1622         if (ret)
1623                 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1624
1625         return ret;
1626 }
1627 EXPORT_SYMBOL(set_memory_wc);
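/*
 * An illustrative sketch (hypothetical caller, not part of this file):
 * write-combining a streaming buffer in the linear map.  For real I/O
 * memory use ioremap_wc() instead; as noted above, set_memory_*() only
 * works on RAM covered by the kernel's direct mapping.
 */
#if 0	/* illustrative sketch only, not compiled */
#include <asm/cacheflush.h>

static int example_stream_into_wc(unsigned long addr, int numpages)
{
	int ret;

	/* addr must be a page-aligned linear-map (lowmem) address */
	ret = set_memory_wc(addr, numpages);
	if (ret)
		return ret;

	/* ... fill the buffer with streaming writes ... */

	/* restore WB before the pages are reused for anything else */
	return set_memory_wb(addr, numpages);
}
#endif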
1628
1629 int _set_memory_wt(unsigned long addr, int numpages)
1630 {
1631         return change_page_attr_set(&addr, numpages,
1632                                     cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
1633 }
1634
1635 int set_memory_wt(unsigned long addr, int numpages)
1636 {
1637         int ret;
1638
1639         ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1640                               _PAGE_CACHE_MODE_WT, NULL);
1641         if (ret)
1642                 return ret;
1643
1644         ret = _set_memory_wt(addr, numpages);
1645         if (ret)
1646                 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1647
1648         return ret;
1649 }
1650 EXPORT_SYMBOL_GPL(set_memory_wt);
1651
1652 int _set_memory_wb(unsigned long addr, int numpages)
1653 {
1654         /* WB cache mode is hard wired to all cache attribute bits being 0 */
1655         return change_page_attr_clear(&addr, numpages,
1656                                       __pgprot(_PAGE_CACHE_MASK), 0);
1657 }
1658
1659 int set_memory_wb(unsigned long addr, int numpages)
1660 {
1661         int ret;
1662
1663         ret = _set_memory_wb(addr, numpages);
1664         if (ret)
1665                 return ret;
1666
1667         free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1668         return 0;
1669 }
1670 EXPORT_SYMBOL(set_memory_wb);
1671
1672 int set_memory_array_wb(unsigned long *addr, int addrinarray)
1673 {
1674         int i;
1675         int ret;
1676
1677         /* WB cache mode is hard wired to all cache attribute bits being 0 */
1678         ret = change_page_attr_clear(addr, addrinarray,
1679                                       __pgprot(_PAGE_CACHE_MASK), 1);
1680         if (ret)
1681                 return ret;
1682
1683         for (i = 0; i < addrinarray; i++)
1684                 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
1685
1686         return 0;
1687 }
1688 EXPORT_SYMBOL(set_memory_array_wb);
1689
1690 int set_memory_x(unsigned long addr, int numpages)
1691 {
1692         if (!(__supported_pte_mask & _PAGE_NX))
1693                 return 0;
1694
1695         return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
1696 }
1697 EXPORT_SYMBOL(set_memory_x);
1698
1699 int set_memory_nx(unsigned long addr, int numpages)
1700 {
1701         if (!(__supported_pte_mask & _PAGE_NX))
1702                 return 0;
1703
1704         return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
1705 }
1706 EXPORT_SYMBOL(set_memory_nx);
1707
1708 int set_memory_ro(unsigned long addr, int numpages)
1709 {
1710         return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
1711 }
1712
1713 int set_memory_rw(unsigned long addr, int numpages)
1714 {
1715         return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
1716 }
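/*
 * An illustrative sketch (hypothetical helpers, not part of this file) of
 * the usual pairing of the protection helpers: generated code is written
 * while the range is RW/NX, then sealed read-only and executable; teardown
 * reverses both.  As the checks above show, set_memory_x()/set_memory_nx()
 * are no-ops (returning 0) when the CPU does not support NX.
 */
#if 0	/* illustrative sketch only, not compiled */
#include <asm/cacheflush.h>

static int example_seal_code(unsigned long addr, int numpages)
{
	int ret;

	ret = set_memory_ro(addr, numpages);
	if (ret)
		return ret;
	return set_memory_x(addr, numpages);
}

static int example_unseal_code(unsigned long addr, int numpages)
{
	int ret;

	ret = set_memory_rw(addr, numpages);
	if (ret)
		return ret;
	return set_memory_nx(addr, numpages);
}
#endif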
1717
1718 int set_memory_np(unsigned long addr, int numpages)
1719 {
1720         return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
1721 }
1722
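/*
 * Force the range to be mapped with 4k pages: the set and clear masks are
 * both empty, so no protection bits change; only the force_split argument
 * (the literal 1 below) has any effect.
 */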
1723 int set_memory_4k(unsigned long addr, int numpages)
1724 {
1725         return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
1726                                         __pgprot(0), 1, 0, NULL);
1727 }
1728
1729 int set_pages_uc(struct page *page, int numpages)
1730 {
1731         unsigned long addr = (unsigned long)page_address(page);
1732
1733         return set_memory_uc(addr, numpages);
1734 }
1735 EXPORT_SYMBOL(set_pages_uc);
1736
1737 static int _set_pages_array(struct page **pages, int addrinarray,
1738                 enum page_cache_mode new_type)
1739 {
1740         unsigned long start;
1741         unsigned long end;
1742         enum page_cache_mode set_type;
1743         int i;
1744         int free_idx;
1745         int ret;
1746
1747         for (i = 0; i < addrinarray; i++) {
1748                 if (PageHighMem(pages[i]))
1749                         continue;
1750                 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
1751                 end = start + PAGE_SIZE;
1752                 if (reserve_memtype(start, end, new_type, NULL))
1753                         goto err_out;
1754         }
1755
1756         /* If WC, set to UC- first and then WC */
1757         set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
1758                                 _PAGE_CACHE_MODE_UC_MINUS : new_type;
1759
1760         ret = cpa_set_pages_array(pages, addrinarray,
1761                                   cachemode2pgprot(set_type));
1762         if (!ret && new_type == _PAGE_CACHE_MODE_WC)
1763                 ret = change_page_attr_set_clr(NULL, addrinarray,
1764                                                cachemode2pgprot(
1765                                                 _PAGE_CACHE_MODE_WC),
1766                                                __pgprot(_PAGE_CACHE_MASK),
1767                                                0, CPA_PAGES_ARRAY, pages);
1768         if (ret)
1769                 goto err_out;
1770         return 0; /* Success */
1771 err_out:
1772         free_idx = i;
1773         for (i = 0; i < free_idx; i++) {
1774                 if (PageHighMem(pages[i]))
1775                         continue;
1776                 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
1777                 end = start + PAGE_SIZE;
1778                 free_memtype(start, end);
1779         }
1780         return -EINVAL;
1781 }
1782
1783 int set_pages_array_uc(struct page **pages, int addrinarray)
1784 {
1785         return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
1786 }
1787 EXPORT_SYMBOL(set_pages_array_uc);
1788
1789 int set_pages_array_wc(struct page **pages, int addrinarray)
1790 {
1791         return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
1792 }
1793 EXPORT_SYMBOL(set_pages_array_wc);
1794
1795 int set_pages_array_wt(struct page **pages, int addrinarray)
1796 {
1797         return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT);
1798 }
1799 EXPORT_SYMBOL_GPL(set_pages_array_wt);
1800
1801 int set_pages_wb(struct page *page, int numpages)
1802 {
1803         unsigned long addr = (unsigned long)page_address(page);
1804
1805         return set_memory_wb(addr, numpages);
1806 }
1807 EXPORT_SYMBOL(set_pages_wb);
1808
1809 int set_pages_array_wb(struct page **pages, int addrinarray)
1810 {
1811         int retval;
1812         unsigned long start;
1813         unsigned long end;
1814         int i;
1815
1816         /* WB cache mode is hard wired to all cache attribute bits being 0 */
1817         retval = cpa_clear_pages_array(pages, addrinarray,
1818                         __pgprot(_PAGE_CACHE_MASK));
1819         if (retval)
1820                 return retval;
1821
1822         for (i = 0; i < addrinarray; i++) {
1823                 if (PageHighMem(pages[i]))
1824                         continue;
1825                 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
1826                 end = start + PAGE_SIZE;
1827                 free_memtype(start, end);
1828         }
1829
1830         return 0;
1831 }
1832 EXPORT_SYMBOL(set_pages_array_wb);
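/*
 * An illustrative sketch (hypothetical caller, not part of this file) of
 * the struct page based array API, intended for drivers that hold arrays
 * of individually allocated pages; as the loops above show, highmem pages
 * get no memtype reservation but are otherwise handled.
 */
#if 0	/* illustrative sketch only, not compiled */
#include <linux/gfp.h>
#include <asm/cacheflush.h>

static int example_pages_to_wc(struct page **pages, int count)
{
	/* one batched attribute change for the whole array */
	return set_pages_array_wc(pages, count);
}

static void example_pages_release(struct page **pages, int count)
{
	int i;

	/* back to WB before the pages return to the allocator */
	set_pages_array_wb(pages, count);
	for (i = 0; i < count; i++)
		__free_page(pages[i]);
}
#endif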
1833
1834 int set_pages_x(struct page *page, int numpages)
1835 {
1836         unsigned long addr = (unsigned long)page_address(page);
1837
1838         return set_memory_x(addr, numpages);
1839 }
1840 EXPORT_SYMBOL(set_pages_x);
1841
1842 int set_pages_nx(struct page *page, int numpages)
1843 {
1844         unsigned long addr = (unsigned long)page_address(page);
1845
1846         return set_memory_nx(addr, numpages);
1847 }
1848 EXPORT_SYMBOL(set_pages_nx);
1849
1850 int set_pages_ro(struct page *page, int numpages)
1851 {
1852         unsigned long addr = (unsigned long)page_address(page);
1853
1854         return set_memory_ro(addr, numpages);
1855 }
1856
1857 int set_pages_rw(struct page *page, int numpages)
1858 {
1859         unsigned long addr = (unsigned long)page_address(page);
1860
1861         return set_memory_rw(addr, numpages);
1862 }
1863
1864 #ifdef CONFIG_DEBUG_PAGEALLOC
1865
1866 static int __set_pages_p(struct page *page, int numpages)
1867 {
1868         unsigned long tempaddr = (unsigned long) page_address(page);
1869         struct cpa_data cpa = { .vaddr = &tempaddr,
1870                                 .pgd = NULL,
1871                                 .numpages = numpages,
1872                                 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
1873                                 .mask_clr = __pgprot(0),
1874                                 .flags = 0};
1875
1876         /*
1877          * No alias checking is needed when setting the present flag;
1878          * otherwise we might need to break up large pages for 64-bit
1879          * kernel text mappings (which adds complexity, especially if we
1880          * want to do this from atomic context). Let's keep it simple!
1881          */
1882         return __change_page_attr_set_clr(&cpa, 0);
1883 }
1884
1885 static int __set_pages_np(struct page *page, int numpages)
1886 {
1887         unsigned long tempaddr = (unsigned long) page_address(page);
1888         struct cpa_data cpa = { .vaddr = &tempaddr,
1889                                 .pgd = NULL,
1890                                 .numpages = numpages,
1891                                 .mask_set = __pgprot(0),
1892                                 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
1893                                 .flags = 0};
1894
1895         /*
1896          * No alias checking is needed when clearing the present flag;
1897          * otherwise we might need to break up large pages for 64-bit
1898          * kernel text mappings (which adds complexity, especially if we
1899          * want to do this from atomic context). Let's keep it simple!
1900          */
1901         return __change_page_attr_set_clr(&cpa, 0);
1902 }
1903
1904 void __kernel_map_pages(struct page *page, int numpages, int enable)
1905 {
1906         if (PageHighMem(page))
1907                 return;
1908         if (!enable) {
1909                 debug_check_no_locks_freed(page_address(page),
1910                                            numpages * PAGE_SIZE);
1911         }
1912
1913         /*
1914          * The return value is ignored as the calls cannot fail here:
1915          * large pages are not used for the identity mapping at boot time,
1916          * so splitting a large page never requires a memory allocation.
1917          */
1918         if (enable)
1919                 __set_pages_p(page, numpages);
1920         else
1921                 __set_pages_np(page, numpages);
1922
1923         /*
1924          * We should send an IPI and flush all TLBs, but that can
1925          * deadlock here, so flush only the current CPU's TLB:
1926          */
1927         __flush_tlb_all();
1928
1929         arch_flush_lazy_mmu_mode();
1930 }
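/*
 * An illustrative sketch (hypothetical names, not part of this file) of the
 * caller side of __kernel_map_pages(): with CONFIG_DEBUG_PAGEALLOC the page
 * allocator is expected to unmap pages as they are freed and map them again
 * on allocation, so that any use-after-free access faults immediately.  The
 * real hook lives in the core mm code.
 */
#if 0	/* illustrative sketch only, not compiled */
static inline void example_debug_free_hook(struct page *page, int order)
{
	/* unmap: _PAGE_PRESENT is cleared, stale accesses will fault */
	__kernel_map_pages(page, 1 << order, 0);
}

static inline void example_debug_alloc_hook(struct page *page, int order)
{
	/* map again before the pages are handed out */
	__kernel_map_pages(page, 1 << order, 1);
}
#endif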
1931
1932 #ifdef CONFIG_HIBERNATION
1933
1934 bool kernel_page_present(struct page *page)
1935 {
1936         unsigned int level;
1937         pte_t *pte;
1938
1939         if (PageHighMem(page))
1940                 return false;
1941
1942         pte = lookup_address((unsigned long)page_address(page), &level);
1943         return (pte_val(*pte) & _PAGE_PRESENT);
1944 }
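/*
 * kernel_page_present() lets the hibernation snapshot code (hence the
 * CONFIG_HIBERNATION guard) check whether a page that DEBUG_PAGEALLOC has
 * unmapped must be temporarily mapped before it can be copied into the
 * image.
 */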
1945
1946 #endif /* CONFIG_HIBERNATION */
1947
1948 #endif /* CONFIG_DEBUG_PAGEALLOC */
1949
1950 int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
1951                             unsigned numpages, unsigned long page_flags)
1952 {
1953         int retval = -EINVAL;
1954
1955         struct cpa_data cpa = {
1956                 .vaddr = &address,
1957                 .pfn = pfn,
1958                 .pgd = pgd,
1959                 .numpages = numpages,
1960                 .mask_set = __pgprot(0),
1961                 .mask_clr = __pgprot(0),
1962                 .flags = 0,
1963         };
1964
1965         if (!(__supported_pte_mask & _PAGE_NX))
1966                 goto out;
1967
1968         if (!(page_flags & _PAGE_NX))
1969                 cpa.mask_clr = __pgprot(_PAGE_NX);
1970
1971         cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
1972
1973         retval = __change_page_attr_set_clr(&cpa, 0);
1974         __flush_tlb_all();
1975
1976 out:
1977         return retval;
1978 }
1979
1980 void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
1981                                unsigned numpages)
1982 {
1983         unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
1984 }
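/*
 * An illustrative sketch (hypothetical wrappers, not part of this file):
 * kernel_map_pages_in_pgd()/kernel_unmap_pages_in_pgd() operate on a
 * caller-supplied page-table root rather than on init_mm, which is how the
 * EFI runtime mapping code builds its private mappings.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_map_into_pgd(pgd_t *pgd, phys_addr_t phys,
				unsigned long virt, unsigned num_pages,
				unsigned long flags)
{
	return kernel_map_pages_in_pgd(pgd, phys >> PAGE_SHIFT, virt,
				       num_pages, flags);
}

static void example_unmap_from_pgd(pgd_t *pgd, unsigned long virt,
				   unsigned num_pages)
{
	kernel_unmap_pages_in_pgd(pgd, virt, num_pages);
}
#endif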
1985
1986 /*
1987  * The testcases use internal knowledge of the implementation that shouldn't
1988  * be exposed to the rest of the kernel. Include these directly here.
1989  */
1990 #ifdef CONFIG_CPA_DEBUG
1991 #include "pageattr-test.c"
1992 #endif