1 /*
2  *  linux/arch/x86_64/mm/init.c
3  *
4  *  Copyright (C) 1995  Linus Torvalds
5  *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
6  *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
7  */
8
9 #include <linux/signal.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/types.h>
15 #include <linux/ptrace.h>
16 #include <linux/mman.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19 #include <linux/smp.h>
20 #include <linux/init.h>
21 #include <linux/initrd.h>
22 #include <linux/pagemap.h>
23 #include <linux/bootmem.h>
24 #include <linux/memblock.h>
25 #include <linux/proc_fs.h>
26 #include <linux/pci.h>
27 #include <linux/pfn.h>
28 #include <linux/poison.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/memory.h>
31 #include <linux/memory_hotplug.h>
32 #include <linux/memremap.h>
33 #include <linux/nmi.h>
34 #include <linux/gfp.h>
35 #include <linux/kcore.h>
36
37 #include <asm/processor.h>
38 #include <asm/bios_ebda.h>
39 #include <linux/uaccess.h>
40 #include <asm/pgtable.h>
41 #include <asm/pgalloc.h>
42 #include <asm/dma.h>
43 #include <asm/fixmap.h>
44 #include <asm/e820/api.h>
45 #include <asm/apic.h>
46 #include <asm/tlb.h>
47 #include <asm/mmu_context.h>
48 #include <asm/proto.h>
49 #include <asm/smp.h>
50 #include <asm/sections.h>
51 #include <asm/kdebug.h>
52 #include <asm/numa.h>
53 #include <asm/set_memory.h>
54 #include <asm/init.h>
55 #include <asm/uv/uv.h>
56 #include <asm/setup.h>
57
58 #include "mm_internal.h"
59
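/*
 * Shared helpers for building identity-mapped page tables
 * (kernel_ident_mapping_init()), compiled directly into this file.
 */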
60 #include "ident_map.c"
61
62 /*
63  * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
64  * in physical space, so we can cache the location of the first one and
65  * move around without checking the pgd every time.
66  */
67
68 pteval_t __supported_pte_mask __read_mostly = ~0;
69 EXPORT_SYMBOL_GPL(__supported_pte_mask);
70
71 int force_personality32;
72
73 /*
74  * noexec32=on|off
75  * Control the non-executable heap for 32-bit processes.
76  * To control the stack too, use noexec=off.
77  *
78  * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
79  * off  PROT_READ implies PROT_EXEC
80  */
81 static int __init nonx32_setup(char *str)
82 {
83         if (!strcmp(str, "on"))
84                 force_personality32 &= ~READ_IMPLIES_EXEC;
85         else if (!strcmp(str, "off"))
86                 force_personality32 |= READ_IMPLIES_EXEC;
87         return 1;
88 }
89 __setup("noexec32=", nonx32_setup);
90
91 /*
92  * When memory is added, make sure that every process's mm has
93  * suitable PGD entries in its local PGD-level page.
94  */
95 #ifdef CONFIG_X86_5LEVEL
96 void sync_global_pgds(unsigned long start, unsigned long end)
97 {
98         unsigned long addr;
99
100         for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
101                 const pgd_t *pgd_ref = pgd_offset_k(addr);
102                 struct page *page;
103
104                 /* Check for overflow */
105                 if (addr < start)
106                         break;
107
108                 if (pgd_none(*pgd_ref))
109                         continue;
110
111                 spin_lock(&pgd_lock);
112                 list_for_each_entry(page, &pgd_list, lru) {
113                         pgd_t *pgd;
114                         spinlock_t *pgt_lock;
115
116                         pgd = (pgd_t *)page_address(page) + pgd_index(addr);
117                         /* the pgt_lock is only needed by Xen */
118                         pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
119                         spin_lock(pgt_lock);
120
121                         if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
122                                 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
123
124                         if (pgd_none(*pgd))
125                                 set_pgd(pgd, *pgd_ref);
126
127                         spin_unlock(pgt_lock);
128                 }
129                 spin_unlock(&pgd_lock);
130         }
131 }
132 #else
133 void sync_global_pgds(unsigned long start, unsigned long end)
134 {
135         unsigned long addr;
136
137         for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
138                 pgd_t *pgd_ref = pgd_offset_k(addr);
139                 const p4d_t *p4d_ref;
140                 struct page *page;
141
142                 /*
143                  * With a folded p4d, pgd_none() is always false, so we need
144                  * to handle synchronization at the p4d level.
145                  */
146                 BUILD_BUG_ON(pgd_none(*pgd_ref));
147                 p4d_ref = p4d_offset(pgd_ref, addr);
148
149                 if (p4d_none(*p4d_ref))
150                         continue;
151
152                 spin_lock(&pgd_lock);
153                 list_for_each_entry(page, &pgd_list, lru) {
154                         pgd_t *pgd;
155                         p4d_t *p4d;
156                         spinlock_t *pgt_lock;
157
158                         pgd = (pgd_t *)page_address(page) + pgd_index(addr);
159                         p4d = p4d_offset(pgd, addr);
160                         /* the pgt_lock is only needed by Xen */
161                         pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
162                         spin_lock(pgt_lock);
163
164                         if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
165                                 BUG_ON(p4d_page_vaddr(*p4d)
166                                        != p4d_page_vaddr(*p4d_ref));
167
168                         if (p4d_none(*p4d))
169                                 set_p4d(p4d, *p4d_ref);
170
171                         spin_unlock(pgt_lock);
172                 }
173                 spin_unlock(&pgd_lock);
174         }
175 }
176 #endif
177
178 /*
179  * NOTE: This function is marked __ref because it calls the __init function
180  * alloc_bootmem_pages(). Doing so is safe ONLY while after_bootmem == 0.
181  */
182 static __ref void *spp_getpage(void)
183 {
184         void *ptr;
185
186         if (after_bootmem)
187                 ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
188         else
189                 ptr = alloc_bootmem_pages(PAGE_SIZE);
190
191         if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
192                 panic("set_pte_phys: cannot allocate page data %s\n",
193                         after_bootmem ? "after bootmem" : "");
194         }
195
196         pr_debug("spp_getpage %p\n", ptr);
197
198         return ptr;
199 }
200
201 static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
202 {
203         if (pgd_none(*pgd)) {
204                 p4d_t *p4d = (p4d_t *)spp_getpage();
205                 pgd_populate(&init_mm, pgd, p4d);
206                 if (p4d != p4d_offset(pgd, 0))
207                         printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
208                                p4d, p4d_offset(pgd, 0));
209         }
210         return p4d_offset(pgd, vaddr);
211 }
212
213 static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
214 {
215         if (p4d_none(*p4d)) {
216                 pud_t *pud = (pud_t *)spp_getpage();
217                 p4d_populate(&init_mm, p4d, pud);
218                 if (pud != pud_offset(p4d, 0))
219                         printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
220                                pud, pud_offset(p4d, 0));
221         }
222         return pud_offset(p4d, vaddr);
223 }
224
225 static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
226 {
227         if (pud_none(*pud)) {
228                 pmd_t *pmd = (pmd_t *) spp_getpage();
229                 pud_populate(&init_mm, pud, pmd);
230                 if (pmd != pmd_offset(pud, 0))
231                         printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
232                                pmd, pmd_offset(pud, 0));
233         }
234         return pmd_offset(pud, vaddr);
235 }
236
237 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
238 {
239         if (pmd_none(*pmd)) {
240                 pte_t *pte = (pte_t *) spp_getpage();
241                 pmd_populate_kernel(&init_mm, pmd, pte);
242                 if (pte != pte_offset_kernel(pmd, 0))
243                         printk(KERN_ERR "PAGETABLE BUG #03!\n");
244         }
245         return pte_offset_kernel(pmd, vaddr);
246 }
247
248 static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
249 {
250         pmd_t *pmd = fill_pmd(pud, vaddr);
251         pte_t *pte = fill_pte(pmd, vaddr);
252
253         set_pte(pte, new_pte);
254
255         /*
256          * It's enough to flush this one mapping.
257          * (PGE mappings get flushed as well)
258          */
259         __flush_tlb_one(vaddr);
260 }
261
262 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
263 {
264         p4d_t *p4d = p4d_page + p4d_index(vaddr);
265         pud_t *pud = fill_pud(p4d, vaddr);
266
267         __set_pte_vaddr(pud, vaddr, new_pte);
268 }
269
270 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
271 {
272         pud_t *pud = pud_page + pud_index(vaddr);
273
274         __set_pte_vaddr(pud, vaddr, new_pte);
275 }
276
277 void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
278 {
279         pgd_t *pgd;
280         p4d_t *p4d_page;
281
282         pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
283
284         pgd = pgd_offset_k(vaddr);
285         if (pgd_none(*pgd)) {
286                 printk(KERN_ERR
287                         "PGD FIXMAP MISSING, it should be setup in head.S!\n");
288                 return;
289         }
290
291         p4d_page = p4d_offset(pgd, 0);
292         set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
293 }
294
295 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
296 {
297         pgd_t *pgd;
298         p4d_t *p4d;
299         pud_t *pud;
300
301         pgd = pgd_offset_k(vaddr);
302         p4d = fill_p4d(pgd, vaddr);
303         pud = fill_pud(p4d, vaddr);
304         return fill_pmd(pud, vaddr);
305 }
306
307 pte_t * __init populate_extra_pte(unsigned long vaddr)
308 {
309         pmd_t *pmd;
310
311         pmd = populate_extra_pmd(vaddr);
312         return fill_pte(pmd, vaddr);
313 }
314
315 /*
316  * Create large page table mappings for a range of physical addresses.
317  */
318 static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
319                                         enum page_cache_mode cache)
320 {
321         pgd_t *pgd;
322         p4d_t *p4d;
323         pud_t *pud;
324         pmd_t *pmd;
325         pgprot_t prot;
326
327         pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
328                 pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
329         BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
330         for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
331                 pgd = pgd_offset_k((unsigned long)__va(phys));
332                 if (pgd_none(*pgd)) {
333                         p4d = (p4d_t *) spp_getpage();
334                         set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
335                                                 _PAGE_USER));
336                 }
337                 p4d = p4d_offset(pgd, (unsigned long)__va(phys));
338                 if (p4d_none(*p4d)) {
339                         pud = (pud_t *) spp_getpage();
340                         set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
341                                                 _PAGE_USER));
342                 }
343                 pud = pud_offset(p4d, (unsigned long)__va(phys));
344                 if (pud_none(*pud)) {
345                         pmd = (pmd_t *) spp_getpage();
346                         set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
347                                                 _PAGE_USER));
348                 }
349                 pmd = pmd_offset(pud, phys);
350                 BUG_ON(!pmd_none(*pmd));
351                 set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
352         }
353 }
354
355 void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
356 {
357         __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
358 }
359
360 void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
361 {
362         __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
363 }
364
365 /*
366  * The head.S code sets up the kernel high mapping:
367  *
368  *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
369  *
370  * phys_base holds the negative offset to the kernel, which is added
371  * to the compile time generated pmds. This results in invalid pmds up
372  * to the point where we hit the physaddr 0 mapping.
373  *
374  * We limit the mappings to the region from _text to _brk_end.  _brk_end
375  * is rounded up to the 2MB boundary. This catches the invalid pmds as
376  * well, as they are located before _text:
377  */
378 void __init cleanup_highmap(void)
379 {
380         unsigned long vaddr = __START_KERNEL_map;
381         unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
382         unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
383         pmd_t *pmd = level2_kernel_pgt;
384
385         /*
386          * Native path, max_pfn_mapped is not set yet.
387          * Xen has valid max_pfn_mapped set in
388          *      arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
389          */
390         if (max_pfn_mapped)
391                 vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
392
393         for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
394                 if (pmd_none(*pmd))
395                         continue;
396                 if (vaddr < (unsigned long) _text || vaddr > end)
397                         set_pmd(pmd, __pmd(0));
398         }
399 }
400
401 /*
402  * Create PTE level page table mapping for physical addresses.
403  * It returns the last physical address mapped.
404  */
405 static unsigned long __meminit
406 phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
407               pgprot_t prot)
408 {
409         unsigned long pages = 0, paddr_next;
410         unsigned long paddr_last = paddr_end;
411         pte_t *pte;
412         int i;
413
414         pte = pte_page + pte_index(paddr);
415         i = pte_index(paddr);
416
417         for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
418                 paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
419                 if (paddr >= paddr_end) {
420                         if (!after_bootmem &&
421                             !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
422                                              E820_TYPE_RAM) &&
423                             !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
424                                              E820_TYPE_RESERVED_KERN))
425                                 set_pte(pte, __pte(0));
426                         continue;
427                 }
428
429                 /*
430                  * We will re-use the existing mapping.
431                  * Xen, for example, has some special requirements, like
432                  * mapping pagetable pages as RO. So assume that whoever
433                  * pre-set up these mappings knew what they were doing.
434                  */
435                 if (!pte_none(*pte)) {
436                         if (!after_bootmem)
437                                 pages++;
438                         continue;
439                 }
440
441                 if (0)
442                         pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
443                                 pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
444                 pages++;
445                 set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
446                 paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
447         }
448
449         update_page_count(PG_LEVEL_4K, pages);
450
451         return paddr_last;
452 }
453
454 /*
455  * Create PMD level page table mapping for physical addresses. The virtual
456  * and physical addresses have to be aligned at this level.
457  * It returns the last physical address mapped.
458  */
459 static unsigned long __meminit
460 phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
461               unsigned long page_size_mask, pgprot_t prot)
462 {
463         unsigned long pages = 0, paddr_next;
464         unsigned long paddr_last = paddr_end;
465
466         int i = pmd_index(paddr);
467
468         for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
469                 pmd_t *pmd = pmd_page + pmd_index(paddr);
470                 pte_t *pte;
471                 pgprot_t new_prot = prot;
472
473                 paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
474                 if (paddr >= paddr_end) {
475                         if (!after_bootmem &&
476                             !e820__mapped_any(paddr & PMD_MASK, paddr_next,
477                                              E820_TYPE_RAM) &&
478                             !e820__mapped_any(paddr & PMD_MASK, paddr_next,
479                                              E820_TYPE_RESERVED_KERN))
480                                 set_pmd(pmd, __pmd(0));
481                         continue;
482                 }
483
484                 if (!pmd_none(*pmd)) {
485                         if (!pmd_large(*pmd)) {
486                                 spin_lock(&init_mm.page_table_lock);
487                                 pte = (pte_t *)pmd_page_vaddr(*pmd);
488                                 paddr_last = phys_pte_init(pte, paddr,
489                                                            paddr_end, prot);
490                                 spin_unlock(&init_mm.page_table_lock);
491                                 continue;
492                         }
493                         /*
494                          * If we are ok with PG_LEVEL_2M mapping, then we will
495                          * use the existing mapping.
496                          *
497                          * Otherwise, we will split the large page mapping but
498                          * use the same existing protection bits, except for the
499                          * large-page bit, so that we don't violate Intel's TLB
500                          * Application note (317080), which says that while
501                          * changing the page sizes, new and old translations
502                          * must not differ with respect to page frame and
503                          * attributes.
504                          */
505                         if (page_size_mask & (1 << PG_LEVEL_2M)) {
506                                 if (!after_bootmem)
507                                         pages++;
508                                 paddr_last = paddr_next;
509                                 continue;
510                         }
511                         new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
512                 }
513
514                 if (page_size_mask & (1<<PG_LEVEL_2M)) {
515                         pages++;
516                         spin_lock(&init_mm.page_table_lock);
517                         set_pte((pte_t *)pmd,
518                                 pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
519                                         __pgprot(pgprot_val(prot) | _PAGE_PSE)));
520                         spin_unlock(&init_mm.page_table_lock);
521                         paddr_last = paddr_next;
522                         continue;
523                 }
524
525                 pte = alloc_low_page();
526                 paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
527
528                 spin_lock(&init_mm.page_table_lock);
529                 pmd_populate_kernel(&init_mm, pmd, pte);
530                 spin_unlock(&init_mm.page_table_lock);
531         }
532         update_page_count(PG_LEVEL_2M, pages);
533         return paddr_last;
534 }
535
536 /*
537  * Create PUD level page table mapping for physical addresses. The virtual
538  * and physical addresses do not have to be aligned at this level. KASLR can
539  * randomize virtual addresses up to this level.
540  * It returns the last physical address mapped.
541  */
542 static unsigned long __meminit
543 phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
544               unsigned long page_size_mask)
545 {
546         unsigned long pages = 0, paddr_next;
547         unsigned long paddr_last = paddr_end;
548         unsigned long vaddr = (unsigned long)__va(paddr);
549         int i = pud_index(vaddr);
550
551         for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
552                 pud_t *pud;
553                 pmd_t *pmd;
554                 pgprot_t prot = PAGE_KERNEL;
555
556                 vaddr = (unsigned long)__va(paddr);
557                 pud = pud_page + pud_index(vaddr);
558                 paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
559
560                 if (paddr >= paddr_end) {
561                         if (!after_bootmem &&
562                             !e820__mapped_any(paddr & PUD_MASK, paddr_next,
563                                              E820_TYPE_RAM) &&
564                             !e820__mapped_any(paddr & PUD_MASK, paddr_next,
565                                              E820_TYPE_RESERVED_KERN))
566                                 set_pud(pud, __pud(0));
567                         continue;
568                 }
569
570                 if (!pud_none(*pud)) {
571                         if (!pud_large(*pud)) {
572                                 pmd = pmd_offset(pud, 0);
573                                 paddr_last = phys_pmd_init(pmd, paddr,
574                                                            paddr_end,
575                                                            page_size_mask,
576                                                            prot);
577                                 __flush_tlb_all();
578                                 continue;
579                         }
580                         /*
581                          * If we are ok with PG_LEVEL_1G mapping, then we will
582                          * use the existing mapping.
583                          *
584                          * Otherwise, we will split the gbpage mapping but use
585                          * the same existing protection bits, except for the
586                          * large-page bit, so that we don't violate Intel's TLB
587                          * Application note (317080), which says that while
588                          * changing the page sizes, new and old translations
589                          * must not differ with respect to page frame and
590                          * attributes.
591                          */
592                         if (page_size_mask & (1 << PG_LEVEL_1G)) {
593                                 if (!after_bootmem)
594                                         pages++;
595                                 paddr_last = paddr_next;
596                                 continue;
597                         }
598                         prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
599                 }
600
601                 if (page_size_mask & (1<<PG_LEVEL_1G)) {
602                         pages++;
603                         spin_lock(&init_mm.page_table_lock);
604                         set_pte((pte_t *)pud,
605                                 pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
606                                         PAGE_KERNEL_LARGE));
607                         spin_unlock(&init_mm.page_table_lock);
608                         paddr_last = paddr_next;
609                         continue;
610                 }
611
612                 pmd = alloc_low_page();
613                 paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
614                                            page_size_mask, prot);
615
616                 spin_lock(&init_mm.page_table_lock);
617                 pud_populate(&init_mm, pud, pmd);
618                 spin_unlock(&init_mm.page_table_lock);
619         }
620         __flush_tlb_all();
621
622         update_page_count(PG_LEVEL_1G, pages);
623
624         return paddr_last;
625 }
626
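/*
 * Create P4D level page table mapping for physical addresses. With 4-level
 * paging the p4d level is folded and the whole range is simply handed to
 * phys_pud_init(). It returns the last physical address mapped.
 */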
627 static unsigned long __meminit
628 phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
629               unsigned long page_size_mask)
630 {
631         unsigned long paddr_next, paddr_last = paddr_end;
632         unsigned long vaddr = (unsigned long)__va(paddr);
633         int i = p4d_index(vaddr);
634
635         if (!IS_ENABLED(CONFIG_X86_5LEVEL))
636                 return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
637
638         for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
639                 p4d_t *p4d;
640                 pud_t *pud;
641
642                 vaddr = (unsigned long)__va(paddr);
643                 p4d = p4d_page + p4d_index(vaddr);
644                 paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
645
646                 if (paddr >= paddr_end) {
647                         if (!after_bootmem &&
648                             !e820__mapped_any(paddr & P4D_MASK, paddr_next,
649                                              E820_TYPE_RAM) &&
650                             !e820__mapped_any(paddr & P4D_MASK, paddr_next,
651                                              E820_TYPE_RESERVED_KERN))
652                                 set_p4d(p4d, __p4d(0));
653                         continue;
654                 }
655
656                 if (!p4d_none(*p4d)) {
657                         pud = pud_offset(p4d, 0);
658                         paddr_last = phys_pud_init(pud, paddr,
659                                         paddr_end,
660                                         page_size_mask);
661                         __flush_tlb_all();
662                         continue;
663                 }
664
665                 pud = alloc_low_page();
666                 paddr_last = phys_pud_init(pud, paddr, paddr_end,
667                                            page_size_mask);
668
669                 spin_lock(&init_mm.page_table_lock);
670                 p4d_populate(&init_mm, p4d, pud);
671                 spin_unlock(&init_mm.page_table_lock);
672         }
673         __flush_tlb_all();
674
675         return paddr_last;
676 }
677
678 /*
679  * Create page table mapping for the physical memory for specific physical
680  * addresses. The virtual and physical addresses have to be aligned on PMD level
681  * down. It returns the last physical address mapped.
682  */
683 unsigned long __meminit
684 kernel_physical_mapping_init(unsigned long paddr_start,
685                              unsigned long paddr_end,
686                              unsigned long page_size_mask)
687 {
688         bool pgd_changed = false;
689         unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
690
691         paddr_last = paddr_end;
692         vaddr = (unsigned long)__va(paddr_start);
693         vaddr_end = (unsigned long)__va(paddr_end);
694         vaddr_start = vaddr;
695
696         for (; vaddr < vaddr_end; vaddr = vaddr_next) {
697                 pgd_t *pgd = pgd_offset_k(vaddr);
698                 p4d_t *p4d;
699
700                 vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
701
702                 if (pgd_val(*pgd)) {
703                         p4d = (p4d_t *)pgd_page_vaddr(*pgd);
704                         paddr_last = phys_p4d_init(p4d, __pa(vaddr),
705                                                    __pa(vaddr_end),
706                                                    page_size_mask);
707                         continue;
708                 }
709
710                 p4d = alloc_low_page();
711                 paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
712                                            page_size_mask);
713
714                 spin_lock(&init_mm.page_table_lock);
715                 if (IS_ENABLED(CONFIG_X86_5LEVEL))
716                         pgd_populate(&init_mm, pgd, p4d);
717                 else
718                         p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
719                 spin_unlock(&init_mm.page_table_lock);
720                 pgd_changed = true;
721         }
722
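        /*
         * New top-level entries were installed in init_mm's page table;
         * propagate them to the page tables of all other processes.
         */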
723         if (pgd_changed)
724                 sync_global_pgds(vaddr_start, vaddr_end - 1);
725
726         __flush_tlb_all();
727
728         return paddr_last;
729 }
730
731 #ifndef CONFIG_NUMA
732 void __init initmem_init(void)
733 {
734         memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
735 }
736 #endif
737
738 void __init paging_init(void)
739 {
740         sparse_memory_present_with_active_regions(MAX_NUMNODES);
741         sparse_init();
742
743         /*
744          * Clear the default setting for node 0.
745          * Note: don't use nodes_clear() here; when NUMA support is not
746          *       compiled in it really clears the state, and a later
747          *       node_set_state() will not set it back.
748          */
749         node_clear_state(0, N_MEMORY);
750         if (N_MEMORY != N_NORMAL_MEMORY)
751                 node_clear_state(0, N_NORMAL_MEMORY);
752
753         zone_sizes_init();
754 }
755
756 /*
757  * Memory hotplug specific functions
758  */
759 #ifdef CONFIG_MEMORY_HOTPLUG
760 /*
761  * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
762  * updating.
763  */
764 static void  update_end_of_memory_vars(u64 start, u64 size)
765 {
766         unsigned long end_pfn = PFN_UP(start + size);
767
768         if (end_pfn > max_pfn) {
769                 max_pfn = end_pfn;
770                 max_low_pfn = end_pfn;
771                 high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
772         }
773 }
774
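/*
 * Memory hot-add entry point: extend the direct mapping to cover the new
 * range, then hand the pages to the core memory hotplug code.
 */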
775 int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
776 {
777         unsigned long start_pfn = start >> PAGE_SHIFT;
778         unsigned long nr_pages = size >> PAGE_SHIFT;
779         int ret;
780
781         init_memory_mapping(start, start + size);
782
783         ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
784         WARN_ON_ONCE(ret);
785
786         /* update max_pfn, max_low_pfn and high_memory */
787         update_end_of_memory_vars(start, size);
788
789         return ret;
790 }
791 EXPORT_SYMBOL_GPL(arch_add_memory);
792
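/*
 * Poison byte written over unused parts of partially-freed vmemmap pages;
 * a page is only released once it is entirely filled with this pattern.
 */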
793 #define PAGE_INUSE 0xFD
794
795 static void __meminit free_pagetable(struct page *page, int order)
796 {
797         unsigned long magic;
798         unsigned int nr_pages = 1 << order;
799         struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);
800
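        /* Pages allocated from an altmap are returned to it, not to the page allocator. */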
801         if (altmap) {
802                 vmem_altmap_free(altmap, nr_pages);
803                 return;
804         }
805
806         /* bootmem page has reserved flag */
807         if (PageReserved(page)) {
808                 __ClearPageReserved(page);
809
810                 magic = (unsigned long)page->freelist;
811                 if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
812                         while (nr_pages--)
813                                 put_page_bootmem(page++);
814                 } else
815                         while (nr_pages--)
816                                 free_reserved_page(page++);
817         } else
818                 free_pages((unsigned long)page_address(page), order);
819 }
820
821 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
822 {
823         pte_t *pte;
824         int i;
825
826         for (i = 0; i < PTRS_PER_PTE; i++) {
827                 pte = pte_start + i;
828                 if (!pte_none(*pte))
829                         return;
830         }
831
832         /* free a pte table */
833         free_pagetable(pmd_page(*pmd), 0);
834         spin_lock(&init_mm.page_table_lock);
835         pmd_clear(pmd);
836         spin_unlock(&init_mm.page_table_lock);
837 }
838
839 static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
840 {
841         pmd_t *pmd;
842         int i;
843
844         for (i = 0; i < PTRS_PER_PMD; i++) {
845                 pmd = pmd_start + i;
846                 if (!pmd_none(*pmd))
847                         return;
848         }
849
850         /* free a pmd table */
851         free_pagetable(pud_page(*pud), 0);
852         spin_lock(&init_mm.page_table_lock);
853         pud_clear(pud);
854         spin_unlock(&init_mm.page_table_lock);
855 }
856
857 static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
858 {
859         pud_t *pud;
860         int i;
861
862         for (i = 0; i < PTRS_PER_PUD; i++) {
863                 pud = pud_start + i;
864                 if (!pud_none(*pud))
865                         return;
866         }
867
868         /* free a pud table */
869         free_pagetable(p4d_page(*p4d), 0);
870         spin_lock(&init_mm.page_table_lock);
871         p4d_clear(p4d);
872         spin_unlock(&init_mm.page_table_lock);
873 }
874
875 static void __meminit
876 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
877                  bool direct)
878 {
879         unsigned long next, pages = 0;
880         pte_t *pte;
881         void *page_addr;
882         phys_addr_t phys_addr;
883
884         pte = pte_start + pte_index(addr);
885         for (; addr < end; addr = next, pte++) {
886                 next = (addr + PAGE_SIZE) & PAGE_MASK;
887                 if (next > end)
888                         next = end;
889
890                 if (!pte_present(*pte))
891                         continue;
892
893                 /*
894                  * We mapped the [0, 1G) range with an identity mapping
895                  * during early boot, in arch/x86/kernel/head_64.S. These
896                  * pagetables cannot be removed.
897                  */
898                 phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
899                 if (phys_addr < (phys_addr_t)0x40000000)
900                         return;
901
902                 if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
903                         /*
904                          * Do not free direct mapping pages since they were
905                          * freed when offlining, or are simply not in use.
906                          */
907                         if (!direct)
908                                 free_pagetable(pte_page(*pte), 0);
909
910                         spin_lock(&init_mm.page_table_lock);
911                         pte_clear(&init_mm, addr, pte);
912                         spin_unlock(&init_mm.page_table_lock);
913
914                         /* For non-direct mappings, the pages count means nothing. */
915                         pages++;
916                 } else {
917                         /*
918                          * If we are here, we are freeing vmemmap pages since
919                          * direct mapped memory ranges to be freed are aligned.
920                          *
921                          * If we are not removing the whole page, it means
922                          * other page structs in this page are being used and
923                          * we cannot remove them. So fill the unused page_structs
924                          * with 0xFD, and remove the page when it is wholly
925                          * filled with 0xFD.
926                          */
927                         memset((void *)addr, PAGE_INUSE, next - addr);
928
929                         page_addr = page_address(pte_page(*pte));
930                         if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
931                                 free_pagetable(pte_page(*pte), 0);
932
933                                 spin_lock(&init_mm.page_table_lock);
934                                 pte_clear(&init_mm, addr, pte);
935                                 spin_unlock(&init_mm.page_table_lock);
936                         }
937                 }
938         }
939
940         /* Call free_pte_table() in remove_pmd_table(). */
941         flush_tlb_all();
942         if (direct)
943                 update_page_count(PG_LEVEL_4K, -pages);
944 }
945
946 static void __meminit
947 remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
948                  bool direct)
949 {
950         unsigned long next, pages = 0;
951         pte_t *pte_base;
952         pmd_t *pmd;
953         void *page_addr;
954
955         pmd = pmd_start + pmd_index(addr);
956         for (; addr < end; addr = next, pmd++) {
957                 next = pmd_addr_end(addr, end);
958
959                 if (!pmd_present(*pmd))
960                         continue;
961
962                 if (pmd_large(*pmd)) {
963                         if (IS_ALIGNED(addr, PMD_SIZE) &&
964                             IS_ALIGNED(next, PMD_SIZE)) {
965                                 if (!direct)
966                                         free_pagetable(pmd_page(*pmd),
967                                                        get_order(PMD_SIZE));
968
969                                 spin_lock(&init_mm.page_table_lock);
970                                 pmd_clear(pmd);
971                                 spin_unlock(&init_mm.page_table_lock);
972                                 pages++;
973                         } else {
974                                 /* If here, we are freeing vmemmap pages. */
975                                 memset((void *)addr, PAGE_INUSE, next - addr);
976
977                                 page_addr = page_address(pmd_page(*pmd));
978                                 if (!memchr_inv(page_addr, PAGE_INUSE,
979                                                 PMD_SIZE)) {
980                                         free_pagetable(pmd_page(*pmd),
981                                                        get_order(PMD_SIZE));
982
983                                         spin_lock(&init_mm.page_table_lock);
984                                         pmd_clear(pmd);
985                                         spin_unlock(&init_mm.page_table_lock);
986                                 }
987                         }
988
989                         continue;
990                 }
991
992                 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
993                 remove_pte_table(pte_base, addr, next, direct);
994                 free_pte_table(pte_base, pmd);
995         }
996
997         /* Call free_pmd_table() in remove_pud_table(). */
998         if (direct)
999                 update_page_count(PG_LEVEL_2M, -pages);
1000 }
1001
1002 static void __meminit
1003 remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
1004                  bool direct)
1005 {
1006         unsigned long next, pages = 0;
1007         pmd_t *pmd_base;
1008         pud_t *pud;
1009         void *page_addr;
1010
1011         pud = pud_start + pud_index(addr);
1012         for (; addr < end; addr = next, pud++) {
1013                 next = pud_addr_end(addr, end);
1014
1015                 if (!pud_present(*pud))
1016                         continue;
1017
1018                 if (pud_large(*pud)) {
1019                         if (IS_ALIGNED(addr, PUD_SIZE) &&
1020                             IS_ALIGNED(next, PUD_SIZE)) {
1021                                 if (!direct)
1022                                         free_pagetable(pud_page(*pud),
1023                                                        get_order(PUD_SIZE));
1024
1025                                 spin_lock(&init_mm.page_table_lock);
1026                                 pud_clear(pud);
1027                                 spin_unlock(&init_mm.page_table_lock);
1028                                 pages++;
1029                         } else {
1030                                 /* If here, we are freeing vmemmap pages. */
1031                                 memset((void *)addr, PAGE_INUSE, next - addr);
1032
1033                                 page_addr = page_address(pud_page(*pud));
1034                                 if (!memchr_inv(page_addr, PAGE_INUSE,
1035                                                 PUD_SIZE)) {
1036                                         free_pagetable(pud_page(*pud),
1037                                                        get_order(PUD_SIZE));
1038
1039                                         spin_lock(&init_mm.page_table_lock);
1040                                         pud_clear(pud);
1041                                         spin_unlock(&init_mm.page_table_lock);
1042                                 }
1043                         }
1044
1045                         continue;
1046                 }
1047
1048                 pmd_base = pmd_offset(pud, 0);
1049                 remove_pmd_table(pmd_base, addr, next, direct);
1050                 free_pmd_table(pmd_base, pud);
1051         }
1052
1053         if (direct)
1054                 update_page_count(PG_LEVEL_1G, -pages);
1055 }
1056
1057 static void __meminit
1058 remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
1059                  bool direct)
1060 {
1061         unsigned long next, pages = 0;
1062         pud_t *pud_base;
1063         p4d_t *p4d;
1064
1065         p4d = p4d_start + p4d_index(addr);
1066         for (; addr < end; addr = next, p4d++) {
1067                 next = p4d_addr_end(addr, end);
1068
1069                 if (!p4d_present(*p4d))
1070                         continue;
1071
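                /* No 512GB pages yet, so p4d_large() is constant-false and this is a compile-time check. */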
1072                 BUILD_BUG_ON(p4d_large(*p4d));
1073
1074                 pud_base = pud_offset(p4d, 0);
1075                 remove_pud_table(pud_base, addr, next, direct);
1076                 /*
1077                  * For 4-level page tables we do not want to free PUDs, but in the
1078                  * 5-level case we should free them. This code will have to change
1079                  * to adapt for boot-time switching between 4 and 5 level page tables.
1080                  */
1081                 if (CONFIG_PGTABLE_LEVELS == 5)
1082                         free_pud_table(pud_base, p4d);
1083         }
1084
1085         if (direct)
1086                 update_page_count(PG_LEVEL_512G, -pages);
1087 }
1088
1089 /* start and end are both virtual addresses. */
1090 static void __meminit
1091 remove_pagetable(unsigned long start, unsigned long end, bool direct)
1092 {
1093         unsigned long next;
1094         unsigned long addr;
1095         pgd_t *pgd;
1096         p4d_t *p4d;
1097
1098         for (addr = start; addr < end; addr = next) {
1099                 next = pgd_addr_end(addr, end);
1100
1101                 pgd = pgd_offset_k(addr);
1102                 if (!pgd_present(*pgd))
1103                         continue;
1104
1105                 p4d = p4d_offset(pgd, 0);
1106                 remove_p4d_table(p4d, addr, next, direct);
1107         }
1108
1109         flush_tlb_all();
1110 }
1111
1112 void __ref vmemmap_free(unsigned long start, unsigned long end)
1113 {
1114         remove_pagetable(start, end, false);
1115 }
1116
1117 #ifdef CONFIG_MEMORY_HOTREMOVE
1118 static void __meminit
1119 kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1120 {
1121         start = (unsigned long)__va(start);
1122         end = (unsigned long)__va(end);
1123
1124         remove_pagetable(start, end, true);
1125 }
1126
1127 int __ref arch_remove_memory(u64 start, u64 size)
1128 {
1129         unsigned long start_pfn = start >> PAGE_SHIFT;
1130         unsigned long nr_pages = size >> PAGE_SHIFT;
1131         struct page *page = pfn_to_page(start_pfn);
1132         struct vmem_altmap *altmap;
1133         struct zone *zone;
1134         int ret;
1135
1136         /* With altmap the first mapped page is offset from @start */
1137         altmap = to_vmem_altmap((unsigned long) page);
1138         if (altmap)
1139                 page += vmem_altmap_offset(altmap);
1140         zone = page_zone(page);
1141         ret = __remove_pages(zone, start_pfn, nr_pages);
1142         WARN_ON_ONCE(ret);
1143         kernel_physical_mapping_remove(start, start + size);
1144
1145         return ret;
1146 }
1147 #endif
1148 #endif /* CONFIG_MEMORY_HOTPLUG */
1149
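/* /proc/kcore entry covering the vsyscall page; registered in mem_init(). */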
1150 static struct kcore_list kcore_vsyscall;
1151
1152 static void __init register_page_bootmem_info(void)
1153 {
1154 #ifdef CONFIG_NUMA
1155         int i;
1156
1157         for_each_online_node(i)
1158                 register_page_bootmem_info_node(NODE_DATA(i));
1159 #endif
1160 }
1161
1162 void __init mem_init(void)
1163 {
1164         pci_iommu_alloc();
1165
1166         /* clear_bss() already cleared the empty_zero_page */
1167
1168         register_page_bootmem_info();
1169
1170         /* this will put all memory onto the freelists */
1171         free_all_bootmem();
1172         after_bootmem = 1;
1173
1174         /* Register memory areas for /proc/kcore */
1175         kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
1176                          PAGE_SIZE, KCORE_OTHER);
1177
1178         mem_init_print_info(NULL);
1179 }
1180
1181 int kernel_set_to_readonly;
1182
1183 void set_kernel_text_rw(void)
1184 {
1185         unsigned long start = PFN_ALIGN(_text);
1186         unsigned long end = PFN_ALIGN(__stop___ex_table);
1187
1188         if (!kernel_set_to_readonly)
1189                 return;
1190
1191         pr_debug("Set kernel text: %lx - %lx for read write\n",
1192                  start, end);
1193
1194         /*
1195          * Make the kernel identity mapping for text RW. Kernel text
1196          * mapping will always be RO. Refer to the comment in
1197          * static_protections() in pageattr.c
1198          */
1199         set_memory_rw(start, (end - start) >> PAGE_SHIFT);
1200 }
1201
1202 void set_kernel_text_ro(void)
1203 {
1204         unsigned long start = PFN_ALIGN(_text);
1205         unsigned long end = PFN_ALIGN(__stop___ex_table);
1206
1207         if (!kernel_set_to_readonly)
1208                 return;
1209
1210         pr_debug("Set kernel text: %lx - %lx for read only\n",
1211                  start, end);
1212
1213         /*
1214          * Set the kernel identity mapping for text RO.
1215          */
1216         set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1217 }
1218
1219 void mark_rodata_ro(void)
1220 {
1221         unsigned long start = PFN_ALIGN(_text);
1222         unsigned long rodata_start = PFN_ALIGN(__start_rodata);
1223         unsigned long end = (unsigned long) &__end_rodata_hpage_align;
1224         unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
1225         unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
1226         unsigned long all_end;
1227
1228         printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1229                (end - start) >> 10);
1230         set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1231
1232         kernel_set_to_readonly = 1;
1233
1234         /*
1235          * The rodata/data/bss/brk section (but not the kernel text!)
1236          * should also be not-executable.
1237          *
1238          * We align all_end to PMD_SIZE because the existing mapping
1239          * is a full PMD. If we aligned _brk_end to PAGE_SIZE instead, we
1240          * would split the PMD and the remainder between _brk_end and the
1241          * end of the PMD would remain mapped executable.
1242          *
1243          * Any PMD which was setup after the one which covers _brk_end
1244          * has been zapped already via cleanup_highmap().
1245          */
1246         all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1247         set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
1248
1249 #ifdef CONFIG_CPA_DEBUG
1250         printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
1251         set_memory_rw(start, (end-start) >> PAGE_SHIFT);
1252
1253         printk(KERN_INFO "Testing CPA: again\n");
1254         set_memory_ro(start, (end-start) >> PAGE_SHIFT);
1255 #endif
1256
1257         free_init_pages("unused kernel",
1258                         (unsigned long) __va(__pa_symbol(text_end)),
1259                         (unsigned long) __va(__pa_symbol(rodata_start)));
1260         free_init_pages("unused kernel",
1261                         (unsigned long) __va(__pa_symbol(rodata_end)),
1262                         (unsigned long) __va(__pa_symbol(_sdata)));
1263
1264         debug_checkwx();
1265 }
1266
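/*
 * Walk the kernel page tables and report whether @addr is backed by a valid
 * pfn. Non-canonical addresses are rejected up front.
 */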
1267 int kern_addr_valid(unsigned long addr)
1268 {
1269         unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
1270         pgd_t *pgd;
1271         p4d_t *p4d;
1272         pud_t *pud;
1273         pmd_t *pmd;
1274         pte_t *pte;
1275
1276         if (above != 0 && above != -1UL)
1277                 return 0;
1278
1279         pgd = pgd_offset_k(addr);
1280         if (pgd_none(*pgd))
1281                 return 0;
1282
1283         p4d = p4d_offset(pgd, addr);
1284         if (p4d_none(*p4d))
1285                 return 0;
1286
1287         pud = pud_offset(p4d, addr);
1288         if (pud_none(*pud))
1289                 return 0;
1290
1291         if (pud_large(*pud))
1292                 return pfn_valid(pud_pfn(*pud));
1293
1294         pmd = pmd_offset(pud, addr);
1295         if (pmd_none(*pmd))
1296                 return 0;
1297
1298         if (pmd_large(*pmd))
1299                 return pfn_valid(pmd_pfn(*pmd));
1300
1301         pte = pte_offset_kernel(pmd, addr);
1302         if (pte_none(*pte))
1303                 return 0;
1304
1305         return pfn_valid(pte_pfn(*pte));
1306 }
1307
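/*
 * Pick the memory block granularity used for hotplug: 2GB on UV systems or
 * machines with 64GB of RAM or more, otherwise the minimum section size.
 */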
1308 static unsigned long probe_memory_block_size(void)
1309 {
1310         unsigned long bz = MIN_MEMORY_BLOCK_SIZE;
1311
1312         /* if system is UV or has 64GB of RAM or more, use large blocks */
1313         if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
1314                 bz = 2UL << 30; /* 2GB */
1315
1316         pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
1317
1318         return bz;
1319 }
1320
1321 static unsigned long memory_block_size_probed;
1322 unsigned long memory_block_size_bytes(void)
1323 {
1324         if (!memory_block_size_probed)
1325                 memory_block_size_probed = probe_memory_block_size();
1326
1327         return memory_block_size_probed;
1328 }
1329
1330 #ifdef CONFIG_SPARSEMEM_VMEMMAP
1331 /*
1332  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1333  */
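/* Book-keeping for coalescing the "[range] PMD -> [range]" debug output. */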
1334 static long __meminitdata addr_start, addr_end;
1335 static void __meminitdata *p_start, *p_end;
1336 static int __meminitdata node_start;
1337
1338 static int __meminit vmemmap_populate_hugepages(unsigned long start,
1339                 unsigned long end, int node, struct vmem_altmap *altmap)
1340 {
1341         unsigned long addr;
1342         unsigned long next;
1343         pgd_t *pgd;
1344         p4d_t *p4d;
1345         pud_t *pud;
1346         pmd_t *pmd;
1347
1348         for (addr = start; addr < end; addr = next) {
1349                 next = pmd_addr_end(addr, end);
1350
1351                 pgd = vmemmap_pgd_populate(addr, node);
1352                 if (!pgd)
1353                         return -ENOMEM;
1354
1355                 p4d = vmemmap_p4d_populate(pgd, addr, node);
1356                 if (!p4d)
1357                         return -ENOMEM;
1358
1359                 pud = vmemmap_pud_populate(p4d, addr, node);
1360                 if (!pud)
1361                         return -ENOMEM;
1362
1363                 pmd = pmd_offset(pud, addr);
1364                 if (pmd_none(*pmd)) {
1365                         void *p;
1366
1367                         p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
1368                         if (p) {
1369                                 pte_t entry;
1370
1371                                 entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1372                                                 PAGE_KERNEL_LARGE);
1373                                 set_pmd(pmd, __pmd(pte_val(entry)));
1374
1375                                 /* check to see if we have contiguous blocks */
1376                                 if (p_end != p || node_start != node) {
1377                                         if (p_start)
1378                                                 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1379                                                        addr_start, addr_end-1, p_start, p_end-1, node_start);
1380                                         addr_start = addr;
1381                                         node_start = node;
1382                                         p_start = p;
1383                                 }
1384
1385                                 addr_end = addr + PMD_SIZE;
1386                                 p_end = p + PMD_SIZE;
1387                                 continue;
1388                         } else if (altmap)
1389                                 return -ENOMEM; /* no fallback */
1390                 } else if (pmd_large(*pmd)) {
1391                         vmemmap_verify((pte_t *)pmd, node, addr, next);
1392                         continue;
1393                 }
1394                 pr_warn_once("vmemmap: falling back to regular page backing\n");
1395                 if (vmemmap_populate_basepages(addr, next, node))
1396                         return -ENOMEM;
1397         }
1398         return 0;
1399 }
1400
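/*
 * Populate the vmemmap for [start, end): use 2MB PMD mappings when PSE is
 * available, otherwise fall back to base pages (not supported with altmap).
 */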
1401 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
1402 {
1403         struct vmem_altmap *altmap = to_vmem_altmap(start);
1404         int err;
1405
1406         if (boot_cpu_has(X86_FEATURE_PSE))
1407                 err = vmemmap_populate_hugepages(start, end, node, altmap);
1408         else if (altmap) {
1409                 pr_err_once("%s: no cpu support for altmap allocations\n",
1410                                 __func__);
1411                 err = -ENOMEM;
1412         } else
1413                 err = vmemmap_populate_basepages(start, end, node);
1414         if (!err)
1415                 sync_global_pgds(start, end - 1);
1416         return err;
1417 }
1418
1419 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
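/*
 * Mark the page table pages (and, with PSE, the memmap pages themselves)
 * backing this section's memmap as bootmem info, so that memory hot-remove
 * can account for them.
 */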
1420 void register_page_bootmem_memmap(unsigned long section_nr,
1421                                   struct page *start_page, unsigned long size)
1422 {
1423         unsigned long addr = (unsigned long)start_page;
1424         unsigned long end = (unsigned long)(start_page + size);
1425         unsigned long next;
1426         pgd_t *pgd;
1427         p4d_t *p4d;
1428         pud_t *pud;
1429         pmd_t *pmd;
1430         unsigned int nr_pages;
1431         struct page *page;
1432
1433         for (; addr < end; addr = next) {
1434                 pte_t *pte = NULL;
1435
1436                 pgd = pgd_offset_k(addr);
1437                 if (pgd_none(*pgd)) {
1438                         next = (addr + PAGE_SIZE) & PAGE_MASK;
1439                         continue;
1440                 }
1441                 get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
1442
1443                 p4d = p4d_offset(pgd, addr);
1444                 if (p4d_none(*p4d)) {
1445                         next = (addr + PAGE_SIZE) & PAGE_MASK;
1446                         continue;
1447                 }
1448                 get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);
1449
1450                 pud = pud_offset(p4d, addr);
1451                 if (pud_none(*pud)) {
1452                         next = (addr + PAGE_SIZE) & PAGE_MASK;
1453                         continue;
1454                 }
1455                 get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
1456
1457                 if (!boot_cpu_has(X86_FEATURE_PSE)) {
1458                         next = (addr + PAGE_SIZE) & PAGE_MASK;
1459                         pmd = pmd_offset(pud, addr);
1460                         if (pmd_none(*pmd))
1461                                 continue;
1462                         get_page_bootmem(section_nr, pmd_page(*pmd),
1463                                          MIX_SECTION_INFO);
1464
1465                         pte = pte_offset_kernel(pmd, addr);
1466                         if (pte_none(*pte))
1467                                 continue;
1468                         get_page_bootmem(section_nr, pte_page(*pte),
1469                                          SECTION_INFO);
1470                 } else {
1471                         next = pmd_addr_end(addr, end);
1472
1473                         pmd = pmd_offset(pud, addr);
1474                         if (pmd_none(*pmd))
1475                                 continue;
1476
1477                         nr_pages = 1 << (get_order(PMD_SIZE));
1478                         page = pmd_page(*pmd);
1479                         while (nr_pages--)
1480                                 get_page_bootmem(section_nr, page++,
1481                                                  SECTION_INFO);
1482                 }
1483         }
1484 }
1485 #endif
1486
1487 void __meminit vmemmap_populate_print_last(void)
1488 {
1489         if (p_start) {
1490                 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1491                         addr_start, addr_end-1, p_start, p_end-1, node_start);
1492                 p_start = NULL;
1493                 p_end = NULL;
1494                 node_start = 0;
1495         }
1496 }
1497 #endif