/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

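/*
 * Usage sketch (illustrative, not part of the original file): both helpers
 * treat ranges as half-open intervals [base, base + size), so two ranges
 * overlap iff each one starts below the other's end.  E.g. [0x1000, 0x2000)
 * and [0x1800, 0x2800) overlap because 0x1000 < 0x2800 and 0x1800 < 0x2000:
 *
 *	long idx = memblock_overlaps_region(&memblock.reserved,
 *					    0x1800, 0x1000);
 *	if (idx >= 0)
 *		pr_info("collides with reserved region %ld\n", idx);
 */
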
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* Bail out early in case a huge size is requested */
	if (end < size)
		return 0;

	base = round_down(end - size, align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = round_down(res_base - size, align);
	}

	return 0;
}

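/*
 * Worked example (illustrative): with end = 0x6000, size = 0x1000 and
 * align = 0x1000, the first candidate is round_down(0x6000 - 0x1000,
 * 0x1000) = 0x5000.  If a reserved region at res_base = 0x4800 collides,
 * the next candidate becomes round_down(0x4800 - 0x1000, 0x1000) =
 * 0x3000, i.e. the search steps down below each colliding reservation
 * until it finds a hole or runs out of room.
 */
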
/*
 * Find a free area with specified alignment in a specific range.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found)
			return found;
	}

	return 0;
}

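/*
 * Usage sketch (illustrative): find 4KiB of free, 4KiB-aligned physical
 * memory below 1MiB without reserving it; 0 means no room was found.
 *
 *	phys_addr_t where = memblock_find_in_range(0, 0x100000,
 *						   0x1000, 0x1000);
 *	if (!where)
 *		pr_warn("no room below 1MiB\n");
 */
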
/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we fall back to MEMBLOCK for the allocation. That
	 * means this is unsafe to use when bootmem is currently active
	 * (unless bootmem itself is implemented on top of MEMBLOCK which
	 * isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		     memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
		type->cnt--;
	}
}

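/*
 * Worked example (illustrative): with regions 0:[0x0-0x1000) and
 * 1:[0x1000-0x2000) on the same node, this->base + this->size equals
 * next->base, so the pair collapses into a single region [0x0-0x2000)
 * and cnt drops by one.  A gap between the two, or differing node IDs,
 * leaves both regions untouched.
 */
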
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	bool insert = false;
	phys_addr_t obase = base, end = base + size;
	int i, nr_new;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, MAX_NUMNODES);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       MAX_NUMNODES);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

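/*
 * Worked example (illustrative): adding [0x800, 0x1800) while [0x1000,
 * 0x2000) already exists inserts only the uncovered lower part
 * [0x800, 0x1000); the merge pass then coalesces it with its neighbour,
 * leaving a single region [0x800, 0x2000).
 */
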
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;
	}
	return 0;
}

long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return memblock_add_region(_rgn, base, size);
}

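/*
 * Usage sketch (illustrative): carve a firmware table out of the
 * allocator, then return it once it has been copied elsewhere.  A region
 * may be reserved before memblock even knows about the memory containing
 * it, since overlaps are legal in the reserved array.
 *
 *	memblock_reserve(fw_table_base, fw_table_size);
 *	...
 *	memblock_free(fw_table_base, fw_table_size);
 *
 * (fw_table_base/fw_table_size are hypothetical placeholder names.)
 */
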
/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32 bits
 * of *@idx index into the memory regions and the upper 32 bits index the
 * areas before each reserved region.  For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32 bits index the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

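/*
 * Usage sketch (illustrative): walk every free range on any node via the
 * for_each_free_mem_range() wrapper from <linux/memblock.h>, which drives
 * this iterator with a u64 cursor starting at 0.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *		pr_info("free: [%#llx-%#llx)\n",
 *			(unsigned long long)start, (unsigned long long)end);
 */
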
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

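/*
 * Usage sketch (illustrative): the for_each_mem_pfn_range() wrapper from
 * <linux/memblock.h> drives this iterator with an int cursor starting at
 * -1 and reports each memory region as a PFN range plus its node ID.
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("pfns [%lx-%lx) on node %d\n",
 *			start_pfn, end_pfn, nid);
 */
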
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	phys_addr_t end = base + size;
	int i;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size = rend - rgn->base;
			memblock_insert_region(type, i, rbase, base - rbase,
					       rgn->nid);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size = rend - rgn->base;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       rgn->nid);
		} else {
			/* @rgn is fully contained, set ->nid */
			rgn->nid = nid;
		}
	}

	memblock_merge_regions(type);
	return 0;
}

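/*
 * Worked example (illustrative): setting nid = 1 on [0x1000, 0x3000) when
 * one region spans [0x0, 0x4000) splits it into [0x0, 0x1000) and
 * [0x3000, 0x4000) with the old nid, plus [0x1000, 0x3000) with nid 1;
 * only the fully contained middle piece has its nid rewritten.
 */
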
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range(0, max_addr, size, align);
	if (found && !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

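/*
 * Usage note (illustrative): the three entry points above differ only in
 * how failure is handled and where the upper bound comes from.
 *
 *	p = __memblock_alloc_base(size, align, max);	// 0 on failure
 *	p = memblock_alloc_base(size, align, max);	// panics on failure
 *	p = memblock_alloc(size, align);		// capped at
 *							// current_limit
 *
 * All return a physical address; use __va() to obtain a virtual address
 * where such a direct mapping is valid.
 */
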
/*
 * Additional node-local top-down allocators.
 *
 * WARNING: Only available after early_node_map[] has been populated;
 * on some architectures that means after all the calls to
 * add_active_range() have been made.
 */

static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
						 phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
		if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
			return max(start, PFN_PHYS(start_pfn));
#endif
	*nid = 0;
	return start;
}

phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
					       phys_addr_t end,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	for (i = mem->cnt - 1; i >= 0; i--) {
		struct memblock_region *r = &mem->regions[i];
		phys_addr_t base = max(start, r->base);
		phys_addr_t top = min(end, r->base + r->size);

		while (base < top) {
			phys_addr_t tbase, ret;
			int tnid;

			tbase = memblock_nid_range_rev(base, top, &tnid);
			if (nid == MAX_NUMNODES || tnid == nid) {
				ret = memblock_find_region(tbase, top, size, align);
				if (ret)
					return ret;
			}
			top = tbase;
		}
	}

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t found;

	/*
	 * We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					    size, align, nid);
	if (found && !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

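/*
 * Usage note (illustrative): architectures typically call this while
 * handling the mem= boot parameter; e.g. "mem=512M" would lead to a call
 * like memblock_enforce_memory_limit(512 << 20), clipping both the
 * memory and reserved arrays to the first 512MiB.
 */
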
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

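/*
 * Worked example (illustrative): since both region arrays stay sorted and
 * non-overlapping, a binary search suffices.  With regions [0x0-0x1000)
 * and [0x2000-0x3000), addr = 0x2800 probes mid = 1, finds
 * 0x2000 <= 0x2800 < 0x3000 and returns 1; addr = 0x1800 narrows the
 * window to emptiness and returns -1.
 */
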
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock_set_region_node(&memblock.memory.regions[0], MAX_NUMNODES);
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock_set_region_node(&memblock.reserved.regions[0], MAX_NUMNODES);
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

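/*
 * Boot-time flow sketch (illustrative; the exact call sites are per-arch):
 *
 *	memblock_init();			// hook up the static arrays
 *	memblock_add(ram_base, ram_size);	// register detected RAM
 *	memblock_reserve(kernel_base, kernel_size);
 *	memblock_analyze();			// totals + enable resizing
 *	p = memblock_alloc(sz, align);		// early allocations
 *
 * ram_base/ram_size/kernel_base/kernel_size/sz/align are hypothetical
 * placeholders for platform-specific values.
 */
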
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

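/*
 * Usage note (illustrative): with CONFIG_DEBUG_FS enabled and debugfs
 * mounted, the current region tables can be inspected from userspace,
 * one "index: start..end" line per region as printed above:
 *
 *	cat /sys/kernel/debug/memblock/memory
 *	cat /sys/kernel/debug/memblock/reserved
 */
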
#endif /* CONFIG_DEBUG_FS */