git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - mm/nobootmem.c
Merge branch 'master' into x86/memblock
[karo-tx-linux.git] / mm / nobootmem.c
index 7fa41b4a07bf2c129d57d2df5be92b434252ca7e..24f0fc1a56d60ebbbacf1950ac0ebec7067dbf41 100644 (file)
@@ -41,14 +41,13 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
        if (limit > memblock.current_limit)
                limit = memblock.current_limit;
 
-       addr = find_memory_core_early(nid, size, align, goal, limit);
-
-       if (addr == MEMBLOCK_ERROR)
+       addr = memblock_find_in_range_node(goal, limit, size, align, nid);
+       if (!addr)
                return NULL;
 
        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
-       memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+       memblock_reserve(addr, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
@@ -107,23 +106,27 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
                __free_pages_bootmem(pfn_to_page(i), 0);
 }
 
-unsigned long __init free_all_memory_core_early(int nodeid)
+unsigned long __init free_low_memory_core_early(int nodeid)
 {
-       int i;
-       u64 start, end;
        unsigned long count = 0;
-       struct range *range = NULL;
-       int nr_range;
-
-       nr_range = get_free_all_memory_range(&range, nodeid);
-
-       for (i = 0; i < nr_range; i++) {
-               start = range[i].start;
-               end = range[i].end;
-               count += end - start;
-               __free_pages_memory(start, end);
+       phys_addr_t start, end;
+       u64 i;
+
+       /* free reserved array temporarily so that it's treated as free area */
+       memblock_free_reserved_regions();
+
+       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+               unsigned long start_pfn = PFN_UP(start);
+               unsigned long end_pfn = min_t(unsigned long,
+                                             PFN_DOWN(end), max_low_pfn);
+               if (start_pfn < end_pfn) {
+                       __free_pages_memory(start_pfn, end_pfn);
+                       count += end_pfn - start_pfn;
+               }
        }
 
+       /* put region array back? */
+       memblock_reserve_reserved_regions();
        return count;
 }
 
@@ -137,7 +140,7 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
        register_page_bootmem_info_node(pgdat);
 
-       /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+       /* free_low_memory_core_early(MAX_NUMNODES) will be called later */
        return 0;
 }
 
@@ -155,7 +158,7 @@ unsigned long __init free_all_bootmem(void)
         * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
         *  will be used instead of only Node0 related
         */
-       return free_all_memory_core_early(MAX_NUMNODES);
+       return free_low_memory_core_early(MAX_NUMNODES);
 }
 
 /**
@@ -172,7 +175,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
 {
        kmemleak_free_part(__va(physaddr), size);
-       memblock_x86_free_range(physaddr, physaddr + size);
+       memblock_free(physaddr, size);
 }
 
 /**
@@ -187,7 +190,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
        kmemleak_free_part(__va(addr), size);
-       memblock_x86_free_range(addr, addr + size);
+       memblock_free(addr, size);
 }
 
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,