/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>
static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
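/* numa_debug is off by default; boot with "numa=debug" to enable dbg(). */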
int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);
static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
/*
 * We need somewhere to store start/end/node for each region until we have
 * allocated the real node_data structures.
 */
#define MAX_REGIONS	(MAX_LMB_REGIONS*2)
static struct {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
} init_node_data[MAX_REGIONS] __initdata;
int __init early_pfn_to_nid(unsigned long pfn)
{
	unsigned int i;

	for (i = 0; init_node_data[i].end_pfn; i++) {
		unsigned long start_pfn = init_node_data[i].start_pfn;
		unsigned long end_pfn = init_node_data[i].end_pfn;

		if ((start_pfn <= pfn) && (pfn < end_pfn))
			return init_node_data[i].nid;
	}

	return -1;
}
void __init add_region(unsigned int nid, unsigned long start_pfn,
		       unsigned long pages)
{
	unsigned int i;

	dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n",
		nid, start_pfn, pages);

	/* First try to coalesce with an existing region on this node */
	for (i = 0; init_node_data[i].end_pfn; i++) {
		if (init_node_data[i].nid != nid)
			continue;

		if (init_node_data[i].end_pfn == start_pfn) {
			init_node_data[i].end_pfn += pages;
			return;
		}

		if (init_node_data[i].start_pfn == (start_pfn + pages)) {
			init_node_data[i].start_pfn -= pages;
			return;
		}
	}

	/*
	 * Leave the last entry NULL so we don't iterate off the end (we use
	 * entry.end_pfn to terminate the walk).
	 */
	if (i >= (MAX_REGIONS - 1)) {
		printk(KERN_ERR "WARNING: too many memory regions in "
				"numa code, truncating\n");
		return;
	}

	init_node_data[i].start_pfn = start_pfn;
	init_node_data[i].end_pfn = start_pfn + pages;
	init_node_data[i].nid = nid;
}
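/*
 * Illustrative example (not in the original source): starting from an
 * empty table, add_region(0, 0x100, 0x100) creates [0x100, 0x200) on
 * node 0; a following add_region(0, 0x200, 0x100) coalesces with it,
 * extending the entry to [0x100, 0x300) instead of using a second slot.
 */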
/* We assume init_node_data has no overlapping regions */
void __init get_region(unsigned int nid, unsigned long *start_pfn,
		       unsigned long *end_pfn, unsigned long *pages_present)
{
	unsigned int i;

	*start_pfn = -1UL;
	*end_pfn = *pages_present = 0;

	for (i = 0; init_node_data[i].end_pfn; i++) {
		if (init_node_data[i].nid != nid)
			continue;

		*pages_present += init_node_data[i].end_pfn -
			init_node_data[i].start_pfn;

		if (init_node_data[i].start_pfn < *start_pfn)
			*start_pfn = init_node_data[i].start_pfn;

		if (init_node_data[i].end_pfn > *end_pfn)
			*end_pfn = init_node_data[i].end_pfn;
	}

	/* We didn't find a matching region, return start/end as 0 */
	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
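/*
 * Example (illustrative): if node 0 holds regions [0x0, 0x100) and
 * [0x200, 0x300), get_region(0, ...) returns *start_pfn = 0x0,
 * *end_pfn = 0x300 and *pages_present = 0x200 -- the span includes the
 * hole, which callers subtract back out (see paging_init() below).
 */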
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}
#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = (unsigned int *)get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = (unsigned int *)get_property(cpu_node,
							   "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}
/* must hold reference to node during call */
static unsigned int *of_get_associativity(struct device_node *dev)
{
	return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}
static int of_node_to_nid(struct device_node *device)
{
	int nid = -1;
	unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (tmp && (tmp[0] >= min_common_depth)) {
		nid = tmp[min_common_depth];
	} else {
		dbg("WARNING: no NUMA information for %s\n",
		    device->full_name);
	}

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}
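/*
 * Example (illustrative): an "ibm,associativity" property of
 * <4 0 0 0 2> has tmp[0] = 4 entries; with min_common_depth = 4 the
 * lookup above returns tmp[4] = 2 as the node id. A value of 0xffff
 * at that depth (POWER4 LPAR) is mapped to -1.
 */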
/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");
	if (!rtas_root)
		return -1;

	/*
	 * This property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = (unsigned int *)get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/* len is in bytes; we need at least two 32-bit cells */
	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}
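/*
 * Example (illustrative): ref_points = <0x4 0x4> selects depth 4, i.e.
 * the fifth cell of each "ibm,associativity" list, as the level at
 * which node ids are assigned; of_node_to_nid() then indexes that level.
 */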
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = prom_n_addr_cells(memory);
	*n_size_cells = prom_n_size_cells(memory);
	of_node_put(memory);
}
static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
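/*
 * Worked example: with n = 2 and cells { 0x00000001, 0x00008000 } the
 * result is (0x1 << 32) | 0x8000 = 0x100008000, and *buf is left
 * pointing at the next unread cell.
 */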
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid(cpu);

	if (nid < 0 || nid >= num_online_nodes()) {
		printk(KERN_ERR "WARNING: cpu %ld "
		       "maps to invalid NUMA node %d\n",
		       lcpu, nid);
		nid = 0;
	}
out:
	node_set_online(nid);

	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}
static int cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		if (min_common_depth == -1 || !numa_enabled)
			map_cpu_to_node(lcpu, 0);
		else
			numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return ret;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
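/*
 * Example (illustrative, with a memory limit in effect): if
 * lmb_end_of_DRAM() is 0x40000000, a region starting at 0x3f000000 with
 * size 0x2000000 is truncated to 0x1000000 bytes, while one starting at
 * or above 0x40000000 is discarded (size 0).
 */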
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int max_domain = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP init,
	 * we need to know the maximum node id now. This is because each
	 * node id must have NODE_DATA etc backing it.
	 * As a result of hotplug we could still have cpus appear later on
	 * with larger node ids. In that case we force the cpu into node 0.
	 */
	for_each_cpu(i) {
		int nid;

		cpu = find_cpu_node(i);
		if (cpu) {
			nid = of_node_to_nid(cpu);
			of_node_put(cpu);

			if (nid < MAX_NUMNODES &&
			    max_domain < nid)
				max_domain = nid;
		}
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf =
				(unsigned int *)get_property(memory, "reg",
					&len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		nid = of_node_to_nid(memory);

		if (nid < 0 || nid >= MAX_NUMNODES) {
			printk(KERN_ERR "WARNING: memory at %lx maps "
			       "to invalid NUMA node %d\n", start,
			       nid);
			nid = 0;
		}

		if (max_domain < nid)
			max_domain = nid;

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_region(nid, start >> PAGE_SHIFT,
			   size >> PAGE_SHIFT);

		if (--ranges)
			goto new_range;
	}

	for (i = 0; i <= max_domain; i++)
		node_set_online(i);

	max_domain = numa_setup_cpu(boot_cpuid);

	return 0;
}
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	map_cpu_to_node(boot_cpuid, 0);
	for (i = 0; i < lmb.memory.cnt; ++i)
		add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
			   lmb_size_pages(&lmb.memory, i));
	node_set_online(0);
}
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_INFO "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_INFO "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}
/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}
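/*
 * Note (assumed rationale): do_init_bootmem() initializes nodes in
 * ascending order, so a fallback allocation can only land on a
 * lower-numbered node; the new_nid < nid test above is therefore enough
 * to detect memory coming from a node whose bootmem is already set up.
 */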
void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;
	static struct notifier_block ppc64_numa_nb = {
		.notifier_call = cpu_numa_callback,
		.priority = 1 /* Must run before sched domains notifier. */
	};

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn, pages_present;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_region(nid, &start_pfn, &end_pfn, &pages_present);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		/* Add free regions on this node */
		for (i = 0; init_node_data[i].end_pfn; i++) {
			unsigned long start, end;

			if (init_node_data[i].nid != nid)
				continue;

			start = init_node_data[i].start_pfn << PAGE_SHIFT;
			end = init_node_data[i].end_pfn << PAGE_SHIFT;

			dbg("free_bootmem %lx %lx\n", start, end - start);
			free_bootmem_node(NODE_DATA(nid), start, end - start);
		}

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		/* Add regions into sparsemem */
		for (i = 0; init_node_data[i].end_pfn; i++) {
			unsigned long start, end;

			if (init_node_data[i].nid != nid)
				continue;

			start = init_node_data[i].start_pfn;
			end = init_node_data[i].end_pfn;

			memory_present(nid, start, end);
		}
	}
}
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	int nid;

	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn, pages_present;

		get_region(nid, &start_pfn, &end_pfn, &pages_present);

		zones_size[ZONE_DMA] = end_pfn - start_pfn;
		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;

		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
		    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
				    zholes_size);
	}
}
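/*
 * "numa=" boot options: "numa=off" disables NUMA setup (numa_enabled = 0)
 * and "numa=debug" turns on the dbg() messages. Matching below is a
 * substring search, so e.g. "numa=off,debug" sets both.
 */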
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	nodemask_t nodes;
	int nid = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return nid;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid(memory);

		/* Domains not present at boot default to 0 */
		if (nid < 0 || !node_online(nid))
			nid = any_online_node(NODE_MASK_ALL);

		if ((scn_addr >= start) && (scn_addr < (start + size))) {
			of_node_put(memory);
			goto got_nid;
		}

		if (--ranges)		/* process all ranges in cell */
			goto ha_new_range;
	}
	BUG();	/* section address should be found above */

	/* Temporary code to ensure that returned node is not empty */
got_nid:
	nodes = node_online_map;
	while (NODE_DATA(nid)->node_spanned_pages == 0) {
		node_clear(nid, nodes);
		nid = any_online_node(nodes);
	}
	return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */