 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <asm/sparsemem.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
static int numa_enabled = 1;
static char *cmdline __initdata;
static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;
#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
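/*
 * distance_lookup_table[nid] caches the first MAX_DISTANCE_REF_POINTS
 * associativity values seen for each node (filled in by
 * initialize_distance_lookup_table() below), so that __node_distance()
 * can compare two nodes without walking the device tree again.
 */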
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
static void __init setup_node_to_cpumask_map(void)
	unsigned int node, num = 0;
	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
		nr_node_ids = num + 1;
	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
static int __init fake_numa_create_new_node(unsigned long end_pfn,
	unsigned long long mem;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	mem = memparse(p, &p);
	if (mem < curr_boundary)
	if ((end_pfn << PAGE_SHIFT) > mem) {
		 * Skip commas and spaces
		while (*p == ',' || *p == ' ' || *p == '\t')
		dbg("created new fake_node with id %d\n", fake_nid);
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
static void __init get_node_active_region(unsigned long pfn,
		struct node_active_region *node_ar)
	unsigned long start_pfn, end_pfn;
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (pfn >= start_pfn && pfn < end_pfn) {
			node_ar->start_pfn = start_pfn;
			node_ar->end_pfn = end_pfn;
static void map_cpu_to_node(int cpu, int node)
	numa_cpu_lookup_table[cpu] = node;
	dbg("adding cpu %d to node %d\n", cpu, node);
	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
	int node = numa_cpu_lookup_table[cpu];
	dbg("removing cpu %lu from node %d\n", cpu, node);
	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
	return of_get_property(dev, "ibm,associativity", NULL);
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
static const u32 *of_get_usable_memory(struct device_node *memory)
	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
int __node_distance(int a, int b)
	int distance = LOCAL_DISTANCE;
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
		/* Double the distance for each NUMA level */
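		/*
		 * The rest of the loop is assumed to break on the first level
		 * where the two nodes' cached values match and to double
		 * 'distance' for every level where they differ, so nodes whose
		 * associativity paths diverge early report a larger
		 * __node_distance().
		 */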
static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * information is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
	if (min_common_depth == -1)
	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];
	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);
/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
static int of_node_to_nid_single(struct device_node *device)
	const unsigned int *tmp;
	tmp = of_get_associativity(device);
		nid = associativity_to_nid(tmp);
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
	struct device_node *tmp;
		nid = of_node_to_nid_single(device);
		device = of_get_parent(tmp);
EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
	struct device_node *root;
	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
		root = of_find_node_by_path("/rtas");
		root = of_find_node_by_path("/");
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
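	 *
	 * Example (hypothetical property contents): with form 1 affinity a
	 * property of <4 2> would mean index 4 of each ibm,associativity
	 * array is the most significant NUMA boundary, and the depth
	 * returned below would be taken from distance_ref_points[0].
	 */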
	distance_ref_points = of_get_property(root,
			"ibm,associativity-reference-points",
			&distance_ref_points_depth);
	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
	distance_ref_points_depth /= sizeof(int);
	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
	if (form1_affinity) {
		depth = distance_ref_points[0];
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
		depth = distance_ref_points[1];
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
	struct device_node *memory = NULL;
	memory = of_find_node_by_type(memory, "memory");
		panic("numa.c: No memory nodes found!");
	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
static unsigned long read_n_cells(int n, const unsigned int **buf)
	unsigned long result = 0;
		result = (result << 32) | **buf;
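		/*
		 * One 32-bit cell is folded into 'result' per pass, and *buf
		 * is assumed to be advanced past it (the surrounding
		 * while (n--) loop and pointer increment are not shown), so
		 * successive calls walk through a property's cells in order.
		 */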
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries. Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
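	/*
	 * Size check above: each list entry occupies n_mem_addr_cells cells
	 * for the base address plus four more cells (drc_index, reserved,
	 * aa_index, flags, as read in read_drconf_cell()), and one leading
	 * cell holds the entry count itself.
	 */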
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
static u64 of_get_lmb_size(struct device_node *memory)
	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
	return read_n_cells(n_mem_size_cells, &prop);
struct assoc_arrays {
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;
	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
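	/*
	 * Layout check above: two leading cells hold N (n_arrays) and M
	 * (array_sz), followed by the N*M cells that make up the
	 * associativity arrays themselves.
	 */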
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
	int nid = default_nid;
	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];
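		/*
		 * aa_index selects one row of the flattened n_arrays x
		 * array_sz lookup table; within that row the cell at
		 * min_common_depth - 1 is the node id, mirroring how
		 * associativity_to_nid() indexes a full ibm,associativity
		 * property.
		 */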
		if (nid == 0xffff || nid >= MAX_NUMNODES)
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
	nid = of_node_to_nid_single(cpu);
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
	map_cpu_to_node(lcpu, nid);
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
 * Check and possibly modify a memory region to enforce the memory limit.
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	if (start + size <= memblock_end_of_DRAM())
	if (start >= memblock_end_of_DRAM())
	return memblock_end_of_DRAM() - start;
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
static inline int __init read_usm_ranges(const u32 **usm)
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	return read_n_cells(n_mem_size_cells, usm);
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
static void __init parse_drconf_memory(struct device_node *memory)
	const u32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	struct assoc_arrays aa = { .arrays = NULL };
	n = of_get_drconf_memory(memory, &dm);
	lmb_size = of_get_lmb_size(memory);
	rc = of_get_assoc_arrays(memory, &aa);
	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	for (; n != 0; --n) {
		struct of_drconf_cell drmem;
		read_drconf_cell(&drmem, &dm);
		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
		base = drmem.base_addr;
		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
				memblock_set_node(base, sz, nid);
static int __init parse_numa_properties(void)
	struct device_node *memory;
	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
	min_common_depth = find_min_common_depth();
	if (min_common_depth < 0)
		return min_common_depth;
	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	for_each_present_cpu(i) {
		struct device_node *cpu;
		cpu = of_get_cpu_node(i, NULL);
		nid = of_node_to_nid_single(cpu);
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered holes.
		node_set_online(nid);
	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	for_each_node_by_type(memory, "memory") {
		const unsigned int *memcell_buf;
		memcell_buf = of_get_property(memory,
				"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		nid = of_node_to_nid_single(memory);
		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);
		if (!(size = numa_enforce_memory_limit(start, size))) {
		memblock_set_node(start, size, nid);
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
		parse_drconf_memory(memory);
static void __init setup_nonnuma(void)
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;
	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), nid);
		node_set_online(nid);
void __init dump_numa_cpu_topology(void)
	unsigned int cpu, count;
	if (min_common_depth == -1 || !numa_enabled)
	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				printk("-%u", cpu - 1);
		printk("-%u", nr_cpu_ids - 1);
static void __init dump_numa_memory_topology(void)
	if (min_common_depth == -1 || !numa_enabled)
	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d Memory:", node);
		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 * Returns the virtual address of the memory.
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long end_pfn)
	unsigned long ret_paddr;
	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
	/* retry over all memory */
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
		panic("numa.c: cannot allocate %lu bytes for node %d",
	ret = __va(ret_paddr);
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator. If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
	 * since it would be useless.
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
		dbg("alloc_bootmem %p %lx\n", ret, size);
	memset(ret, 0, size);
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
static void __init mark_reserved_regions_for_nid(int nid)
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;
	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the entire node.
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
		       node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			 * if reserved region extends past active region
			 * then trim size to active region
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
			 * if reserved region is contained in the active region
			if (end_pfn <= node_ar.end_pfn)
			 * reserved region extends past the active region
			 * get next active region that contains this reserved region
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
void __init do_init_bootmem(void)
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;
	if (parse_numa_properties())
		dump_numa_memory_topology();
	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;
		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		 * Allocate the node structure node local if possible
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
		if (NODE_DATA(nid)->node_spanned_pages == 0)
		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);
		 * Be very careful about moving this around. Future
		 * calls to careful_zallocation() depend on this getting done correctly.
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	init_bootmem_done = 1;
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	setup_node_to_cpumask_map();
	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
void __init paging_init(void)
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
static int __init early_numa(char *p)
	if (strstr(p, "off"))
	if (strstr(p, "debug"))
	p = strstr(p, "fake=");
		cmdline = p + strlen("fake=");
early_param("numa", early_numa);
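/*
 * Accepted "numa=" options, based on the checks in early_numa() above:
 * "off" disables NUMA, "debug" turns on the dbg() messages, and "fake="
 * records the rest of the string for fake_numa_create_new_node(), which
 * treats it as a comma separated list of node boundaries parsed with
 * memparse() (e.g. "numa=fake=1G,2G" as a hypothetical example).
 */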
#ifdef CONFIG_MEMORY_HOTPLUG
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
	lmb_size = of_get_lmb_size(memory);
	rc = of_get_assoc_arrays(memory, &aa);
	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;
		read_drconf_cell(&drmem, &dm);
		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
		nid = of_drconf_to_nid_single(&drmem, &aa);
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for each memblock.
int hot_add_node_scn_to_nid(unsigned long scn_addr)
	struct device_node *memory;
	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		const unsigned int *memcell_buf;
		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		if ((scn_addr < start) || (scn_addr >= (start + size)))
		nid = of_node_to_nid_single(memory);
	of_node_put(memory);
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
int hot_add_scn_to_nid(unsigned long scn_addr)
	struct device_node *memory = NULL;
	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
		nid = hot_add_node_scn_to_nid(scn_addr);
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
	if (NODE_DATA(nid)->node_spanned_pages)
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
static u64 hot_add_drconf_memory_max(void)
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	return lmb_size * drconf_cell_cnt;
 * memory_hotplug_max - return max address of memory that may be added
 * This is currently only used on systems that support drconfig memory hotplug.
u64 memory_hotplug_max(void)
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
#endif /* CONFIG_MEMORY_HOTPLUG */
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
	struct topology_update_data *next;
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
 * Store the current values of the associativity change counters so that
 * update_cpu_associativity_changes_mask() can detect later changes.
static void setup_cpu_associativity_change_counters(void)
	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
	for_each_possible_cpu(cpu) {
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 * Returns the number of cpus with unhandled associativity changes.
static int update_cpu_associativity_changes_mask(void)
	cpumask_t *changes = &cpu_associativity_changes_mask;
	for_each_possible_cpu(cpu) {
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
			cpumask_set_cpu(cpu, changes);
	return cpumask_weight(changes);
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
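/*
 * That is, 6 * sizeof(u64) / sizeof(u32) + 1 = 13 u32 cells: the 12
 * unpacked associativity values plus the leading length cell described
 * above.
 */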
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16*) packed;
#define VPHN_FIELD_UNUSED (0xffff)
#define VPHN_FIELD_MSB (0x8000)
#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			unpacked[i] = *((u32*)field);
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			unpacked[i] = *((u32*)field);
	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;
	return nr_assoc_doms;
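/*
 * Summary of the packed 16-bit field encoding handled above: 0xffff marks
 * an unused field, a field with the MSB set carries a 15-bit domain number
 * on its own, and a field with the MSB clear is read together with the
 * following field as one 32-bit domain number.
 */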
 * Retrieve the new associativity information for a virtual processor's
 * home node.
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	int hwcpu = get_hard_smp_processor_id(cpu);
	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);
static long vphn_get_associativity(unsigned long cpu,
					unsigned int *associativity)
	rc = hcall_vphn(cpu, associativity);
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
static int update_cpu_topology(void *data)
	struct topology_update_data *update;
	for (update = data; update; update = update->next) {
		if (cpu != update->cpu)
		unregister_cpu_under_node(update->cpu, update->old_nid);
		unmap_cpu_from_node(update->cpu);
		map_cpu_to_node(update->cpu, update->new_nid);
		register_cpu_under_node(update->cpu, update->new_nid);
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
int arch_update_cpu_topology(void)
	unsigned int cpu, changed = 0;
	struct topology_update_data *updates, *ud;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	weight = cpumask_weight(&cpu_associativity_changes_mask);
	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	cpumask_clear(&updated_cpus);
	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		vphn_get_associativity(cpu, associativity);
		ud->new_nid = associativity_to_nid(associativity);
		if (ud->new_nid < 0 || !node_online(ud->new_nid))
			ud->new_nid = first_online_node;
		ud->old_nid = numa_cpu_lookup_table[cpu];
		cpumask_set_cpu(cpu, &updated_cpus);
		ud->next = &updates[i];
	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
	for (ud = &updates[0]; ud; ud = ud->next) {
		dev = get_cpu_device(ud->cpu);
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
static void topology_work_fn(struct work_struct *work)
	rebuild_sched_domains();
static DECLARE_WORK(topology_work, topology_work_fn);
void topology_schedule_update(void)
	schedule_work(&topology_work);
static void topology_timer_fn(unsigned long ignored)
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);
static void reset_topology_timer(void)
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
static void stage_topology_update(int core_id)
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
	struct of_prop_reconfig *update;
	int rc = NOTIFY_DONE;
	case OF_RECONFIG_UPDATE_PROPERTY:
		update = (struct of_prop_reconfig *)data;
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
 * Start polling for associativity changes.
int start_topology_update(void)
	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			rc = of_reconfig_notifier_register(&dt_update_nb);
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   get_lppaca()->shared_proc) {
		if (!vphn_enabled) {
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
 * Disable polling for VPHN associativity changes.
int stop_topology_update(void)
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
	} else if (vphn_enabled) {
		rc = del_timer_sync(&topology_timer);
int prrn_is_enabled(void)
	return prrn_enabled;
static int topology_read(struct seq_file *file, void *v)
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
		seq_puts(file, "off\n");
static int topology_open(struct inode *inode, struct file *file)
	return single_open(file, topology_read, NULL);
static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
	char kbuf[4]; /* "on" or "off" plus null. */
	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
	kbuf[read_len] = '\0';
	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
static const struct file_operations topology_ops = {
	.write = topology_write,
	.open = topology_open,
	.release = single_release
static int topology_update_init(void)
	start_topology_update();
	proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);
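	/*
	 * The file created above appears as /proc/powerpc/topology_updates;
	 * writing "on" or "off" to it reaches start_topology_update() or
	 * stop_topology_update() via topology_write().
	 */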
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */