/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
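/*
 * For illustration only (hypothetical values, not from the source):
 * booting with "numa=fake=1G,4G" makes the parser above carve fake
 * node boundaries at 1GB and 4GB, so memory scanned past the 1GB
 * boundary lands in fake node 1 and memory past 4GB in fake node 2.
 */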
/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
					  struct node_active_region *node_ar)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (pfn >= start_pfn && pfn < end_pfn) {
			node_ar->nid = nid;
			node_ar->start_pfn = start_pfn;
			node_ar->end_pfn = end_pfn;
			break;
		}
	}
}

static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;
	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;
	return prop;
}
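/*
 * A worked example for __node_distance() below, with hypothetical
 * table contents: given distance_ref_points_depth == 2 and
 *	distance_lookup_table[0] = { 1, 5 }
 *	distance_lookup_table[1] = { 2, 5 }
 * the two nodes differ at the most significant level and match at the
 * next, so the loop doubles the distance once and returns
 * 2 * LOCAL_DISTANCE.  Nodes with identical rows return LOCAL_DISTANCE.
 */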
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}

static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i])];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 &&
	    of_read_number(associativity, 1) >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}
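/*
 * For illustration (hypothetical property contents): an
 * ibm,associativity value of <4 0 0 1 2> encodes a 4-entry
 * associativity list.  With min_common_depth == 4,
 * associativity_to_nid() above reads cell 4 and yields nid 2; with
 * min_common_depth == 5 the list is too short and -1 is returned.
 */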
/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
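	/*
	 * Hypothetical example: a form 1 value of <4 2> means
	 * associativity level 4 is the most significant boundary and
	 * level 2 a less significant one, giving
	 * distance_ref_points_depth == 2 and min_common_depth == 4.
	 */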
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
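/*
 * Example: with n == 2 and big-endian cells { 0x00000001, 0x20000000 },
 * read_n_cells() above returns 0x0000000120000000 and leaves *buf
 * pointing past the two cells consumed.
 */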
/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
	const __be32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = of_read_number(cp, 1);
	drmem->reserved = of_read_number(&cp[1], 1);
	drmem->aa_index = of_read_number(&cp[2], 1);
	drmem->flags = of_read_number(&cp[3], 1);

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a cell containing the
 * number of entries N, followed by the N memblock list entries.  Each
 * memblock list entry contains information as laid out in the
 * of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
	const __be32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = of_read_number(prop++, 1);

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
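/*
 * For illustration (hypothetical values): with n_mem_addr_cells == 2,
 * each list entry is 2 address cells plus the 4 cells decoded by
 * read_drconf_cell() above, so a property of
 *	< 2  0x0 0x10000000 drc1 rsv1 aa1 flags1
 *	     0x0 0x20000000 drc2 rsv2 aa2 flags2 >
 * describes two LMBs based at 0x10000000 and 0x20000000.
 */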
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
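/*
 * For illustration (hypothetical values): a property of
 *	< 2 4  0 0 0 1  0 0 0 2 >
 * holds N == 2 arrays of M == 4 entries each.  In
 * of_drconf_to_nid_single() below, an LMB whose aa_index is 1 selects
 * the second array, and with min_common_depth == 4 the nid comes from
 * entry index 1 * 4 + 4 - 1 == 7, i.e. nid 2.
 */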
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = of_read_number(&aa->arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}
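/*
 * Example of the limit enforcement below: if memblock_end_of_DRAM()
 * is 0x40000000, a region starting at 0x30000000 with size 0x20000000
 * is trimmed to 0x10000000, and a region starting at or above
 * 0x40000000 is dropped entirely (size 0).
 */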
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
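/*
 * For illustration (hypothetical values): in a kdump kernel the
 * linux,drconf-usable-memory entry for an LMB might be
 *	< 1  base size >
 * i.e. a counter of 1 followed by one (base, size) pair; a counter
 * of 0 means the LMB contributes no usable memory and is skipped by
 * parse_drconf_memory() below.
 */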
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const __be32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
				&nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz, nid);
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}
/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required.  nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
					unsigned long align,
					unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}
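/*
 * Typical use, taken from do_init_bootmem() below:
 *
 *	NODE_DATA(nid) = careful_zallocation(nid,
 *				sizeof(struct pglist_data),
 *				SMP_CACHE_BYTES, end_pfn);
 */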
static struct notifier_block ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
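/*
 * Example command line usage of the "numa=" option parsed above
 * (the boundaries after "fake=" are hypothetical):
 *
 *	numa=off		disable NUMA
 *	numa=debug		enable dbg() output
 *	numa=fake=1G,4G		fake node boundaries at 1GB and 4GB
 */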
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const __be32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
1156237a0989SMike Kravetz  */
11570f16ef7fSNathan Fontenot int hot_add_node_scn_to_nid(unsigned long scn_addr)
1158237a0989SMike Kravetz {
115994db7c5eSAnton Blanchard 	struct device_node *memory;
11600f16ef7fSNathan Fontenot 	int nid = -1;
1161237a0989SMike Kravetz 
116294db7c5eSAnton Blanchard 	for_each_node_by_type(memory, "memory") {
1163237a0989SMike Kravetz 		unsigned long start, size;
1164b226e462SMike Kravetz 		int ranges;
1165*b08a2a12SAlistair Popple 		const __be32 *memcell_buf;
1166237a0989SMike Kravetz 		unsigned int len;
1167237a0989SMike Kravetz 
1168e2eb6392SStephen Rothwell 		memcell_buf = of_get_property(memory, "reg", &len);
1169237a0989SMike Kravetz 		if (!memcell_buf || len <= 0)
1170237a0989SMike Kravetz 			continue;
1171237a0989SMike Kravetz 
1172cc5d0189SBenjamin Herrenschmidt 		/* number of (addr, size) ranges in the reg property */
1173cc5d0189SBenjamin Herrenschmidt 		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
11740f16ef7fSNathan Fontenot 
11750f16ef7fSNathan Fontenot 		while (ranges--) {
1176237a0989SMike Kravetz 			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
1177237a0989SMike Kravetz 			size = read_n_cells(n_mem_size_cells, &memcell_buf);
1178237a0989SMike Kravetz 
11790f16ef7fSNathan Fontenot 			if ((scn_addr < start) || (scn_addr >= (start + size)))
11800f16ef7fSNathan Fontenot 				continue;
11810f16ef7fSNathan Fontenot 
11820f16ef7fSNathan Fontenot 			nid = of_node_to_nid_single(memory);
11830f16ef7fSNathan Fontenot 			break;
11840f16ef7fSNathan Fontenot 		}
11850f16ef7fSNathan Fontenot 
11860f16ef7fSNathan Fontenot 		if (nid >= 0)
11870f16ef7fSNathan Fontenot 			break;
11880f16ef7fSNathan Fontenot 	}
11890f16ef7fSNathan Fontenot 
119060831842SAnton Blanchard 	of_node_put(memory);
119160831842SAnton Blanchard 
11920db9360aSNathan Fontenot 	return nid;
1193237a0989SMike Kravetz }
1194237a0989SMike Kravetz 
11950f16ef7fSNathan Fontenot /*
11960f16ef7fSNathan Fontenot  * Find the node associated with a hot added memory section. The section
119795f72d1eSYinghai Lu  * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
119895f72d1eSYinghai Lu  * sections are fully contained within a single MEMBLOCK.
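 *
 * As an illustration (the exact values are configuration dependent):
 * with the usual 16MB SPARSEMEM sections and, say, 256MB LMBs, sixteen
 * consecutive sections map to the same LMB and therefore to the same
 * node.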
11990f16ef7fSNathan Fontenot */ 12000f16ef7fSNathan Fontenot int hot_add_scn_to_nid(unsigned long scn_addr) 12010f16ef7fSNathan Fontenot { 12020f16ef7fSNathan Fontenot struct device_node *memory = NULL; 12030f16ef7fSNathan Fontenot int nid, found = 0; 12040f16ef7fSNathan Fontenot 12050f16ef7fSNathan Fontenot if (!numa_enabled || (min_common_depth < 0)) 120672c33688SH Hartley Sweeten return first_online_node; 12070f16ef7fSNathan Fontenot 12080f16ef7fSNathan Fontenot memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 12090f16ef7fSNathan Fontenot if (memory) { 12100f16ef7fSNathan Fontenot nid = hot_add_drconf_scn_to_nid(memory, scn_addr); 12110f16ef7fSNathan Fontenot of_node_put(memory); 12120f16ef7fSNathan Fontenot } else { 12130f16ef7fSNathan Fontenot nid = hot_add_node_scn_to_nid(scn_addr); 1214237a0989SMike Kravetz } 12150f16ef7fSNathan Fontenot 12160f16ef7fSNathan Fontenot if (nid < 0 || !node_online(nid)) 121772c33688SH Hartley Sweeten nid = first_online_node; 12180f16ef7fSNathan Fontenot 12190f16ef7fSNathan Fontenot if (NODE_DATA(nid)->node_spanned_pages) 12200f16ef7fSNathan Fontenot return nid; 12210f16ef7fSNathan Fontenot 12220f16ef7fSNathan Fontenot for_each_online_node(nid) { 12230f16ef7fSNathan Fontenot if (NODE_DATA(nid)->node_spanned_pages) { 12240f16ef7fSNathan Fontenot found = 1; 12250f16ef7fSNathan Fontenot break; 1226237a0989SMike Kravetz } 12270f16ef7fSNathan Fontenot } 12280f16ef7fSNathan Fontenot 12290f16ef7fSNathan Fontenot BUG_ON(!found); 12300f16ef7fSNathan Fontenot return nid; 12310f16ef7fSNathan Fontenot } 12320f16ef7fSNathan Fontenot 1233cd34206eSNishanth Aravamudan static u64 hot_add_drconf_memory_max(void) 1234cd34206eSNishanth Aravamudan { 1235cd34206eSNishanth Aravamudan struct device_node *memory = NULL; 1236cd34206eSNishanth Aravamudan unsigned int drconf_cell_cnt = 0; 1237cd34206eSNishanth Aravamudan u64 lmb_size = 0; 1238*b08a2a12SAlistair Popple const __be32 *dm = 0; 1239cd34206eSNishanth Aravamudan 1240cd34206eSNishanth Aravamudan memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 1241cd34206eSNishanth Aravamudan if (memory) { 1242cd34206eSNishanth Aravamudan drconf_cell_cnt = of_get_drconf_memory(memory, &dm); 1243cd34206eSNishanth Aravamudan lmb_size = of_get_lmb_size(memory); 1244cd34206eSNishanth Aravamudan of_node_put(memory); 1245cd34206eSNishanth Aravamudan } 1246cd34206eSNishanth Aravamudan return lmb_size * drconf_cell_cnt; 1247cd34206eSNishanth Aravamudan } 1248cd34206eSNishanth Aravamudan 1249cd34206eSNishanth Aravamudan /* 1250cd34206eSNishanth Aravamudan * memory_hotplug_max - return max address of memory that may be added 1251cd34206eSNishanth Aravamudan * 1252cd34206eSNishanth Aravamudan * This is currently only used on systems that support drconfig memory 1253cd34206eSNishanth Aravamudan * hotplug. 
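 *
 * The value returned is the larger of the end of memory currently owned
 * by the partition (memblock_end_of_DRAM()) and the end of the range
 * covered by ibm,dynamic-reconfiguration-memory, i.e. lmb_size times
 * the number of LMB entries.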
1254cd34206eSNishanth Aravamudan */ 1255cd34206eSNishanth Aravamudan u64 memory_hotplug_max(void) 1256cd34206eSNishanth Aravamudan { 1257cd34206eSNishanth Aravamudan return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM()); 1258cd34206eSNishanth Aravamudan } 1259237a0989SMike Kravetz #endif /* CONFIG_MEMORY_HOTPLUG */ 12609eff1a38SJesse Larrew 1261bd03403aSJesse Larrew /* Virtual Processor Home Node (VPHN) support */ 126239bf990eSJesse Larrew #ifdef CONFIG_PPC_SPLPAR 126330c05350SNathan Fontenot struct topology_update_data { 126430c05350SNathan Fontenot struct topology_update_data *next; 126530c05350SNathan Fontenot unsigned int cpu; 126630c05350SNathan Fontenot int old_nid; 126730c05350SNathan Fontenot int new_nid; 126830c05350SNathan Fontenot }; 126930c05350SNathan Fontenot 12705de16699SAnton Blanchard static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; 12719eff1a38SJesse Larrew static cpumask_t cpu_associativity_changes_mask; 12729eff1a38SJesse Larrew static int vphn_enabled; 12735d88aa85SJesse Larrew static int prrn_enabled; 12745d88aa85SJesse Larrew static void reset_topology_timer(void); 12759eff1a38SJesse Larrew 12769eff1a38SJesse Larrew /* 12779eff1a38SJesse Larrew * Store the current values of the associativity change counters in the 12789eff1a38SJesse Larrew * hypervisor. 12799eff1a38SJesse Larrew */ 12809eff1a38SJesse Larrew static void setup_cpu_associativity_change_counters(void) 12819eff1a38SJesse Larrew { 1282cd9d6cc7SJesse Larrew int cpu; 12839eff1a38SJesse Larrew 12845de16699SAnton Blanchard /* The VPHN feature supports a maximum of 8 reference points */ 12855de16699SAnton Blanchard BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); 12865de16699SAnton Blanchard 12879eff1a38SJesse Larrew for_each_possible_cpu(cpu) { 1288cd9d6cc7SJesse Larrew int i; 12899eff1a38SJesse Larrew u8 *counts = vphn_cpu_change_counts[cpu]; 12909eff1a38SJesse Larrew volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; 12919eff1a38SJesse Larrew 12925de16699SAnton Blanchard for (i = 0; i < distance_ref_points_depth; i++) 12939eff1a38SJesse Larrew counts[i] = hypervisor_counts[i]; 12949eff1a38SJesse Larrew } 12959eff1a38SJesse Larrew } 12969eff1a38SJesse Larrew 12979eff1a38SJesse Larrew /* 12989eff1a38SJesse Larrew * The hypervisor maintains a set of 8 associativity change counters in 12999eff1a38SJesse Larrew * the VPA of each cpu that correspond to the associativity levels in the 13009eff1a38SJesse Larrew * ibm,associativity-reference-points property. When an associativity 13019eff1a38SJesse Larrew * level changes, the corresponding counter is incremented. 13029eff1a38SJesse Larrew * 13039eff1a38SJesse Larrew * Set a bit in cpu_associativity_changes_mask for each cpu whose home 13049eff1a38SJesse Larrew * node associativity levels have changed. 13059eff1a38SJesse Larrew * 13069eff1a38SJesse Larrew * Returns the number of cpus with unhandled associativity changes. 
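 *
 * Illustrative example: with distance_ref_points_depth == 4, a cached
 * copy of {5, 2, 6, 1} against VPA counters now reading {5, 2, 7, 1}
 * means the third associativity level changed, so that cpu and all of
 * its hardware-thread siblings get flagged for an update.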
13079eff1a38SJesse Larrew */ 13089eff1a38SJesse Larrew static int update_cpu_associativity_changes_mask(void) 13099eff1a38SJesse Larrew { 13105d88aa85SJesse Larrew int cpu; 13119eff1a38SJesse Larrew cpumask_t *changes = &cpu_associativity_changes_mask; 13129eff1a38SJesse Larrew 13139eff1a38SJesse Larrew for_each_possible_cpu(cpu) { 13149eff1a38SJesse Larrew int i, changed = 0; 13159eff1a38SJesse Larrew u8 *counts = vphn_cpu_change_counts[cpu]; 13169eff1a38SJesse Larrew volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; 13179eff1a38SJesse Larrew 13185de16699SAnton Blanchard for (i = 0; i < distance_ref_points_depth; i++) { 1319d69043e8SAnton Blanchard if (hypervisor_counts[i] != counts[i]) { 13209eff1a38SJesse Larrew counts[i] = hypervisor_counts[i]; 13219eff1a38SJesse Larrew changed = 1; 13229eff1a38SJesse Larrew } 13239eff1a38SJesse Larrew } 13249eff1a38SJesse Larrew if (changed) { 13253be7db6aSRobert Jennings cpumask_or(changes, changes, cpu_sibling_mask(cpu)); 13263be7db6aSRobert Jennings cpu = cpu_last_thread_sibling(cpu); 13279eff1a38SJesse Larrew } 13289eff1a38SJesse Larrew } 13299eff1a38SJesse Larrew 13305d88aa85SJesse Larrew return cpumask_weight(changes); 13319eff1a38SJesse Larrew } 13329eff1a38SJesse Larrew 1333c0e5e46fSAnton Blanchard /* 1334c0e5e46fSAnton Blanchard * 6 64-bit registers unpacked into 12 32-bit associativity values. To form 1335c0e5e46fSAnton Blanchard * the complete property we have to add the length in the first cell. 1336c0e5e46fSAnton Blanchard */ 1337c0e5e46fSAnton Blanchard #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) 13389eff1a38SJesse Larrew 13399eff1a38SJesse Larrew /* 13409eff1a38SJesse Larrew * Convert the associativity domain numbers returned from the hypervisor 13419eff1a38SJesse Larrew * to the sequence they would appear in the ibm,associativity property. 13429eff1a38SJesse Larrew */ 1343*b08a2a12SAlistair Popple static int vphn_unpack_associativity(const long *packed, __be32 *unpacked) 13449eff1a38SJesse Larrew { 1345cd9d6cc7SJesse Larrew int i, nr_assoc_doms = 0; 1346*b08a2a12SAlistair Popple const __be16 *field = (const __be16 *) packed; 13479eff1a38SJesse Larrew 13489eff1a38SJesse Larrew #define VPHN_FIELD_UNUSED (0xffff) 13499eff1a38SJesse Larrew #define VPHN_FIELD_MSB (0x8000) 13509eff1a38SJesse Larrew #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) 13519eff1a38SJesse Larrew 1352c0e5e46fSAnton Blanchard for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { 1353*b08a2a12SAlistair Popple if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) { 13549eff1a38SJesse Larrew /* All significant fields processed, and remaining 13559eff1a38SJesse Larrew * fields contain the reserved value of all 1's. 13569eff1a38SJesse Larrew * Just store them. 
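			 * (The 32-bit copy moves two 16-bit fields at
			 * once, which is why field advances by two.)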
13579eff1a38SJesse Larrew */ 1358*b08a2a12SAlistair Popple unpacked[i] = *((__be32 *)field); 13599eff1a38SJesse Larrew field += 2; 1360*b08a2a12SAlistair Popple } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) { 13619eff1a38SJesse Larrew /* Data is in the lower 15 bits of this field */ 1362*b08a2a12SAlistair Popple unpacked[i] = cpu_to_be32( 1363*b08a2a12SAlistair Popple be16_to_cpup(field) & VPHN_FIELD_MASK); 13649eff1a38SJesse Larrew field++; 13659eff1a38SJesse Larrew nr_assoc_doms++; 13667639adaaSJesse Larrew } else { 13679eff1a38SJesse Larrew /* Data is in the lower 15 bits of this field 13689eff1a38SJesse Larrew * concatenated with the next 16 bit field 13699eff1a38SJesse Larrew */ 1370*b08a2a12SAlistair Popple unpacked[i] = *((__be32 *)field); 13719eff1a38SJesse Larrew field += 2; 13729eff1a38SJesse Larrew nr_assoc_doms++; 13739eff1a38SJesse Larrew } 13749eff1a38SJesse Larrew } 13759eff1a38SJesse Larrew 1376c0e5e46fSAnton Blanchard /* The first cell contains the length of the property */ 1377*b08a2a12SAlistair Popple unpacked[0] = cpu_to_be32(nr_assoc_doms); 1378c0e5e46fSAnton Blanchard 13799eff1a38SJesse Larrew return nr_assoc_doms; 13809eff1a38SJesse Larrew } 13819eff1a38SJesse Larrew 13829eff1a38SJesse Larrew /* 13839eff1a38SJesse Larrew * Retrieve the new associativity information for a virtual processor's 13849eff1a38SJesse Larrew * home node. 13859eff1a38SJesse Larrew */ 1386*b08a2a12SAlistair Popple static long hcall_vphn(unsigned long cpu, __be32 *associativity) 13879eff1a38SJesse Larrew { 1388cd9d6cc7SJesse Larrew long rc; 13899eff1a38SJesse Larrew long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; 13909eff1a38SJesse Larrew u64 flags = 1; 13919eff1a38SJesse Larrew int hwcpu = get_hard_smp_processor_id(cpu); 13929eff1a38SJesse Larrew 13939eff1a38SJesse Larrew rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); 13949eff1a38SJesse Larrew vphn_unpack_associativity(retbuf, associativity); 13959eff1a38SJesse Larrew 13969eff1a38SJesse Larrew return rc; 13979eff1a38SJesse Larrew } 13989eff1a38SJesse Larrew 13999eff1a38SJesse Larrew static long vphn_get_associativity(unsigned long cpu, 1400*b08a2a12SAlistair Popple __be32 *associativity) 14019eff1a38SJesse Larrew { 1402cd9d6cc7SJesse Larrew long rc; 14039eff1a38SJesse Larrew 14049eff1a38SJesse Larrew rc = hcall_vphn(cpu, associativity); 14059eff1a38SJesse Larrew 14069eff1a38SJesse Larrew switch (rc) { 14079eff1a38SJesse Larrew case H_FUNCTION: 14089eff1a38SJesse Larrew printk(KERN_INFO 14099eff1a38SJesse Larrew "VPHN is not supported. Disabling polling...\n"); 14109eff1a38SJesse Larrew stop_topology_update(); 14119eff1a38SJesse Larrew break; 14129eff1a38SJesse Larrew case H_HARDWARE: 14139eff1a38SJesse Larrew printk(KERN_ERR 14149eff1a38SJesse Larrew "hcall_vphn() experienced a hardware fault " 14159eff1a38SJesse Larrew "preventing VPHN. Disabling polling...\n"); 14169eff1a38SJesse Larrew stop_topology_update(); 14179eff1a38SJesse Larrew } 14189eff1a38SJesse Larrew 14199eff1a38SJesse Larrew return rc; 14209eff1a38SJesse Larrew } 14219eff1a38SJesse Larrew 14229eff1a38SJesse Larrew /* 142330c05350SNathan Fontenot * Update the CPU maps and sysfs entries for a single CPU when its NUMA 142430c05350SNathan Fontenot * characteristics change. This function doesn't perform any locking and is 142530c05350SNathan Fontenot * only safe to call from stop_machine(). 
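 *
 * stop_machine() runs this on every cpu in the update mask; each cpu
 * scans the shared update list and applies only the entries that name
 * it, so no two cpus ever touch the same mapping.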
142630c05350SNathan Fontenot  */
142730c05350SNathan Fontenot static int update_cpu_topology(void *data)
142830c05350SNathan Fontenot {
142930c05350SNathan Fontenot 	struct topology_update_data *update;
143030c05350SNathan Fontenot 	unsigned long cpu;
143130c05350SNathan Fontenot 
143230c05350SNathan Fontenot 	if (!data)
143330c05350SNathan Fontenot 		return -EINVAL;
143430c05350SNathan Fontenot 
14353be7db6aSRobert Jennings 	cpu = smp_processor_id();
143630c05350SNathan Fontenot 
143730c05350SNathan Fontenot 	for (update = data; update; update = update->next) {
143830c05350SNathan Fontenot 		if (cpu != update->cpu)
143930c05350SNathan Fontenot 			continue;
144030c05350SNathan Fontenot 
144130c05350SNathan Fontenot 		unmap_cpu_from_node(update->cpu);
144230c05350SNathan Fontenot 		map_cpu_to_node(update->cpu, update->new_nid);
1443176bbf14SJesse Larrew 		vdso_getcpu_init();
144430c05350SNathan Fontenot 	}
144530c05350SNathan Fontenot 
144630c05350SNathan Fontenot 	return 0;
144730c05350SNathan Fontenot }
144830c05350SNathan Fontenot 
144930c05350SNathan Fontenot /*
14509eff1a38SJesse Larrew  * Update the node maps and sysfs entries for each cpu whose home node
145179c5fcebSJesse Larrew  * has changed. Returns 1 when the topology has changed, and 0 otherwise.
14529eff1a38SJesse Larrew  */
14539eff1a38SJesse Larrew int arch_update_cpu_topology(void)
14549eff1a38SJesse Larrew {
14553be7db6aSRobert Jennings 	unsigned int cpu, sibling, changed = 0;
145630c05350SNathan Fontenot 	struct topology_update_data *updates, *ud;
1457*b08a2a12SAlistair Popple 	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1458176bbf14SJesse Larrew 	cpumask_t updated_cpus;
14598a25a2fdSKay Sievers 	struct device *dev;
14603be7db6aSRobert Jennings 	int weight, new_nid, i = 0;
146130c05350SNathan Fontenot 
146230c05350SNathan Fontenot 	weight = cpumask_weight(&cpu_associativity_changes_mask);
146330c05350SNathan Fontenot 	if (!weight)
146430c05350SNathan Fontenot 		return 0;
146530c05350SNathan Fontenot 
146630c05350SNathan Fontenot 	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
146730c05350SNathan Fontenot 	if (!updates)
146830c05350SNathan Fontenot 		return 0;
14699eff1a38SJesse Larrew 
1470176bbf14SJesse Larrew 	cpumask_clear(&updated_cpus);
14719eff1a38SJesse Larrew 
1472104699c0SKOSAKI Motohiro 	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
14733be7db6aSRobert Jennings 		/*
14743be7db6aSRobert Jennings 		 * If the siblings aren't flagged for changes, the updates
14753be7db6aSRobert Jennings 		 * list will be too short. Skip this update now and flag the
14763be7db6aSRobert Jennings 		 * siblings so it is handled on the next pass.
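		 *
		 * All threads of a core share one home node, so the
		 * associativity read once from the first thread is applied
		 * to every sibling and the scan then jumps to the last
		 * thread of the core (cpu_last_thread_sibling()).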
14773be7db6aSRobert Jennings */ 14783be7db6aSRobert Jennings if (!cpumask_subset(cpu_sibling_mask(cpu), 14793be7db6aSRobert Jennings &cpu_associativity_changes_mask)) { 14803be7db6aSRobert Jennings pr_info("Sibling bits not set for associativity " 14813be7db6aSRobert Jennings "change, cpu%d\n", cpu); 14823be7db6aSRobert Jennings cpumask_or(&cpu_associativity_changes_mask, 14833be7db6aSRobert Jennings &cpu_associativity_changes_mask, 14843be7db6aSRobert Jennings cpu_sibling_mask(cpu)); 14853be7db6aSRobert Jennings cpu = cpu_last_thread_sibling(cpu); 14863be7db6aSRobert Jennings continue; 14873be7db6aSRobert Jennings } 14883be7db6aSRobert Jennings 14893be7db6aSRobert Jennings /* Use associativity from first thread for all siblings */ 14909eff1a38SJesse Larrew vphn_get_associativity(cpu, associativity); 14913be7db6aSRobert Jennings new_nid = associativity_to_nid(associativity); 14923be7db6aSRobert Jennings if (new_nid < 0 || !node_online(new_nid)) 14933be7db6aSRobert Jennings new_nid = first_online_node; 14949eff1a38SJesse Larrew 14953be7db6aSRobert Jennings if (new_nid == numa_cpu_lookup_table[cpu]) { 14963be7db6aSRobert Jennings cpumask_andnot(&cpu_associativity_changes_mask, 14973be7db6aSRobert Jennings &cpu_associativity_changes_mask, 14983be7db6aSRobert Jennings cpu_sibling_mask(cpu)); 14993be7db6aSRobert Jennings cpu = cpu_last_thread_sibling(cpu); 15003be7db6aSRobert Jennings continue; 15013be7db6aSRobert Jennings } 15029eff1a38SJesse Larrew 15033be7db6aSRobert Jennings for_each_cpu(sibling, cpu_sibling_mask(cpu)) { 15043be7db6aSRobert Jennings ud = &updates[i++]; 15053be7db6aSRobert Jennings ud->cpu = sibling; 15063be7db6aSRobert Jennings ud->new_nid = new_nid; 15073be7db6aSRobert Jennings ud->old_nid = numa_cpu_lookup_table[sibling]; 15083be7db6aSRobert Jennings cpumask_set_cpu(sibling, &updated_cpus); 150930c05350SNathan Fontenot if (i < weight) 151030c05350SNathan Fontenot ud->next = &updates[i]; 151130c05350SNathan Fontenot } 15123be7db6aSRobert Jennings cpu = cpu_last_thread_sibling(cpu); 15133be7db6aSRobert Jennings } 15149eff1a38SJesse Larrew 1515176bbf14SJesse Larrew stop_machine(update_cpu_topology, &updates[0], &updated_cpus); 151630c05350SNathan Fontenot 151730c05350SNathan Fontenot for (ud = &updates[0]; ud; ud = ud->next) { 1518dd023217SNathan Fontenot unregister_cpu_under_node(ud->cpu, ud->old_nid); 1519dd023217SNathan Fontenot register_cpu_under_node(ud->cpu, ud->new_nid); 1520dd023217SNathan Fontenot 152130c05350SNathan Fontenot dev = get_cpu_device(ud->cpu); 15228a25a2fdSKay Sievers if (dev) 15238a25a2fdSKay Sievers kobject_uevent(&dev->kobj, KOBJ_CHANGE); 152430c05350SNathan Fontenot cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask); 152579c5fcebSJesse Larrew changed = 1; 15269eff1a38SJesse Larrew } 15279eff1a38SJesse Larrew 152830c05350SNathan Fontenot kfree(updates); 152979c5fcebSJesse Larrew return changed; 15309eff1a38SJesse Larrew } 15319eff1a38SJesse Larrew 15329eff1a38SJesse Larrew static void topology_work_fn(struct work_struct *work) 15339eff1a38SJesse Larrew { 15349eff1a38SJesse Larrew rebuild_sched_domains(); 15359eff1a38SJesse Larrew } 15369eff1a38SJesse Larrew static DECLARE_WORK(topology_work, topology_work_fn); 15379eff1a38SJesse Larrew 15389eff1a38SJesse Larrew void topology_schedule_update(void) 15399eff1a38SJesse Larrew { 15409eff1a38SJesse Larrew schedule_work(&topology_work); 15419eff1a38SJesse Larrew } 15429eff1a38SJesse Larrew 15439eff1a38SJesse Larrew static void topology_timer_fn(unsigned long ignored) 15449eff1a38SJesse Larrew { 
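	/*
	 * PRRN events mark cpus directly from the device-tree notifier,
	 * so any bit already set means there is work to do; with VPHN
	 * the VPA counters must be polled here and the timer re-armed.
	 */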
15455d88aa85SJesse Larrew if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask)) 15465d88aa85SJesse Larrew topology_schedule_update(); 15475d88aa85SJesse Larrew else if (vphn_enabled) { 15489eff1a38SJesse Larrew if (update_cpu_associativity_changes_mask() > 0) 15499eff1a38SJesse Larrew topology_schedule_update(); 15505d88aa85SJesse Larrew reset_topology_timer(); 15515d88aa85SJesse Larrew } 15529eff1a38SJesse Larrew } 15539eff1a38SJesse Larrew static struct timer_list topology_timer = 15549eff1a38SJesse Larrew TIMER_INITIALIZER(topology_timer_fn, 0, 0); 15559eff1a38SJesse Larrew 15565d88aa85SJesse Larrew static void reset_topology_timer(void) 15579eff1a38SJesse Larrew { 15589eff1a38SJesse Larrew topology_timer.data = 0; 15599eff1a38SJesse Larrew topology_timer.expires = jiffies + 60 * HZ; 15605d88aa85SJesse Larrew mod_timer(&topology_timer, topology_timer.expires); 15619eff1a38SJesse Larrew } 15629eff1a38SJesse Larrew 1563601abdc3SNathan Fontenot #ifdef CONFIG_SMP 1564601abdc3SNathan Fontenot 15655d88aa85SJesse Larrew static void stage_topology_update(int core_id) 15665d88aa85SJesse Larrew { 15675d88aa85SJesse Larrew cpumask_or(&cpu_associativity_changes_mask, 15685d88aa85SJesse Larrew &cpu_associativity_changes_mask, cpu_sibling_mask(core_id)); 15695d88aa85SJesse Larrew reset_topology_timer(); 15705d88aa85SJesse Larrew } 15715d88aa85SJesse Larrew 15725d88aa85SJesse Larrew static int dt_update_callback(struct notifier_block *nb, 15735d88aa85SJesse Larrew unsigned long action, void *data) 15745d88aa85SJesse Larrew { 15755d88aa85SJesse Larrew struct of_prop_reconfig *update; 15765d88aa85SJesse Larrew int rc = NOTIFY_DONE; 15775d88aa85SJesse Larrew 15785d88aa85SJesse Larrew switch (action) { 15795d88aa85SJesse Larrew case OF_RECONFIG_UPDATE_PROPERTY: 15805d88aa85SJesse Larrew update = (struct of_prop_reconfig *)data; 158130c05350SNathan Fontenot if (!of_prop_cmp(update->dn->type, "cpu") && 158230c05350SNathan Fontenot !of_prop_cmp(update->prop->name, "ibm,associativity")) { 15835d88aa85SJesse Larrew u32 core_id; 15845d88aa85SJesse Larrew of_property_read_u32(update->dn, "reg", &core_id); 15855d88aa85SJesse Larrew stage_topology_update(core_id); 15865d88aa85SJesse Larrew rc = NOTIFY_OK; 15875d88aa85SJesse Larrew } 15885d88aa85SJesse Larrew break; 15895d88aa85SJesse Larrew } 15905d88aa85SJesse Larrew 15915d88aa85SJesse Larrew return rc; 15925d88aa85SJesse Larrew } 15935d88aa85SJesse Larrew 15945d88aa85SJesse Larrew static struct notifier_block dt_update_nb = { 15955d88aa85SJesse Larrew .notifier_call = dt_update_callback, 15965d88aa85SJesse Larrew }; 15975d88aa85SJesse Larrew 1598601abdc3SNathan Fontenot #endif 1599601abdc3SNathan Fontenot 16009eff1a38SJesse Larrew /* 16015d88aa85SJesse Larrew * Start polling for associativity changes. 
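 *
 * PRRN-capable firmware is preferred: updates then arrive through a
 * device-tree notifier. Otherwise, on shared-processor LPARs with
 * VPHN, a deferrable timer polls the VPA counters instead.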
16029eff1a38SJesse Larrew */ 16039eff1a38SJesse Larrew int start_topology_update(void) 16049eff1a38SJesse Larrew { 16059eff1a38SJesse Larrew int rc = 0; 16069eff1a38SJesse Larrew 16075d88aa85SJesse Larrew if (firmware_has_feature(FW_FEATURE_PRRN)) { 16085d88aa85SJesse Larrew if (!prrn_enabled) { 16095d88aa85SJesse Larrew prrn_enabled = 1; 16105d88aa85SJesse Larrew vphn_enabled = 0; 1611601abdc3SNathan Fontenot #ifdef CONFIG_SMP 16125d88aa85SJesse Larrew rc = of_reconfig_notifier_register(&dt_update_nb); 1613601abdc3SNathan Fontenot #endif 16145d88aa85SJesse Larrew } 1615b7abef04SJesse Larrew } else if (firmware_has_feature(FW_FEATURE_VPHN) && 1616f13c13a0SAnton Blanchard lppaca_shared_proc(get_lppaca())) { 16175d88aa85SJesse Larrew if (!vphn_enabled) { 16185d88aa85SJesse Larrew prrn_enabled = 0; 16199eff1a38SJesse Larrew vphn_enabled = 1; 16209eff1a38SJesse Larrew setup_cpu_associativity_change_counters(); 16219eff1a38SJesse Larrew init_timer_deferrable(&topology_timer); 16225d88aa85SJesse Larrew reset_topology_timer(); 16235d88aa85SJesse Larrew } 16249eff1a38SJesse Larrew } 16259eff1a38SJesse Larrew 16269eff1a38SJesse Larrew return rc; 16279eff1a38SJesse Larrew } 16289eff1a38SJesse Larrew 16299eff1a38SJesse Larrew /* 16309eff1a38SJesse Larrew * Disable polling for VPHN associativity changes. 16319eff1a38SJesse Larrew */ 16329eff1a38SJesse Larrew int stop_topology_update(void) 16339eff1a38SJesse Larrew { 16345d88aa85SJesse Larrew int rc = 0; 16355d88aa85SJesse Larrew 16365d88aa85SJesse Larrew if (prrn_enabled) { 16375d88aa85SJesse Larrew prrn_enabled = 0; 1638601abdc3SNathan Fontenot #ifdef CONFIG_SMP 16395d88aa85SJesse Larrew rc = of_reconfig_notifier_unregister(&dt_update_nb); 1640601abdc3SNathan Fontenot #endif 16415d88aa85SJesse Larrew } else if (vphn_enabled) { 16429eff1a38SJesse Larrew vphn_enabled = 0; 16435d88aa85SJesse Larrew rc = del_timer_sync(&topology_timer); 16449eff1a38SJesse Larrew } 16455d88aa85SJesse Larrew 16465d88aa85SJesse Larrew return rc; 1647ab1f9dacSPaul Mackerras } 1648e04fa612SNathan Fontenot 1649e04fa612SNathan Fontenot int prrn_is_enabled(void) 1650e04fa612SNathan Fontenot { 1651e04fa612SNathan Fontenot return prrn_enabled; 1652e04fa612SNathan Fontenot } 1653e04fa612SNathan Fontenot 1654e04fa612SNathan Fontenot static int topology_read(struct seq_file *file, void *v) 1655e04fa612SNathan Fontenot { 1656e04fa612SNathan Fontenot if (vphn_enabled || prrn_enabled) 1657e04fa612SNathan Fontenot seq_puts(file, "on\n"); 1658e04fa612SNathan Fontenot else 1659e04fa612SNathan Fontenot seq_puts(file, "off\n"); 1660e04fa612SNathan Fontenot 1661e04fa612SNathan Fontenot return 0; 1662e04fa612SNathan Fontenot } 1663e04fa612SNathan Fontenot 1664e04fa612SNathan Fontenot static int topology_open(struct inode *inode, struct file *file) 1665e04fa612SNathan Fontenot { 1666e04fa612SNathan Fontenot return single_open(file, topology_read, NULL); 1667e04fa612SNathan Fontenot } 1668e04fa612SNathan Fontenot 1669e04fa612SNathan Fontenot static ssize_t topology_write(struct file *file, const char __user *buf, 1670e04fa612SNathan Fontenot size_t count, loff_t *off) 1671e04fa612SNathan Fontenot { 1672e04fa612SNathan Fontenot char kbuf[4]; /* "on" or "off" plus null. */ 1673e04fa612SNathan Fontenot int read_len; 1674e04fa612SNathan Fontenot 1675e04fa612SNathan Fontenot read_len = count < 3 ? 
count : 3;
1676e04fa612SNathan Fontenot 	if (copy_from_user(kbuf, buf, read_len))
1677e04fa612SNathan Fontenot 		return -EFAULT;
1678e04fa612SNathan Fontenot 
1679e04fa612SNathan Fontenot 	kbuf[read_len] = '\0';
1680e04fa612SNathan Fontenot 
1681e04fa612SNathan Fontenot 	if (!strncmp(kbuf, "on", 2))
1682e04fa612SNathan Fontenot 		start_topology_update();
1683e04fa612SNathan Fontenot 	else if (!strncmp(kbuf, "off", 3))
1684e04fa612SNathan Fontenot 		stop_topology_update();
1685e04fa612SNathan Fontenot 	else
1686e04fa612SNathan Fontenot 		return -EINVAL;
1687e04fa612SNathan Fontenot 
1688e04fa612SNathan Fontenot 	return count;
1689e04fa612SNathan Fontenot }
1690e04fa612SNathan Fontenot 
1691e04fa612SNathan Fontenot static const struct file_operations topology_ops = {
1692e04fa612SNathan Fontenot 	.read = seq_read,
1693e04fa612SNathan Fontenot 	.write = topology_write,
1694e04fa612SNathan Fontenot 	.open = topology_open,
1695e04fa612SNathan Fontenot 	.release = single_release
1696e04fa612SNathan Fontenot };
1697e04fa612SNathan Fontenot 
1698e04fa612SNathan Fontenot static int topology_update_init(void)
1699e04fa612SNathan Fontenot {
1700e04fa612SNathan Fontenot 	start_topology_update();
1701e04fa612SNathan Fontenot 	proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);
1702e04fa612SNathan Fontenot 
1703e04fa612SNathan Fontenot 	return 0;
1704e04fa612SNathan Fontenot }
1705e04fa612SNathan Fontenot device_initcall(topology_update_init);
170639bf990eSJesse Larrew #endif /* CONFIG_PPC_SPLPAR */
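/*
 * Illustrative use of the proc interface created above (the path and
 * mode follow from the proc_create() call; the shell session itself is
 * a hypothetical example):
 *
 *	# cat /proc/powerpc/topology_updates
 *	on
 *	# echo off > /proc/powerpc/topology_updates
 *
 * Writing "on" invokes start_topology_update() and "off" invokes
 * stop_topology_update(); any other string fails with -EINVAL.
 */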