/*
 * Per-cpu area setup and cpu<->node mapping state for x86.
 *
 * NOTE(review): this file carries no original header comment; the one-line
 * purpose above is inferred from the definitions below — confirm against
 * the enclosing tree.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * map cpu index to physical APIC ID
 * (BAD_APICID is the "not yet known" sentinel used as the initial value)
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
/* convenience macro: NUMA support on 64-bit only, tested throughout below */
#define X86_64_NUMA 1

/* map cpu index to node index (NUMA_NO_NODE until assigned) */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * which logical CPUs are on which nodes; allocated later by
 * setup_node_to_cpumask_map(), NULL until then
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
/* !NUMA or 32-bit: nothing to set up */
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	/* copy each early map entry into the real per-cpu variables */
	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
/* 32-bit has no pda, nothing to do */
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
/* UP keeps the single static pda */
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 *
 * Layout of the single bootmem allocation:
 *   [ pointer table: nr_cpu_ids entries, cache-line rounded ]
 *   [ pda array: (nr_cpu_ids - 1) cache-line-rounded pdas   ]
 * Only nr_cpu_ids - 1 pdas are allocated because the boot cpu (cpu 0)
 * keeps its static pda (see the loop below).
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	/* per-pda stride, rounded so each pda is cache-line aligned */
	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		/* alloc_bootmem panics on failure, so no NULL check here */
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		/* mark as bootmem-backed so it is never kfree'd */
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	/* each per-cpu area is at least page aligned and page-size rounded */
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	printk(KERN_INFO
		"NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		/* flat memory: any bootmem above the DMA zone will do */
		ptr = __alloc_bootmem(size, align,
				 __pa(MAX_DMA_ADDRESS));
#else
		/* prefer node-local memory; fall back to any node */
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					 __pa(MAX_DMA_ADDRESS));
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
				cpu, node);
			if (ptr)
				printk(KERN_DEBUG
					"per cpu data for cpu%d at %016lx\n",
					 cpu, __pa(ptr));
		}
		else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
					__pa(MAX_DMA_ADDRESS));
			if (ptr)
				printk(KERN_DEBUG
					"per cpu data for cpu%d on node%d "
					"at %016lx\n",
					cpu, node, __pa(ptr));
		}
#endif
		/*
		 * Record the offset and seed the area from the static
		 * .data.percpu section.  NOTE(review): ptr is used
		 * unconditionally here; the bootmem allocators panic rather
		 * than return NULL, so the if (ptr) checks above are
		 * logging-only — confirm.
		 */
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet (highest possible node + 1) */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map (zeroed by the bootmem allocator) */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

/*
 * Record the node for a cpu, writing whichever map currently exists:
 * the early static array if still present, else the per-cpu variable
 * once that cpu's per-cpu area is set up.  Also mirrors the node into
 * the cpu's pda when one exists.
 */
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;

	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;

	else
		pr_debug("Setting node for non-present cpu %d\n", cpu);
}

/* Forget a cpu's node assignment (sets it back to NUMA_NO_NODE). */
void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS 256378b39a4SYinghai Lu 257378b39a4SYinghai Lu void __cpuinit numa_add_cpu(int cpu) 258378b39a4SYinghai Lu { 259378b39a4SYinghai Lu cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 260378b39a4SYinghai Lu } 261378b39a4SYinghai Lu 262378b39a4SYinghai Lu void __cpuinit numa_remove_cpu(int cpu) 263378b39a4SYinghai Lu { 264378b39a4SYinghai Lu cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]); 265378b39a4SYinghai Lu } 266378b39a4SYinghai Lu 267378b39a4SYinghai Lu #else /* CONFIG_DEBUG_PER_CPU_MAPS */ 268378b39a4SYinghai Lu 269378b39a4SYinghai Lu /* 270378b39a4SYinghai Lu * --------- debug versions of the numa functions --------- 271378b39a4SYinghai Lu */ 272378b39a4SYinghai Lu static void __cpuinit numa_set_cpumask(int cpu, int enable) 273378b39a4SYinghai Lu { 274378b39a4SYinghai Lu int node = cpu_to_node(cpu); 275378b39a4SYinghai Lu cpumask_t *mask; 276378b39a4SYinghai Lu char buf[64]; 277378b39a4SYinghai Lu 278378b39a4SYinghai Lu if (node_to_cpumask_map == NULL) { 279378b39a4SYinghai Lu printk(KERN_ERR "node_to_cpumask_map NULL\n"); 280378b39a4SYinghai Lu dump_stack(); 281378b39a4SYinghai Lu return; 282378b39a4SYinghai Lu } 283378b39a4SYinghai Lu 284378b39a4SYinghai Lu mask = &node_to_cpumask_map[node]; 285378b39a4SYinghai Lu if (enable) 286378b39a4SYinghai Lu cpu_set(cpu, *mask); 287378b39a4SYinghai Lu else 288378b39a4SYinghai Lu cpu_clear(cpu, *mask); 289378b39a4SYinghai Lu 29029c0177eSRusty Russell cpulist_scnprintf(buf, sizeof(buf), mask); 291378b39a4SYinghai Lu printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 292378b39a4SYinghai Lu enable? 
"numa_add_cpu":"numa_remove_cpu", cpu, node, buf); 293378b39a4SYinghai Lu } 294378b39a4SYinghai Lu 295378b39a4SYinghai Lu void __cpuinit numa_add_cpu(int cpu) 296378b39a4SYinghai Lu { 297378b39a4SYinghai Lu numa_set_cpumask(cpu, 1); 298378b39a4SYinghai Lu } 299378b39a4SYinghai Lu 300378b39a4SYinghai Lu void __cpuinit numa_remove_cpu(int cpu) 301378b39a4SYinghai Lu { 302378b39a4SYinghai Lu numa_set_cpumask(cpu, 0); 303378b39a4SYinghai Lu } 304378b39a4SYinghai Lu 305378b39a4SYinghai Lu int cpu_to_node(int cpu) 306378b39a4SYinghai Lu { 307378b39a4SYinghai Lu if (early_per_cpu_ptr(x86_cpu_to_node_map)) { 308378b39a4SYinghai Lu printk(KERN_WARNING 309378b39a4SYinghai Lu "cpu_to_node(%d): usage too early!\n", cpu); 310378b39a4SYinghai Lu dump_stack(); 311378b39a4SYinghai Lu return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; 312378b39a4SYinghai Lu } 313378b39a4SYinghai Lu return per_cpu(x86_cpu_to_node_map, cpu); 314378b39a4SYinghai Lu } 315378b39a4SYinghai Lu EXPORT_SYMBOL(cpu_to_node); 316378b39a4SYinghai Lu 317378b39a4SYinghai Lu /* 318378b39a4SYinghai Lu * Same function as cpu_to_node() but used if called before the 319378b39a4SYinghai Lu * per_cpu areas are setup. 
320378b39a4SYinghai Lu */ 321378b39a4SYinghai Lu int early_cpu_to_node(int cpu) 322378b39a4SYinghai Lu { 323378b39a4SYinghai Lu if (early_per_cpu_ptr(x86_cpu_to_node_map)) 324378b39a4SYinghai Lu return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; 325378b39a4SYinghai Lu 326378b39a4SYinghai Lu if (!per_cpu_offset(cpu)) { 327378b39a4SYinghai Lu printk(KERN_WARNING 328378b39a4SYinghai Lu "early_cpu_to_node(%d): no per_cpu area!\n", cpu); 329378b39a4SYinghai Lu dump_stack(); 330378b39a4SYinghai Lu return NUMA_NO_NODE; 331378b39a4SYinghai Lu } 332378b39a4SYinghai Lu return per_cpu(x86_cpu_to_node_map, cpu); 333378b39a4SYinghai Lu } 334378b39a4SYinghai Lu 3356a2f47caSMike Travis 3366a2f47caSMike Travis /* empty cpumask */ 3376a2f47caSMike Travis static const cpumask_t cpu_mask_none; 3386a2f47caSMike Travis 339378b39a4SYinghai Lu /* 340378b39a4SYinghai Lu * Returns a pointer to the bitmask of CPUs on Node 'node'. 341378b39a4SYinghai Lu */ 34211369f35SMike Travis const cpumask_t *_node_to_cpumask_ptr(int node) 343378b39a4SYinghai Lu { 344378b39a4SYinghai Lu if (node_to_cpumask_map == NULL) { 345378b39a4SYinghai Lu printk(KERN_WARNING 346378b39a4SYinghai Lu "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", 347378b39a4SYinghai Lu node); 348378b39a4SYinghai Lu dump_stack(); 34911369f35SMike Travis return (const cpumask_t *)&cpu_online_map; 350378b39a4SYinghai Lu } 3516a2f47caSMike Travis if (node >= nr_node_ids) { 3526a2f47caSMike Travis printk(KERN_WARNING 3536a2f47caSMike Travis "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", 3546a2f47caSMike Travis node, nr_node_ids); 3556a2f47caSMike Travis dump_stack(); 35611369f35SMike Travis return &cpu_mask_none; 3576a2f47caSMike Travis } 35811369f35SMike Travis return &node_to_cpumask_map[node]; 359378b39a4SYinghai Lu } 360378b39a4SYinghai Lu EXPORT_SYMBOL(_node_to_cpumask_ptr); 361378b39a4SYinghai Lu 362378b39a4SYinghai Lu /* 363378b39a4SYinghai Lu * Returns a bitmask of CPUs on Node 'node'. 
3646a2f47caSMike Travis * 3656a2f47caSMike Travis * Side note: this function creates the returned cpumask on the stack 3666a2f47caSMike Travis * so with a high NR_CPUS count, excessive stack space is used. The 3676a2f47caSMike Travis * node_to_cpumask_ptr function should be used whenever possible. 368378b39a4SYinghai Lu */ 369378b39a4SYinghai Lu cpumask_t node_to_cpumask(int node) 370378b39a4SYinghai Lu { 371378b39a4SYinghai Lu if (node_to_cpumask_map == NULL) { 372378b39a4SYinghai Lu printk(KERN_WARNING 373378b39a4SYinghai Lu "node_to_cpumask(%d): no node_to_cpumask_map!\n", node); 374378b39a4SYinghai Lu dump_stack(); 375378b39a4SYinghai Lu return cpu_online_map; 376378b39a4SYinghai Lu } 3776a2f47caSMike Travis if (node >= nr_node_ids) { 3786a2f47caSMike Travis printk(KERN_WARNING 3796a2f47caSMike Travis "node_to_cpumask(%d): node > nr_node_ids(%d)\n", 3806a2f47caSMike Travis node, nr_node_ids); 3816a2f47caSMike Travis dump_stack(); 3826a2f47caSMike Travis return cpu_mask_none; 3836a2f47caSMike Travis } 384378b39a4SYinghai Lu return node_to_cpumask_map[node]; 385378b39a4SYinghai Lu } 386378b39a4SYinghai Lu EXPORT_SYMBOL(node_to_cpumask); 387378b39a4SYinghai Lu 388378b39a4SYinghai Lu /* 389378b39a4SYinghai Lu * --------- end of debug versions of the numa functions --------- 390378b39a4SYinghai Lu */ 391378b39a4SYinghai Lu 392378b39a4SYinghai Lu #endif /* CONFIG_DEBUG_PER_CPU_MAPS */ 393378b39a4SYinghai Lu 394378b39a4SYinghai Lu #endif /* X86_64_NUMA */ 395378b39a4SYinghai Lu 396