xref: /linux/arch/x86/kernel/setup.c (revision f7511d5f66f01fc451747b24e79f3ada7a3af9af)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

/* CPUs enumerated as usable by the MP-table/ACPI parsing */
unsigned int num_processors;
/* CPUs present in the firmware tables but marked disabled */
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Per-CPU map from logical CPU number to local APIC ID */
DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
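
/*
 * Illustrative sketch (not part of this file; the helper name is made up):
 * phys_cpu_present_map is indexed by physical APIC ID, so once the
 * MP-table/ACPI parsing has filled in boot_cpu_physical_apicid, checking
 * that the boot CPU was recorded would look roughly like this.
 */
static inline int sketch_boot_cpu_recorded(void)
{
	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}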

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
		per_cpu(x86_bios_cpu_apicid, cpu) =
						x86_bios_cpu_apicid_init[cpu];
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
						x86_cpu_to_node_map_init[cpu];
#endif
	}

	/* indicate the early static arrays will soon be gone */
	x86_cpu_to_apicid_early_ptr = NULL;
	x86_bios_cpu_apicid_early_ptr = NULL;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = NULL;
#endif
}
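
/*
 * Illustrative sketch (not part of this file; the helper name is made up):
 * before setup_per_cpu_maps() runs, a lookup has to go through the static
 * init array via the early pointer; afterwards the pointer is NULL and the
 * per-cpu copy is the one to use.
 */
static u16 sketch_cpu_to_apicid(int cpu)
{
	u16 *early_map = x86_cpu_to_apicid_early_ptr;

	if (early_map)		/* per-cpu areas not yet set up */
		return early_map[cpu];
	return per_cpu(x86_cpu_to_apicid, cpu);
}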

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif
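
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
/*
 * Illustrative sketch (not part of this file; the function name is made up):
 * every entry of cpumask_of_cpu_map is a constant single-bit mask, so entry
 * i can be handed out wherever a cpumask_t containing only CPU i is needed,
 * instead of building one on the stack each time.
 */
static void __init sketch_check_cpumask_of_cpu_map(void)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		BUG_ON(!cpu_isset(i, cpumask_of_cpu_map[i]));
}
#endif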

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	int i, highest_cpu = 0;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(i) {
		char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(i);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node or node-local memory\n", i);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
#ifdef CONFIG_X86_64
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
#else
		__per_cpu_offset[i] = ptr - __per_cpu_start;
#endif
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

		highest_cpu = i;
	}

	nr_cpu_ids = highest_cpu + 1;
	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d\n", NR_CPUS, nr_cpu_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}
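
/*
 * Illustrative sketch (not part of this file; the helper name is made up):
 * after setup_per_cpu_areas() a per-cpu access reduces to "address of the
 * variable in the original .data.percpu section plus this CPU's private
 * offset".  On 64-bit the offset is kept in the PDA, on 32-bit in
 * __per_cpu_offset[], exactly as stored in the loop above.
 */
static void *sketch_per_cpu_address(void *percpu_sym, int cpu)
{
#ifdef CONFIG_X86_64
	return (char *)percpu_sym + cpu_pda(cpu)->data_offset;
#else
	return (char *)percpu_sym + __per_cpu_offset[cpu];
#endif
}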

#endif