#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk, which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere; there is no need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Large page remap allocator
 *
 * This allocator uses a PMD page as the unit.  A PMD page is allocated
 * for each cpu and each is remapped into the vmalloc area using a PMD
 * mapping.  As a PMD page is quite large, only part of it is used for
 * the first chunk.  The unused part is returned to the bootmem allocator.
 *
 * So, the PMD pages are mapped twice - once in the linear physical
 * mapping and once in the vmalloc area for the first percpu chunk.
 * The double mapping adds one more PMD TLB entry of pressure, but it
 * is still much better than using only 4k mappings while remaining
 * NUMA friendly.
 */
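/*
 * Editorial illustration, not part of the original file: assuming 4k
 * base pages (so PMD_SIZE is 2MB on x86_64), each possible CPU n gets
 * one PMD-sized, PMD-aligned bootmem block; only the first
 *
 *	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
 *			       PERCPU_DYNAMIC_RESERVE)
 *
 * bytes of it are kept and the remaining PMD_SIZE - pcpul_size bytes
 * are handed back to bootmem.  The kept piece is then remapped with a
 * single large-page PMD entry at
 *
 *	pcpul_vm.addr + n * PMD_SIZE
 *
 * which is the per-cpu unit address the first chunk uses for CPU n.
 */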
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pcpul_ent {
	unsigned int	cpu;
	void		*ptr;
};

static size_t pcpul_size __initdata;
static struct pcpul_ent *pcpul_map __initdata;
static struct vm_struct pcpul_vm;

static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpul_size)
		return NULL;

	return virt_to_page(pcpul_map[cpu].ptr + off);
}

static ssize_t __init setup_pcpu_lpage(size_t static_size)
{
	size_t map_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 *
	 * NOTE: disabled for now.
	 */
	if (true || !cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently supports only a single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpul_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
	pcpul_map = alloc_bootmem(map_size);

	for_each_possible_cpu(cpu) {
		pcpul_map[cpu].cpu = cpu;
		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
							PMD_SIZE);
		if (!pcpul_map[cpu].ptr)
			goto enomem;

		/*
		 * Only use pcpul_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
			     PMD_SIZE - pcpul_size);

		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
	}

	/* allocate address and map */
	pcpul_vm.flags = VM_ALLOC;
	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&pcpul_vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd, pmd_v;

		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
					 cpu * PMD_SIZE);
		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
				PAGE_KERNEL_LARGE);
		set_pmd(pmd, pmd_v);
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", pcpul_vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, pcpul_vm.addr, NULL);
	goto out_free_map;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpul_map[cpu].ptr)
			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
	ret = -ENOMEM;
out_free_map:
	free_bootmem(__pa(pcpul_map), map_size);
	return ret;
}
#else
static ssize_t __init setup_pcpu_lpage(size_t static_size)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves, and is embedded into the linear
 * physical mapping so that it can use a PMD mapping without
 * additional TLB pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page and most of the initialization is done by the generic
 * setup function.
 */
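/*
 * Editorial illustration, not part of the original file: pcpu4k_pages
 * below is a flat array indexed as [cpu * pcpu4k_nr_static_pages + pageno],
 * so with a hypothetical three-page static area, CPU 1's second static
 * page sits at index 1 * 3 + 1 = 4.  pcpu4k_get_page() returns NULL for
 * any pageno beyond the static pages, which tells the generic setup code
 * that only the static portion is pre-populated here.
 */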
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
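/*
 * Editorial note, not part of the original file: on 32-bit the descriptor
 * packed above uses per_cpu_offset(cpu) as the base, a 0xFFFFF limit with
 * page granularity (flags 0x8) and a read/write data type (0x2 | DESCTYPE_S),
 * so segment-relative per-cpu accesses on that CPU start at its percpu
 * base.  On 64-bit the percpu base is carried in a segment base register
 * instead, so no GDT entry is needed and this function compiles to nothing.
 */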

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = setup_pcpu_lpage(static_size);
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}
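	/*
	 * Editorial illustration, not part of the original file: after the
	 * loop above, a static per-cpu variable at link address &var (inside
	 * [__per_cpu_start, __per_cpu_end)) is reached on CPU n at
	 *
	 *	&var + delta + n * pcpu_unit_size
	 *
	 * i.e. inside CPU n's unit of the first chunk that starts at
	 * pcpu_base_addr.
	 */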

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * Make sure the boot cpu's node_number is correct even when the
	 * boot cpu is on a node that has no memory installed.
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}