/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 * consists of a boot-time determined number of units, and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot-time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary, and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit space.
 * Ie, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
 * c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly
 * to cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu-to-unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each map entry records the byte offset of an area;
 * as offsets are kept even, the low bit of an entry doubles as the
 * in-use flag (see pcpu_alloc_area() and pcpu_free_area()).
 * Allocation inside a chunk is done by scanning this map sequentially
 * and serving the first matching entry.  This is mostly copied from
 * the percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
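
/*
 * Example (illustrative sketch, not part of the allocator itself):
 * this file backs the dynamic percpu API.  A typical consumer does
 * something along the lines of
 *
 *	int __percpu *ctr = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (ctr) {
 *		this_cpu_inc(*ctr);	(bump this cpu's instance)
 *		free_percpu(ctr);
 *	}
 *
 * __alloc_percpu() returns an offset-style percpu pointer; accessors
 * like per_cpu_ptr() add the per-cpu unit offset to turn it into a
 * real address.
 */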
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +			\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};
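
/*
 * Illustrative sketch of the chunk->map encoding (derived from
 * pcpu_alloc_area()/pcpu_free_area() below, shown here for
 * orientation): each entry holds the byte offset of an area, and
 * since offsets are even, bit 0 doubles as the in-use flag.  A fresh
 * chunk carrying one 256-byte allocation at offset 0 would look like:
 *
 *	map[0] = 0 | 1;			256-byte area at 0, in use
 *	map[1] = 256;			free from 256 to the sentry
 *	map[2] = pcpu_unit_size | 1;	sentry, always marked in use
 *	map_used = 2;			entries before the sentry
 */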
static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * The free path accesses and alters only the index data structures
 * and can be safely called from atomic context.  When memory needs to
 * be returned to the system, the free path schedules reclaim_work.
 */
static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
	       addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}
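
/*
 * Worked example (illustrative): __pcpu_size_to_slot() above keys a
 * chunk by fls() of its free size; e.g. a chunk with 64..127 free
 * bytes (fls() == 7) lands in slot 7 - PCPU_SLOT_BASE_SHIFT + 2 == 4.
 * pcpu_size_to_slot() special-cases a fully free chunk (free_size ==
 * pcpu_unit_size) into the last slot so that fully free chunks can be
 * found cheaply by the reclaimer.
 */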
static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
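
/*
 * Example (illustrative): pcpu_alloc() below uses the unpop iterator
 * to populate only the missing pages behind an allocation:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		... populate pages [rs, re) ...
 *	}
 *
 * Each iteration yields one maximal run of unpopulated pages
 * [rs, re) inside [page_start, page_end).
 */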
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 3)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 3)
		new_alloc *= 2;

	return new_alloc;
}
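
/*
 * Worked example (illustrative): a chunk using 30 map entries needs
 * its map to hold at least 30 + 3 entries (a split can add two
 * entries, plus the sentry).  pcpu_need_to_extend() doubles
 * PCPU_DFL_MAP_ALLOC (16) until that fits: 16 -> 32 -> 64, so it
 * returns 64 and pcpu_extend_area_map() below reallocates the map to
 * 64 entries.
 */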
/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}
/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}
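
/*
 * Worked example (illustrative): for a free area at @off == 20 with
 * @this_size == 200, fitting @size == 64 at @align == 16 gives
 * head == ALIGN(20, 16) - 20 == 12 bytes of padding; 12 + 64 <= 200,
 * so 12 is returned (with @pop_only == %false).  pcpu_alloc_area()
 * below then either merges the padding into a free neighbour or
 * keeps it as a small free block.
 */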
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1,
				 chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
}
/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the corresponding page
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 16bit alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, false);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart pcpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align, false);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	mutex_lock(&pcpu_alloc_mutex);

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	mutex_unlock(&pcpu_alloc_mutex);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (true) {
		int page_start, page_end, rs, re;

		mutex_lock(&pcpu_alloc_mutex);

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				mutex_unlock(&pcpu_alloc_mutex);
				pcpu_free_area(chunk, off);
				err = "failed to populate";
				goto fail_unlock;
			}
			bitmap_set(chunk->populated, rs, re - rs);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 * Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from the reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			bitmap_clear(chunk->populated, rs, re - rs);
		}
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up the grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to the in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr, which is a dereferenceable address obtained via one of
 * the percpu access macros, this function translates it into its
 * physical address.
 * The caller is responsible for ensuring @addr stays valid until this
 * function finishes.
 *
 * The percpu allocator has a special setup for the first chunk, which
 * currently supports either embedding in the linear address space or
 * vmalloc mapping, and, from the second chunk on, the backing
 * allocator (currently either vm or km) provides translation.
 *
 * The address could be translated simply without checking whether it
 * falls into the first chunk.  But the current code better reflects
 * how the percpu allocator actually works, and the verification can
 * discover both bugs in the percpu allocator itself and in
 * per_cpu_ptr_to_phys() callers.  So we keep the current code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}
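
/*
 * Illustrative sketch of the pcpu_alloc_info layout produced by
 * pcpu_alloc_alloc_info() below: the struct, its groups[] array and
 * the cpu_map[] entries for all units live in a single page-aligned
 * memblock allocation:
 *
 *	+------------------+-------------------+---------------------+
 *	| struct           | groups[0 ..       | cpu_map[0 ..        |
 *	| pcpu_alloc_info  |   nr_groups - 1]  |   nr_units - 1]     |
 *	+------------------+-------------------+---------------------+
 *
 * groups[0].cpu_map points at the start of cpu_map[]; it is up to the
 * caller to carve out the later groups' cpu_map pointers from the
 * same array.
 */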
/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize the
 * cpu_map pointers of the other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}
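
/*
 * Example (illustrative numbers): on a small 4-cpu UMA machine,
 * pcpu_dump_alloc_info() below prints something along the lines of
 *
 *	pcpu-alloc: s8192 r8192 d16384 u32768 alloc=8*4096
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3
 *
 * i.e. static/reserved/dynamic/unit sizes and the allocation granule
 * first, then, per allocation, the group in brackets followed by the
 * cpu owning each unit (dashes for unused units).
 */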
1171fd1e8a1fSTejun Heo /**
1172fd1e8a1fSTejun Heo  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1173fd1e8a1fSTejun Heo  * @lvl: loglevel
1174fd1e8a1fSTejun Heo  * @ai: allocation info to dump
1175fd1e8a1fSTejun Heo  *
1176fd1e8a1fSTejun Heo  * Print out information about @ai using loglevel @lvl.
1177fd1e8a1fSTejun Heo  */
1178fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
1179fd1e8a1fSTejun Heo 				 const struct pcpu_alloc_info *ai)
1180033e48fbSTejun Heo {
1181fd1e8a1fSTejun Heo 	int group_width = 1, cpu_width = 1, width;
1182033e48fbSTejun Heo 	char empty_str[] = "--------";
1183fd1e8a1fSTejun Heo 	int alloc = 0, alloc_end = 0;
1184fd1e8a1fSTejun Heo 	int group, v;
1185fd1e8a1fSTejun Heo 	int upa, apl;	/* units per alloc, allocs per line */
1186033e48fbSTejun Heo 
1187fd1e8a1fSTejun Heo 	v = ai->nr_groups;
1188033e48fbSTejun Heo 	while (v /= 10)
1189fd1e8a1fSTejun Heo 		group_width++;
1190033e48fbSTejun Heo 
1191fd1e8a1fSTejun Heo 	v = num_possible_cpus();
1192fd1e8a1fSTejun Heo 	while (v /= 10)
1193fd1e8a1fSTejun Heo 		cpu_width++;
1194fd1e8a1fSTejun Heo 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1195033e48fbSTejun Heo 
1196fd1e8a1fSTejun Heo 	upa = ai->alloc_size / ai->unit_size;
1197fd1e8a1fSTejun Heo 	width = upa * (cpu_width + 1) + group_width + 3;
1198fd1e8a1fSTejun Heo 	apl = rounddown_pow_of_two(max(60 / width, 1));
1199033e48fbSTejun Heo 
1200fd1e8a1fSTejun Heo 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1201fd1e8a1fSTejun Heo 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1202fd1e8a1fSTejun Heo 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1203fd1e8a1fSTejun Heo 
1204fd1e8a1fSTejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
1205fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
1206fd1e8a1fSTejun Heo 		int unit = 0, unit_end = 0;
1207fd1e8a1fSTejun Heo 
1208fd1e8a1fSTejun Heo 		BUG_ON(gi->nr_units % upa);
1209fd1e8a1fSTejun Heo 		for (alloc_end += gi->nr_units / upa;
1210fd1e8a1fSTejun Heo 		     alloc < alloc_end; alloc++) {
1211fd1e8a1fSTejun Heo 			if (!(alloc % apl)) {
1212cb129820STejun Heo 				printk(KERN_CONT "\n");
1213fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl);
1214033e48fbSTejun Heo 			}
1215cb129820STejun Heo 			printk(KERN_CONT "[%0*d] ", group_width, group);
1216fd1e8a1fSTejun Heo 
1217fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++)
1218fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS)
1219cb129820STejun Heo 					printk(KERN_CONT "%0*d ", cpu_width,
1220fd1e8a1fSTejun Heo 					       gi->cpu_map[unit]);
1221033e48fbSTejun Heo 				else
1222cb129820STejun Heo 					printk(KERN_CONT "%s ", empty_str);
1223033e48fbSTejun Heo 		}
1224fd1e8a1fSTejun Heo 	}
1225cb129820STejun Heo 	printk(KERN_CONT "\n");
1226033e48fbSTejun Heo }
1227033e48fbSTejun Heo 
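/*
 * For reference, on a hypothetical 4-CPU single-group machine the dump
 * above looks roughly like this (numbers are illustrative only):
 *
 *   pcpu-alloc: s87040 r8192 d29696 u524288 alloc=1*2097152
 *   pcpu-alloc: [0] 0 1 2 3
 *
 * i.e. the static/reserved/dynamic/unit sizes and the allocation
 * granularity, followed by one [group] entry per allocation listing
 * the CPU served by each unit ("--" marks an unused unit).
 */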
1228fbf59bc9STejun Heo /**
12298d408b4bSTejun Heo  * pcpu_setup_first_chunk - initialize the first percpu chunk
1230fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info describing how the percpu area is shaped
123138a6be52STejun Heo  * @base_addr: mapped address
1232fbf59bc9STejun Heo  *
12338d408b4bSTejun Heo  * Initialize the first percpu chunk which contains the kernel static
12348d408b4bSTejun Heo  * percpu area. This function is to be called from the arch percpu area
123538a6be52STejun Heo  * setup path.
12368d408b4bSTejun Heo  *
1237fd1e8a1fSTejun Heo  * @ai contains all information necessary to initialize the first
1238fd1e8a1fSTejun Heo  * chunk and prime the dynamic percpu allocator.
12398d408b4bSTejun Heo  *
1240fd1e8a1fSTejun Heo  * @ai->static_size is the size of the static percpu area.
1241fd1e8a1fSTejun Heo  *
1242fd1e8a1fSTejun Heo  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1243edcb4639STejun Heo  * reserve after the static area in the first chunk. This reserves part of
1244edcb4639STejun Heo  * the first chunk so that it's available only through reserved
1245edcb4639STejun Heo  * percpu allocation. This is primarily used to serve module percpu
1246edcb4639STejun Heo  * static areas on architectures where the addressing model has
1247edcb4639STejun Heo  * limited offset range for symbol relocations to guarantee module
1248edcb4639STejun Heo  * percpu symbols fall inside the relocatable range.
1249edcb4639STejun Heo  *
1250fd1e8a1fSTejun Heo  * @ai->dyn_size determines the number of bytes available for dynamic
1251fd1e8a1fSTejun Heo  * allocation in the first chunk. The area between @ai->static_size +
1252fd1e8a1fSTejun Heo  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
12536074d5b0STejun Heo  *
1254fd1e8a1fSTejun Heo  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1255fd1e8a1fSTejun Heo  * and equal to or larger than @ai->static_size + @ai->reserved_size +
1256fd1e8a1fSTejun Heo  * @ai->dyn_size.
12578d408b4bSTejun Heo  *
1258fd1e8a1fSTejun Heo  * @ai->atom_size is the allocation atom size and is used as the
1259fd1e8a1fSTejun Heo  * alignment for vm areas.
12608d408b4bSTejun Heo  *
1261fd1e8a1fSTejun Heo  * @ai->alloc_size is the allocation size and always a multiple of
1262fd1e8a1fSTejun Heo  * @ai->atom_size. This is larger than @ai->atom_size if
1263fd1e8a1fSTejun Heo  * @ai->unit_size is larger than @ai->atom_size.
1264fd1e8a1fSTejun Heo  *
1265fd1e8a1fSTejun Heo  * @ai->nr_groups and @ai->groups describe the virtual memory layout of
1266fd1e8a1fSTejun Heo  * percpu areas. Units which should be colocated are put into the
1267fd1e8a1fSTejun Heo  * same group. Dynamic VM areas will be allocated according to these
1268fd1e8a1fSTejun Heo  * groupings. If @ai->nr_groups is zero, a single group containing
1269fd1e8a1fSTejun Heo  * all units is assumed.
12708d408b4bSTejun Heo  *
127138a6be52STejun Heo  * The caller should have mapped the first chunk at @base_addr and
127238a6be52STejun Heo  * copied static data to each unit.
1273fbf59bc9STejun Heo  *
1274edcb4639STejun Heo  * If the first chunk ends up with both reserved and dynamic areas, it
1275edcb4639STejun Heo  * is served by two chunks - one to serve the core static and reserved
1276edcb4639STejun Heo  * areas and the other for the dynamic area. They share the same vm
1277edcb4639STejun Heo  * and page map but use different area allocation maps to stay away
1278edcb4639STejun Heo  * from each other. The latter chunk is circulated in the chunk slots
1279edcb4639STejun Heo  * and available for dynamic allocation like any other chunk.
1280edcb4639STejun Heo  *
1281fbf59bc9STejun Heo  * RETURNS:
1282fb435d52STejun Heo  * 0 on success, -errno on failure.
1283fbf59bc9STejun Heo */ 1284fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1285fd1e8a1fSTejun Heo void *base_addr) 1286fbf59bc9STejun Heo { 1287635b75fcSTejun Heo static char cpus_buf[4096] __initdata; 1288099a19d9STejun Heo static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1289099a19d9STejun Heo static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1290fd1e8a1fSTejun Heo size_t dyn_size = ai->dyn_size; 1291fd1e8a1fSTejun Heo size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; 1292edcb4639STejun Heo struct pcpu_chunk *schunk, *dchunk = NULL; 12936563297cSTejun Heo unsigned long *group_offsets; 12946563297cSTejun Heo size_t *group_sizes; 1295fb435d52STejun Heo unsigned long *unit_off; 1296fbf59bc9STejun Heo unsigned int cpu; 1297fd1e8a1fSTejun Heo int *unit_map; 1298fd1e8a1fSTejun Heo int group, unit, i; 1299fbf59bc9STejun Heo 1300635b75fcSTejun Heo cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask); 1301635b75fcSTejun Heo 1302635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 1303635b75fcSTejun Heo if (unlikely(cond)) { \ 1304635b75fcSTejun Heo pr_emerg("PERCPU: failed to initialize, %s", #cond); \ 1305635b75fcSTejun Heo pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \ 1306635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 1307635b75fcSTejun Heo BUG(); \ 1308635b75fcSTejun Heo } \ 1309635b75fcSTejun Heo } while (0) 1310635b75fcSTejun Heo 13112f39e637STejun Heo /* sanity checks */ 1312635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1313bbddff05STejun Heo #ifdef CONFIG_SMP 1314635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 13150415b00dSTejun Heo PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK); 1316bbddff05STejun Heo #endif 1317635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 13180415b00dSTejun Heo PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK); 1319635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1320635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); 1321635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 1322099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 13239f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 13248d408b4bSTejun Heo 13256563297cSTejun Heo /* process group information and build config tables accordingly */ 1326999c17e3SSantosh Shilimkar group_offsets = memblock_virt_alloc(ai->nr_groups * 1327999c17e3SSantosh Shilimkar sizeof(group_offsets[0]), 0); 1328999c17e3SSantosh Shilimkar group_sizes = memblock_virt_alloc(ai->nr_groups * 1329999c17e3SSantosh Shilimkar sizeof(group_sizes[0]), 0); 1330999c17e3SSantosh Shilimkar unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); 1331999c17e3SSantosh Shilimkar unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); 13322f39e637STejun Heo 1333fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1334ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 1335a855b84cSTejun Heo 1336a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 1337a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 13382f39e637STejun Heo 1339fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1340fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 13412f39e637STejun Heo 13426563297cSTejun Heo group_offsets[group] = gi->base_offset; 13436563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 13446563297cSTejun Heo 1345fd1e8a1fSTejun Heo 
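/*
 * Walk the units in this group: record the serving unit and its offset
 * from the base for every possible CPU (unit_map[] and unit_off[]), and
 * remember the lowest/highest mapped units so the first-chunk range
 * test in per_cpu_ptr_to_phys() stays cheap.
 */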
for (i = 0; i < gi->nr_units; i++) { 1346fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 1347fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 1348fd1e8a1fSTejun Heo continue; 1349fd1e8a1fSTejun Heo 1350635b75fcSTejun Heo PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); 1351635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1352635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1353fd1e8a1fSTejun Heo 1354fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 1355fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1356fb435d52STejun Heo 1357a855b84cSTejun Heo /* determine low/high unit_cpu */ 1358a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 1359a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 1360a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 1361a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 1362a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 1363a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 13640fc0531eSLinus Torvalds } 13650fc0531eSLinus Torvalds } 1366fd1e8a1fSTejun Heo pcpu_nr_units = unit; 13672f39e637STejun Heo 13682f39e637STejun Heo for_each_possible_cpu(cpu) 1369635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 1370635b75fcSTejun Heo 1371635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 1372635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 1373bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 13742f39e637STejun Heo 13756563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 13766563297cSTejun Heo pcpu_group_offsets = group_offsets; 13776563297cSTejun Heo pcpu_group_sizes = group_sizes; 1378fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 1379fb435d52STejun Heo pcpu_unit_offsets = unit_off; 13802f39e637STejun Heo 13812f39e637STejun Heo /* determine basic parameters */ 1382fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 1383d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 13846563297cSTejun Heo pcpu_atom_size = ai->atom_size; 1385ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1386ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1387cafe8816STejun Heo 1388d9b55eebSTejun Heo /* 1389d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 1390d9b55eebSTejun Heo * empty chunks. 1391d9b55eebSTejun Heo */ 1392d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1393999c17e3SSantosh Shilimkar pcpu_slot = memblock_virt_alloc( 1394999c17e3SSantosh Shilimkar pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); 1395fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 1396fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 1397fbf59bc9STejun Heo 1398edcb4639STejun Heo /* 1399edcb4639STejun Heo * Initialize static chunk. If reserved_size is zero, the 1400edcb4639STejun Heo * static chunk covers static area + dynamic allocation area 1401edcb4639STejun Heo * in the first chunk. If reserved_size is not zero, it 1402edcb4639STejun Heo * covers static area + reserved area (mostly used for module 1403edcb4639STejun Heo * static percpu allocation). 
1404edcb4639STejun Heo 	 */
1405999c17e3SSantosh Shilimkar 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
14062441d15cSTejun Heo 	INIT_LIST_HEAD(&schunk->list);
1407bba174f5STejun Heo 	schunk->base_addr = base_addr;
140861ace7faSTejun Heo 	schunk->map = smap;
140961ace7faSTejun Heo 	schunk->map_alloc = ARRAY_SIZE(smap);
141038a6be52STejun Heo 	schunk->immutable = true;
1411ce3141a2STejun Heo 	bitmap_fill(schunk->populated, pcpu_unit_pages);
1412edcb4639STejun Heo 
1413fd1e8a1fSTejun Heo 	if (ai->reserved_size) {
1414fd1e8a1fSTejun Heo 		schunk->free_size = ai->reserved_size;
1415ae9e6bc9STejun Heo 		pcpu_reserved_chunk = schunk;
1416fd1e8a1fSTejun Heo 		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1417edcb4639STejun Heo 	} else {
14182441d15cSTejun Heo 		schunk->free_size = dyn_size;
1419edcb4639STejun Heo 		dyn_size = 0;			/* dynamic area covered */
1420edcb4639STejun Heo 	}
14212441d15cSTejun Heo 	schunk->contig_hint = schunk->free_size;
1422fbf59bc9STejun Heo 
	/*
	 * Each ->map[] entry stores the start offset of an area, with
	 * bit 0 set while the area is in use; the entry past ->map_used
	 * is a sentry holding the chunk's end offset.
	 */
1423723ad1d9SAl Viro 	schunk->map[0] = 1;
1424723ad1d9SAl Viro 	schunk->map[1] = ai->static_size;
1425723ad1d9SAl Viro 	schunk->map_used = 1;
142661ace7faSTejun Heo 	if (schunk->free_size)
1427723ad1d9SAl Viro 		schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
1428723ad1d9SAl Viro 	else
1429723ad1d9SAl Viro 		schunk->map[1] |= 1;
143061ace7faSTejun Heo 
1431edcb4639STejun Heo 	/* init dynamic chunk if necessary */
1432edcb4639STejun Heo 	if (dyn_size) {
1433999c17e3SSantosh Shilimkar 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1434edcb4639STejun Heo 		INIT_LIST_HEAD(&dchunk->list);
1435bba174f5STejun Heo 		dchunk->base_addr = base_addr;
1436edcb4639STejun Heo 		dchunk->map = dmap;
1437edcb4639STejun Heo 		dchunk->map_alloc = ARRAY_SIZE(dmap);
143838a6be52STejun Heo 		dchunk->immutable = true;
1439ce3141a2STejun Heo 		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1440edcb4639STejun Heo 
1441edcb4639STejun Heo 		dchunk->contig_hint = dchunk->free_size = dyn_size;
1442723ad1d9SAl Viro 		dchunk->map[0] = 1;
1443723ad1d9SAl Viro 		dchunk->map[1] = pcpu_reserved_chunk_limit;
1444723ad1d9SAl Viro 		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
1445723ad1d9SAl Viro 		dchunk->map_used = 2;
1446edcb4639STejun Heo 	}
1447edcb4639STejun Heo 
14482441d15cSTejun Heo 	/* link the first chunk in */
1449ae9e6bc9STejun Heo 	pcpu_first_chunk = dchunk ?: schunk;
1450ae9e6bc9STejun Heo 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1451fbf59bc9STejun Heo 
1452fbf59bc9STejun Heo 	/* we're done */
1453bba174f5STejun Heo 	pcpu_base_addr = base_addr;
1454fb435d52STejun Heo 	return 0;
1455fbf59bc9STejun Heo }
145666c3a757STejun Heo 
1457bbddff05STejun Heo #ifdef CONFIG_SMP
1458bbddff05STejun Heo 
145917f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1460f58dc01bSTejun Heo 	[PCPU_FC_AUTO]	= "auto",
1461f58dc01bSTejun Heo 	[PCPU_FC_EMBED]	= "embed",
1462f58dc01bSTejun Heo 	[PCPU_FC_PAGE]	= "page",
1463f58dc01bSTejun Heo };
146466c3a757STejun Heo 
1465f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1466f58dc01bSTejun Heo 
1467f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str)
146466c3a757STejun Heo {
14695479c78aSCyrill Gorcunov 	if (!str)
14705479c78aSCyrill Gorcunov 		return -EINVAL;
14715479c78aSCyrill Gorcunov 
1472f58dc01bSTejun Heo 	if (0)
1473f58dc01bSTejun Heo 		/* nada */;
1474f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1475f58dc01bSTejun Heo 	else if (!strcmp(str, "embed"))
1476f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_EMBED;
1477f58dc01bSTejun Heo #endif
1478f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1479f58dc01bSTejun Heo 	else if (!strcmp(str, "page"))
1480f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_PAGE;
1481f58dc01bSTejun Heo #endif
1482f58dc01bSTejun Heo 	else
1483f58dc01bSTejun Heo 		pr_warning("PERCPU: unknown allocator %s specified\n", str);
148466c3a757STejun Heo 
1485f58dc01bSTejun Heo 	return 0;
148666c3a757STejun Heo }
/* e.g. boot with "percpu_alloc=page" to force the page-based first chunk */
1487f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup);
148866c3a757STejun Heo 
14893c9a024fSTejun Heo /*
14903c9a024fSTejun Heo  * pcpu_embed_first_chunk() is used by the generic percpu setup.
14913c9a024fSTejun Heo  * Build it if the arch config needs it or if the generic setup is
14923c9a024fSTejun Heo  * going to be used.
14933c9a024fSTejun Heo  */
149408fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
149508fc4580STejun Heo 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
14963c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK
14973c9a024fSTejun Heo #endif
14983c9a024fSTejun Heo 
14993c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */
15003c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
15013c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK
15023c9a024fSTejun Heo #endif
15033c9a024fSTejun Heo 
15043c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */
15053c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
15063c9a024fSTejun Heo /**
1507fbf59bc9STejun Heo  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1508fbf59bc9STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
1509fbf59bc9STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
1510fbf59bc9STejun Heo  * @atom_size: allocation atom size
1511fbf59bc9STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
1512fbf59bc9STejun Heo  *
1513fbf59bc9STejun Heo  * This function determines grouping of units, their mappings to cpus
1514fbf59bc9STejun Heo  * and other parameters considering needed percpu size, allocation
1515fbf59bc9STejun Heo  * atom size and distances between CPUs.
1516fbf59bc9STejun Heo  *
1517fbf59bc9STejun Heo  * Groups are always multiples of atom size and CPUs which are of
1518fbf59bc9STejun Heo  * LOCAL_DISTANCE both ways are grouped together and share space for
1519fbf59bc9STejun Heo  * units in the same group. The returned configuration is guaranteed
1520fbf59bc9STejun Heo  * to have CPUs on different nodes in different groups and >=75% usage
1521fbf59bc9STejun Heo  * of allocated virtual address space.
1522fbf59bc9STejun Heo  *
1523fbf59bc9STejun Heo  * RETURNS:
1524fbf59bc9STejun Heo  * On success, pointer to the new allocation_info is returned. On
1525fbf59bc9STejun Heo  * failure, ERR_PTR value is returned.
1526fbf59bc9STejun Heo */ 1527fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 1528fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 1529fbf59bc9STejun Heo size_t atom_size, 1530fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1531fbf59bc9STejun Heo { 1532fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 1533fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 1534fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 1535fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 1536fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 1537fbf59bc9STejun Heo int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 1538fbf59bc9STejun Heo int last_allocs, group, unit; 1539fbf59bc9STejun Heo unsigned int cpu, tcpu; 1540fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 1541fbf59bc9STejun Heo unsigned int *cpu_map; 1542fbf59bc9STejun Heo 1543fbf59bc9STejun Heo /* this function may be called multiple times */ 1544fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 1545fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 1546fbf59bc9STejun Heo 1547fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 1548fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 1549fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 1550fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 1551fbf59bc9STejun Heo 1552fbf59bc9STejun Heo /* 1553fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 1554fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 155525985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 1556fbf59bc9STejun Heo * or larger than min_unit_size. 1557fbf59bc9STejun Heo */ 1558fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1559fbf59bc9STejun Heo 1560fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 1561fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 1562fbf59bc9STejun Heo while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1563fbf59bc9STejun Heo upa--; 1564fbf59bc9STejun Heo max_upa = upa; 1565fbf59bc9STejun Heo 1566fbf59bc9STejun Heo /* group cpus according to their proximity */ 1567fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 1568fbf59bc9STejun Heo group = 0; 1569fbf59bc9STejun Heo next_group: 1570fbf59bc9STejun Heo for_each_possible_cpu(tcpu) { 1571fbf59bc9STejun Heo if (cpu == tcpu) 1572fbf59bc9STejun Heo break; 1573fbf59bc9STejun Heo if (group_map[tcpu] == group && cpu_distance_fn && 1574fbf59bc9STejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 1575fbf59bc9STejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 1576fbf59bc9STejun Heo group++; 1577fbf59bc9STejun Heo nr_groups = max(nr_groups, group + 1); 1578fbf59bc9STejun Heo goto next_group; 1579fbf59bc9STejun Heo } 1580fbf59bc9STejun Heo } 1581fbf59bc9STejun Heo group_map[cpu] = group; 1582fbf59bc9STejun Heo group_cnt[group]++; 1583fbf59bc9STejun Heo } 1584fbf59bc9STejun Heo 1585fbf59bc9STejun Heo /* 1586fbf59bc9STejun Heo * Expand unit size until address space usage goes over 75% 1587fbf59bc9STejun Heo * and then as much as possible without using more address 1588fbf59bc9STejun Heo * space. 
1589fbf59bc9STejun Heo */ 1590fbf59bc9STejun Heo last_allocs = INT_MAX; 1591fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 1592fbf59bc9STejun Heo int allocs = 0, wasted = 0; 1593fbf59bc9STejun Heo 1594fbf59bc9STejun Heo if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1595fbf59bc9STejun Heo continue; 1596fbf59bc9STejun Heo 1597fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 1598fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1599fbf59bc9STejun Heo allocs += this_allocs; 1600fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 1601fbf59bc9STejun Heo } 1602fbf59bc9STejun Heo 1603fbf59bc9STejun Heo /* 1604fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 1605fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 1606fbf59bc9STejun Heo * passes the following check. 1607fbf59bc9STejun Heo */ 1608fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 1609fbf59bc9STejun Heo continue; 1610fbf59bc9STejun Heo 1611fbf59bc9STejun Heo /* and then don't consume more memory */ 1612fbf59bc9STejun Heo if (allocs > last_allocs) 1613fbf59bc9STejun Heo break; 1614fbf59bc9STejun Heo last_allocs = allocs; 1615fbf59bc9STejun Heo best_upa = upa; 1616fbf59bc9STejun Heo } 1617fbf59bc9STejun Heo upa = best_upa; 1618fbf59bc9STejun Heo 1619fbf59bc9STejun Heo /* allocate and fill alloc_info */ 1620fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 1621fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 1622fbf59bc9STejun Heo 1623fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 1624fbf59bc9STejun Heo if (!ai) 1625fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 1626fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 1627fbf59bc9STejun Heo 1628fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 1629fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 1630fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 1631fbf59bc9STejun Heo } 1632fbf59bc9STejun Heo 1633fbf59bc9STejun Heo ai->static_size = static_size; 1634fbf59bc9STejun Heo ai->reserved_size = reserved_size; 1635fbf59bc9STejun Heo ai->dyn_size = dyn_size; 1636fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 1637fbf59bc9STejun Heo ai->atom_size = atom_size; 1638fbf59bc9STejun Heo ai->alloc_size = alloc_size; 1639fbf59bc9STejun Heo 1640fbf59bc9STejun Heo for (group = 0, unit = 0; group_cnt[group]; group++) { 1641fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1642fbf59bc9STejun Heo 1643fbf59bc9STejun Heo /* 1644fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 1645fbf59bc9STejun Heo * back-to-back. The caller should update this to 1646fbf59bc9STejun Heo * reflect actual allocation. 
1647fbf59bc9STejun Heo */ 1648fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 1649fbf59bc9STejun Heo 1650fbf59bc9STejun Heo for_each_possible_cpu(cpu) 1651fbf59bc9STejun Heo if (group_map[cpu] == group) 1652fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 1653fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 1654fbf59bc9STejun Heo unit += gi->nr_units; 1655fbf59bc9STejun Heo } 1656fbf59bc9STejun Heo BUG_ON(unit != nr_units); 1657fbf59bc9STejun Heo 1658fbf59bc9STejun Heo return ai; 1659fbf59bc9STejun Heo } 16603c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 1661fbf59bc9STejun Heo 16623c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 166366c3a757STejun Heo /** 166466c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 166566c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 16664ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 1667c8826dd5STejun Heo * @atom_size: allocation atom size 1668c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1669c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 167025985edcSLucas De Marchi * @free_fn: function to free percpu page 167166c3a757STejun Heo * 167266c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 167366c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 167466c3a757STejun Heo * 167566c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 1676c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 1677c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 1678c8826dd5STejun Heo * aligned to @atom_size. 1679c8826dd5STejun Heo * 1680c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 1681c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 1682c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 1683c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 1684c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 1685c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 168666c3a757STejun Heo * 16874ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 168866c3a757STejun Heo * 168966c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 1690c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 169166c3a757STejun Heo * 169266c3a757STejun Heo * RETURNS: 1693fb435d52STejun Heo * 0 on success, -errno on failure. 
169466c3a757STejun Heo */ 16954ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 1696c8826dd5STejun Heo size_t atom_size, 1697c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 1698c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1699c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 170066c3a757STejun Heo { 1701c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 1702c8826dd5STejun Heo void **areas = NULL; 1703fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 17046ea529a2STejun Heo size_t size_sum, areas_size, max_distance; 1705c8826dd5STejun Heo int group, i, rc; 170666c3a757STejun Heo 1707c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 1708c8826dd5STejun Heo cpu_distance_fn); 1709fd1e8a1fSTejun Heo if (IS_ERR(ai)) 1710fd1e8a1fSTejun Heo return PTR_ERR(ai); 171166c3a757STejun Heo 1712fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 1713c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 171466c3a757STejun Heo 1715999c17e3SSantosh Shilimkar areas = memblock_virt_alloc_nopanic(areas_size, 0); 1716c8826dd5STejun Heo if (!areas) { 1717fb435d52STejun Heo rc = -ENOMEM; 1718c8826dd5STejun Heo goto out_free; 1719fa8a7094STejun Heo } 172066c3a757STejun Heo 1721c8826dd5STejun Heo /* allocate, copy and determine base address */ 1722c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 1723c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1724c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 1725c8826dd5STejun Heo void *ptr; 172666c3a757STejun Heo 1727c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 1728c8826dd5STejun Heo cpu = gi->cpu_map[i]; 1729c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 1730c8826dd5STejun Heo 1731c8826dd5STejun Heo /* allocate space for the whole group */ 1732c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 1733c8826dd5STejun Heo if (!ptr) { 1734c8826dd5STejun Heo rc = -ENOMEM; 1735c8826dd5STejun Heo goto out_free_areas; 1736c8826dd5STejun Heo } 1737f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 1738f528f0b8SCatalin Marinas kmemleak_free(ptr); 1739c8826dd5STejun Heo areas[group] = ptr; 1740c8826dd5STejun Heo 1741c8826dd5STejun Heo base = min(ptr, base); 174242b64281STejun Heo } 174342b64281STejun Heo 174442b64281STejun Heo /* 174542b64281STejun Heo * Copy data and free unused parts. This should happen after all 174642b64281STejun Heo * allocations are complete; otherwise, we may end up with 174742b64281STejun Heo * overlapping groups. 
174842b64281STejun Heo */ 174942b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 175042b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 175142b64281STejun Heo void *ptr = areas[group]; 1752c8826dd5STejun Heo 1753c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 1754c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 1755c8826dd5STejun Heo /* unused unit, free whole */ 1756c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 1757c8826dd5STejun Heo continue; 1758c8826dd5STejun Heo } 1759c8826dd5STejun Heo /* copy and return the unused part */ 1760fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 1761c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 1762c8826dd5STejun Heo } 176366c3a757STejun Heo } 176466c3a757STejun Heo 1765c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 17666ea529a2STejun Heo max_distance = 0; 17676ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 1768c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 17691a0c3298STejun Heo max_distance = max_t(size_t, max_distance, 17701a0c3298STejun Heo ai->groups[group].base_offset); 17716ea529a2STejun Heo } 17726ea529a2STejun Heo max_distance += ai->unit_size; 17736ea529a2STejun Heo 17746ea529a2STejun Heo /* warn if maximum distance is further than 75% of vmalloc space */ 17758a092171SLaura Abbott if (max_distance > VMALLOC_TOTAL * 3 / 4) { 17761a0c3298STejun Heo pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " 1777787e5b06SMike Frysinger "space 0x%lx\n", max_distance, 17788a092171SLaura Abbott VMALLOC_TOTAL); 17796ea529a2STejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 17806ea529a2STejun Heo /* and fail if we have fallback */ 17816ea529a2STejun Heo rc = -EINVAL; 17826ea529a2STejun Heo goto out_free; 17836ea529a2STejun Heo #endif 17846ea529a2STejun Heo } 1785c8826dd5STejun Heo 1786004018e2STejun Heo pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 1787fd1e8a1fSTejun Heo PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 1788fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 178966c3a757STejun Heo 1790fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, base); 1791c8826dd5STejun Heo goto out_free; 1792c8826dd5STejun Heo 1793c8826dd5STejun Heo out_free_areas: 1794c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 1795f851c8d8SMichael Holzheu if (areas[group]) 1796c8826dd5STejun Heo free_fn(areas[group], 1797c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 1798c8826dd5STejun Heo out_free: 1799fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 1800c8826dd5STejun Heo if (areas) 1801999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 1802fb435d52STejun Heo return rc; 1803d4b95f80STejun Heo } 18043c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 1805d4b95f80STejun Heo 18063c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 1807d4b95f80STejun Heo /** 180800ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 1809d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1810d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 181125985edcSLucas De Marchi * @free_fn: function to free percpu page, always called with PAGE_SIZE 1812d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 1813d4b95f80STejun Heo * 181400ae4064STejun Heo * This is a helper to ease setting up page-remapped first 
percpu 181500ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 1816d4b95f80STejun Heo * 1817d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 1818d4b95f80STejun Heo * page-by-page into vmalloc area. 1819d4b95f80STejun Heo * 1820d4b95f80STejun Heo * RETURNS: 1821fb435d52STejun Heo * 0 on success, -errno on failure. 1822d4b95f80STejun Heo */ 1823fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 1824d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1825d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 1826d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 1827d4b95f80STejun Heo { 18288f05a6a6STejun Heo static struct vm_struct vm; 1829fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 183000ae4064STejun Heo char psize_str[16]; 1831ce3141a2STejun Heo int unit_pages; 1832d4b95f80STejun Heo size_t pages_size; 1833ce3141a2STejun Heo struct page **pages; 1834fb435d52STejun Heo int unit, i, j, rc; 1835d4b95f80STejun Heo 183600ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 183700ae4064STejun Heo 18384ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 1839fd1e8a1fSTejun Heo if (IS_ERR(ai)) 1840fd1e8a1fSTejun Heo return PTR_ERR(ai); 1841fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 1842fd1e8a1fSTejun Heo BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); 1843fd1e8a1fSTejun Heo 1844fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 1845d4b95f80STejun Heo 1846d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 1847fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 1848fd1e8a1fSTejun Heo sizeof(pages[0])); 1849999c17e3SSantosh Shilimkar pages = memblock_virt_alloc(pages_size, 0); 1850d4b95f80STejun Heo 18518f05a6a6STejun Heo /* allocate pages */ 1852d4b95f80STejun Heo j = 0; 1853fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) 1854ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) { 1855fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 1856d4b95f80STejun Heo void *ptr; 1857d4b95f80STejun Heo 18583cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 1859d4b95f80STejun Heo if (!ptr) { 186000ae4064STejun Heo pr_warning("PERCPU: failed to allocate %s page " 186100ae4064STejun Heo "for cpu%u\n", psize_str, cpu); 1862d4b95f80STejun Heo goto enomem; 1863d4b95f80STejun Heo } 1864f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 1865f528f0b8SCatalin Marinas kmemleak_free(ptr); 1866ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 1867d4b95f80STejun Heo } 1868d4b95f80STejun Heo 18698f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 18708f05a6a6STejun Heo vm.flags = VM_ALLOC; 1871fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 18728f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 18738f05a6a6STejun Heo 1874fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 18751d9d3257STejun Heo unsigned long unit_addr = 1876fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 18778f05a6a6STejun Heo 1878ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 18798f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 18808f05a6a6STejun Heo 18818f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 1882fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 1883ce3141a2STejun Heo 
unit_pages); 1884fb435d52STejun Heo if (rc < 0) 1885fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 18868f05a6a6STejun Heo 18878f05a6a6STejun Heo /* 18888f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 18898f05a6a6STejun Heo * cache for the linear mapping here - something 18908f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 18918f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 18928f05a6a6STejun Heo * data structures are not set up yet. 18938f05a6a6STejun Heo */ 18948f05a6a6STejun Heo 18958f05a6a6STejun Heo /* copy static data */ 1896fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 189766c3a757STejun Heo } 189866c3a757STejun Heo 189966c3a757STejun Heo /* we're ready, commit */ 19001d9d3257STejun Heo pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", 1901fd1e8a1fSTejun Heo unit_pages, psize_str, vm.addr, ai->static_size, 1902fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 190366c3a757STejun Heo 1904fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, vm.addr); 1905d4b95f80STejun Heo goto out_free_ar; 1906d4b95f80STejun Heo 1907d4b95f80STejun Heo enomem: 1908d4b95f80STejun Heo while (--j >= 0) 1909ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 1910fb435d52STejun Heo rc = -ENOMEM; 1911d4b95f80STejun Heo out_free_ar: 1912999c17e3SSantosh Shilimkar memblock_free_early(__pa(pages), pages_size); 1913fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 1914fb435d52STejun Heo return rc; 191566c3a757STejun Heo } 19163c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */ 1917d4b95f80STejun Heo 1918bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 19198c4bfc6eSTejun Heo /* 1920bbddff05STejun Heo * Generic SMP percpu area setup. 1921e74e3962STejun Heo * 1922e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 1923e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 1924e74e3962STejun Heo * important because many archs have addressing restrictions and might 1925e74e3962STejun Heo * fail if the percpu area is located far away from the previous 1926e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 1927e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 1928e74e3962STejun Heo * on the physical linear memory mapping which uses large page 1929e74e3962STejun Heo * mappings on applicable archs. 
1930e74e3962STejun Heo */ 1931e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 1932e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 1933e74e3962STejun Heo 1934c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 1935c8826dd5STejun Heo size_t align) 1936c8826dd5STejun Heo { 1937999c17e3SSantosh Shilimkar return memblock_virt_alloc_from_nopanic( 1938999c17e3SSantosh Shilimkar size, align, __pa(MAX_DMA_ADDRESS)); 1939c8826dd5STejun Heo } 1940c8826dd5STejun Heo 1941c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 1942c8826dd5STejun Heo { 1943999c17e3SSantosh Shilimkar memblock_free_early(__pa(ptr), size); 1944c8826dd5STejun Heo } 1945c8826dd5STejun Heo 1946e74e3962STejun Heo void __init setup_per_cpu_areas(void) 1947e74e3962STejun Heo { 1948e74e3962STejun Heo unsigned long delta; 1949e74e3962STejun Heo unsigned int cpu; 1950fb435d52STejun Heo int rc; 1951e74e3962STejun Heo 1952e74e3962STejun Heo /* 1953e74e3962STejun Heo * Always reserve area for module percpu variables. That's 1954e74e3962STejun Heo * what the legacy allocator did. 1955e74e3962STejun Heo */ 1956fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 1957c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 1958c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 1959fb435d52STejun Heo if (rc < 0) 1960bbddff05STejun Heo panic("Failed to initialize percpu areas."); 1961e74e3962STejun Heo 1962e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 1963e74e3962STejun Heo for_each_possible_cpu(cpu) 1964fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 1965e74e3962STejun Heo } 1966e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 1967099a19d9STejun Heo 1968bbddff05STejun Heo #else /* CONFIG_SMP */ 1969bbddff05STejun Heo 1970bbddff05STejun Heo /* 1971bbddff05STejun Heo * UP percpu area setup. 1972bbddff05STejun Heo * 1973bbddff05STejun Heo * UP always uses km-based percpu allocator with identity mapping. 1974bbddff05STejun Heo * Static percpu variables are indistinguishable from the usual static 1975bbddff05STejun Heo * variables and don't require any special preparation. 
1976bbddff05STejun Heo */ 1977bbddff05STejun Heo void __init setup_per_cpu_areas(void) 1978bbddff05STejun Heo { 1979bbddff05STejun Heo const size_t unit_size = 1980bbddff05STejun Heo roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 1981bbddff05STejun Heo PERCPU_DYNAMIC_RESERVE)); 1982bbddff05STejun Heo struct pcpu_alloc_info *ai; 1983bbddff05STejun Heo void *fc; 1984bbddff05STejun Heo 1985bbddff05STejun Heo ai = pcpu_alloc_alloc_info(1, 1); 1986999c17e3SSantosh Shilimkar fc = memblock_virt_alloc_from_nopanic(unit_size, 1987999c17e3SSantosh Shilimkar PAGE_SIZE, 1988999c17e3SSantosh Shilimkar __pa(MAX_DMA_ADDRESS)); 1989bbddff05STejun Heo if (!ai || !fc) 1990bbddff05STejun Heo panic("Failed to allocate memory for percpu areas."); 1991100d13c3SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 1992100d13c3SCatalin Marinas kmemleak_free(fc); 1993bbddff05STejun Heo 1994bbddff05STejun Heo ai->dyn_size = unit_size; 1995bbddff05STejun Heo ai->unit_size = unit_size; 1996bbddff05STejun Heo ai->atom_size = unit_size; 1997bbddff05STejun Heo ai->alloc_size = unit_size; 1998bbddff05STejun Heo ai->groups[0].nr_units = 1; 1999bbddff05STejun Heo ai->groups[0].cpu_map[0] = 0; 2000bbddff05STejun Heo 2001bbddff05STejun Heo if (pcpu_setup_first_chunk(ai, fc) < 0) 2002bbddff05STejun Heo panic("Failed to initialize percpu areas."); 20033189eddbSHonggang Li 20043189eddbSHonggang Li pcpu_free_alloc_info(ai); 2005bbddff05STejun Heo } 2006bbddff05STejun Heo 2007bbddff05STejun Heo #endif /* CONFIG_SMP */ 2008bbddff05STejun Heo 2009099a19d9STejun Heo /* 2010099a19d9STejun Heo * First and reserved chunks are initialized with temporary allocation 2011099a19d9STejun Heo * map in initdata so that they can be used before slab is online. 2012099a19d9STejun Heo * This function is called after slab is brought up and replaces those 2013099a19d9STejun Heo * with properly allocated maps. 2014099a19d9STejun Heo */ 2015099a19d9STejun Heo void __init percpu_init_late(void) 2016099a19d9STejun Heo { 2017099a19d9STejun Heo struct pcpu_chunk *target_chunks[] = 2018099a19d9STejun Heo { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; 2019099a19d9STejun Heo struct pcpu_chunk *chunk; 2020099a19d9STejun Heo unsigned long flags; 2021099a19d9STejun Heo int i; 2022099a19d9STejun Heo 2023099a19d9STejun Heo for (i = 0; (chunk = target_chunks[i]); i++) { 2024099a19d9STejun Heo int *map; 2025099a19d9STejun Heo const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); 2026099a19d9STejun Heo 2027099a19d9STejun Heo BUILD_BUG_ON(size > PAGE_SIZE); 2028099a19d9STejun Heo 202990459ce0SBob Liu map = pcpu_mem_zalloc(size); 2030099a19d9STejun Heo BUG_ON(!map); 2031099a19d9STejun Heo 2032099a19d9STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 2033099a19d9STejun Heo memcpy(map, chunk->map, size); 2034099a19d9STejun Heo chunk->map = map; 2035099a19d9STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 2036099a19d9STejun Heo } 2037099a19d9STejun Heo } 2038
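/*
 * Illustrative sketch (assumes the allocator is fully up, i.e. after
 * percpu_init_late(); error handling trimmed, "struct counter" and
 * "counters_demo()" are made-up names): the dynamic percpu interface
 * this allocator ultimately serves.
 */
#if 0
struct counter { u64 events; };

static int __init counters_demo(void)
{
	struct counter __percpu *counters;
	unsigned int cpu;

	counters = alloc_percpu(struct counter);	/* one copy per CPU */
	if (!counters)
		return -ENOMEM;

	this_cpu_inc(counters->events);		/* cheap access to local copy */

	for_each_possible_cpu(cpu)		/* explicit cross-CPU access */
		pr_info("cpu%u: %llu events\n", cpu,
			(unsigned long long)per_cpu_ptr(counters, cpu)->events);

	free_percpu(counters);
	return 0;
}
#endif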