/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 * consists of a boot-time determined number of units and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit's space.
 * Ie, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
 * c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly
 * to cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each map entry is the byte offset at which an area
 * starts; the lowest bit of the offset is set if the area is in use
 * and clear if it is free.  Allocation inside a chunk is done by
 * scanning this map sequentially and serving the first matching
 * entry.  This is mostly copied from the percpu_modalloc() allocator.
 * The chunk an address belongs to can be determined using the index
 * field in the page struct, which contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
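
/*
 * A worked example of the ->map encoding above (illustrative only,
 * assuming a 64KB unit).  A fresh chunk starts out as
 *
 *	map[] = { 0, 65536|1 }			one free area [0, 64K)
 *
 * where the last entry is the sentry and the low bit of each offset
 * flags the area starting there as in-use.  After a 512 byte
 * allocation at offset 0 this becomes
 *
 *	map[] = { 0|1, 512, 65536|1 }		[0, 512) in use, rest free
 */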

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +			\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
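
/*
 * Illustrative round trip through the default mapping above, assuming
 * the generic setup_per_cpu_areas() which derives __per_cpu_offset[]
 * from pcpu_unit_offsets[]: for a percpu pointer p,
 * __pcpu_ptr_to_addr(p) is the corresponding unit0 address, and
 * per_cpu_ptr(p, cpu) evaluates to that address plus
 * pcpu_unit_offsets[cpu].  The two macros are exact inverses of each
 * other.
 */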

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */

	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct work_struct	map_extend_work;/* async ->map[] extension */

	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			nr_populated;	/* # of populated pages */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The size of the
 * reserved region is kept in pcpu_reserved_chunk_limit.  When the
 * reserved area doesn't exist, the following variables contain NULL
 * and 0 respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
static int pcpu_nr_empty_pop_pages;

/* balance work is used to populate or destroy chunks asynchronously */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
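
/*
 * Example of the slot mapping above (illustrative only): a chunk with
 * free_size == 2000 lands on slot max(fls(2000) - 5 + 2, 1) == 8,
 * while a completely free chunk (free_size == pcpu_unit_size) is kept
 * on the last slot, pcpu_nr_slots - 1.
 */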

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to the start and end page
 * index of the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
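
/*
 * Typical use of the iterators above (illustrative sketch; the real
 * callers are pcpu_alloc() and pcpu_balance_workfn() below):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pcpu_populate_chunk(chunk, rs, re);
 *
 * populates every unpopulated page region of @chunk.
 */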

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
	int off = chunk->map[i] & ~1;
	int end = chunk->map[i + 1] & ~1;

	if (!PAGE_ALIGNED(off) && i > 0) {
		int prev = chunk->map[i - 1];

		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
			off = round_down(off, PAGE_SIZE);
	}

	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
		int next = chunk->map[i + 1];
		int nend = chunk->map[i + 2] & ~1;

		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
			end = round_up(end, PAGE_SIZE);
	}

	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}
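
/*
 * Worked example for the above (illustrative, 4KB pages): an area
 * spanning [3072, 5120) only straddles pages 0 and 1 partially, so
 * it counts as zero pages by itself.  If the area before it is free
 * back to offset 0 and the area after it is free up to 8192, @off
 * and @end are rounded out and the area accounts for two whole
 * pages.
 */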

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether the area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the amount left
 * is low.  If !@is_atomic, it aims for more empty space.  Combined,
 * this ensures that the map is likely to have enough available space
 * to accommodate atomic allocations which can't extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
	int margin, new_alloc;

	if (is_atomic) {
		margin = 3;

		if (chunk->map_alloc <
		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW)
			schedule_work(&chunk->map_extend_work);
	} else {
		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
	}

	if (chunk->map_alloc >= chunk->map_used + margin)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + margin)
		new_alloc *= 2;

	return new_alloc;
}
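
/*
 * Sizing example for the above (illustrative): with
 * chunk->map_used == 120 on a !@is_atomic allocation, the margin is
 * PCPU_ATOMIC_MAP_MARGIN_HIGH (64), so the target is grown by
 * doubling PCPU_DFL_MAP_ALLOC (16) until it covers 184 entries,
 * i.e. to 256.
 */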

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

static void pcpu_map_extend_workfn(struct work_struct *work)
{
	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
						map_extend_work);
	int new_alloc;

	spin_lock_irq(&pcpu_lock);
	new_alloc = pcpu_need_to_extend(chunk, false);
	spin_unlock_irq(&pcpu_lock);

	if (new_alloc)
		pcpu_extend_area_map(chunk, new_alloc);
}

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}
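
/*
 * Padding example for the above (illustrative): a candidate area at
 * @off == 100 with @align == 64 needs head == ALIGN(100, 64) - 100
 * == 28 bytes of padding before the allocation can start.
 */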

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only, int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
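
/*
 * Split example for the above (illustrative): allocating 1024 bytes
 * aligned at 1024 from the free area { 512, 4096|1 }, with the area
 * preceding offset 512 in use, pads a 512 byte head and leaves a
 * 2048 byte free tail, i.e. the map fragment becomes
 * { 512, 1024|1, 2048, 4096|1 }.
 */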

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
			   int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
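
/*
 * Merge example for the above (illustrative): freeing offset 1024
 * from { 512, 1024|1, 2048, 4096|1 } finds the <1024, in use> entry
 * by binary search, clears its low bit and, as both neighbours are
 * free, collapses the three areas back into { 512, 4096|1 }.
 */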

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;

	INIT_LIST_HEAD(&chunk->list);
	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the corresponding struct page
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = !(gfp & GFP_KERNEL);
	int occ_pages = 0;
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 2 byte alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (is_atomic ||
			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, is_atomic,
				      &occ_pages);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
			if (new_alloc) {
				if (is_atomic)
					continue;
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart pcpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align, is_atomic,
					      &occ_pages);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic)
		goto fail;

	mutex_lock(&pcpu_alloc_mutex);

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	mutex_unlock(&pcpu_alloc_mutex);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		mutex_lock(&pcpu_alloc_mutex);

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				mutex_unlock(&pcpu_alloc_mutex);
				pcpu_free_area(chunk, off, &occ_pages);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages -= occ_pages;

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	if (!is_atomic && warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			   size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
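
/*
 * Minimal usage sketch for the dynamic API above (illustrative only;
 * error handling trimmed):
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (cnt) {
 *		this_cpu_inc(*cnt);
 *		free_percpu(cnt);
 *	}
 *
 * Atomic contexts can use __alloc_percpu_gfp(size, align, GFP_NOWAIT)
 * instead, which never blocks but is more likely to fail.
 */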

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_workfn - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	LIST_HEAD(to_free);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off, occ_pages;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off, &occ_pages);

	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += occ_pages;

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_balance_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to the in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from the in-kernel static percpu area, %false otherwise.
 */
118210fad5e4STejun Heo */ 118310fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr) 118410fad5e4STejun Heo { 1185bbddff05STejun Heo #ifdef CONFIG_SMP 118610fad5e4STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 118710fad5e4STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 118810fad5e4STejun Heo unsigned int cpu; 118910fad5e4STejun Heo 119010fad5e4STejun Heo for_each_possible_cpu(cpu) { 119110fad5e4STejun Heo void *start = per_cpu_ptr(base, cpu); 119210fad5e4STejun Heo 119310fad5e4STejun Heo if ((void *)addr >= start && (void *)addr < start + static_size) 119410fad5e4STejun Heo return true; 119510fad5e4STejun Heo } 1196bbddff05STejun Heo #endif 1197bbddff05STejun Heo /* on UP, can't distinguish from other static vars, always false */ 119810fad5e4STejun Heo return false; 119910fad5e4STejun Heo } 120010fad5e4STejun Heo 120110fad5e4STejun Heo /** 12023b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address 12033b034b0dSVivek Goyal * @addr: the address to be converted to physical address 12043b034b0dSVivek Goyal * 12053b034b0dSVivek Goyal * Given @addr, which is a dereferenceable address obtained via one of 12063b034b0dSVivek Goyal * the percpu access macros, this function translates it into its physical 12073b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid 12083b034b0dSVivek Goyal * until this function finishes. 12093b034b0dSVivek Goyal * 121067589c71SDave Young * The percpu allocator has special setup for the first chunk, which currently 121167589c71SDave Young * supports either embedding in the linear address space or vmalloc mapping, 121267589c71SDave Young * and, from the second chunk on, the backing allocator (currently either vm or 121367589c71SDave Young * km) provides the translation. 121467589c71SDave Young * 121567589c71SDave Young * The address could be translated simply without checking whether it falls 121667589c71SDave Young * into the first chunk. But the current code better reflects how the percpu 121767589c71SDave Young * allocator actually works, and the verification can discover bugs both in the 121867589c71SDave Young * percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we keep 121967589c71SDave Young * the current code. 122067589c71SDave Young * 12213b034b0dSVivek Goyal * RETURNS: 12223b034b0dSVivek Goyal * The physical address for @addr. 12233b034b0dSVivek Goyal */ 12243b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr) 12253b034b0dSVivek Goyal { 12269983b6f0STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 12279983b6f0STejun Heo bool in_first_chunk = false; 1228a855b84cSTejun Heo unsigned long first_low, first_high; 12299983b6f0STejun Heo unsigned int cpu; 12309983b6f0STejun Heo 12319983b6f0STejun Heo /* 1232a855b84cSTejun Heo * The following test on unit_low/high isn't strictly 12339983b6f0STejun Heo * necessary but will speed up lookups of addresses which 12349983b6f0STejun Heo * aren't in the first chunk.
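 *
 * Editor's note (not part of the original source): first_low and
 * first_high below bracket the lowest and highest unit addresses of
 * the first chunk, so any address outside [first_low, first_high)
 * skips the per-cpu scan entirely.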
12359983b6f0STejun Heo */ 1236a855b84cSTejun Heo first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0); 1237a855b84cSTejun Heo first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu, 12389983b6f0STejun Heo pcpu_unit_pages); 1239a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 1240a855b84cSTejun Heo (unsigned long)addr < first_high) { 12419983b6f0STejun Heo for_each_possible_cpu(cpu) { 12429983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 12439983b6f0STejun Heo 12449983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 12459983b6f0STejun Heo in_first_chunk = true; 12469983b6f0STejun Heo break; 12479983b6f0STejun Heo } 12489983b6f0STejun Heo } 12499983b6f0STejun Heo } 12509983b6f0STejun Heo 12519983b6f0STejun Heo if (in_first_chunk) { 1252eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 12533b034b0dSVivek Goyal return __pa(addr); 12543b034b0dSVivek Goyal else 12559f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 12569f57bd4dSEugene Surovegin offset_in_page(addr); 1257020ec653STejun Heo } else 12589f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 12599f57bd4dSEugene Surovegin offset_in_page(addr); 12603b034b0dSVivek Goyal } 12613b034b0dSVivek Goyal 1262fbf59bc9STejun Heo /** 1263fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 1264fd1e8a1fSTejun Heo * @nr_groups: the number of groups 1265fd1e8a1fSTejun Heo * @nr_units: the number of units 1266033e48fbSTejun Heo * 1267fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 1268fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 1269fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 1270fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 1271fd1e8a1fSTejun Heo * pointer of other groups. 1272033e48fbSTejun Heo * 1273033e48fbSTejun Heo * RETURNS: 1274fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 1275fd1e8a1fSTejun Heo * failure. 
1276033e48fbSTejun Heo */ 1277fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 1278fd1e8a1fSTejun Heo int nr_units) 1279fd1e8a1fSTejun Heo { 1280fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 1281fd1e8a1fSTejun Heo size_t base_size, ai_size; 1282fd1e8a1fSTejun Heo void *ptr; 1283fd1e8a1fSTejun Heo int unit; 1284fd1e8a1fSTejun Heo 1285fd1e8a1fSTejun Heo base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), 1286fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 1287fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 1288fd1e8a1fSTejun Heo 1289999c17e3SSantosh Shilimkar ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); 1290fd1e8a1fSTejun Heo if (!ptr) 1291fd1e8a1fSTejun Heo return NULL; 1292fd1e8a1fSTejun Heo ai = ptr; 1293fd1e8a1fSTejun Heo ptr += base_size; 1294fd1e8a1fSTejun Heo 1295fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 1296fd1e8a1fSTejun Heo 1297fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 1298fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 1299fd1e8a1fSTejun Heo 1300fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 1301fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 1302fd1e8a1fSTejun Heo 1303fd1e8a1fSTejun Heo return ai; 1304fd1e8a1fSTejun Heo } 1305fd1e8a1fSTejun Heo 1306fd1e8a1fSTejun Heo /** 1307fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 1308fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 1309fd1e8a1fSTejun Heo * 1310fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 1311fd1e8a1fSTejun Heo */ 1312fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 1313fd1e8a1fSTejun Heo { 1314999c17e3SSantosh Shilimkar memblock_free_early(__pa(ai), ai->__ai_size); 1315fd1e8a1fSTejun Heo } 1316fd1e8a1fSTejun Heo 1317fd1e8a1fSTejun Heo /** 1318fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1319fd1e8a1fSTejun Heo * @lvl: loglevel 1320fd1e8a1fSTejun Heo * @ai: allocation info to dump 1321fd1e8a1fSTejun Heo * 1322fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 
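 *
 * Editor's note (not part of the original source): the output looks
 * roughly like the following; all values are made up for illustration:
 *
 *	pcpu-alloc: s32768 r8192 d20480 u262144 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3 4 5 6 7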
1323fd1e8a1fSTejun Heo */ 1324fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl, 1325fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai) 1326033e48fbSTejun Heo { 1327fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width; 1328033e48fbSTejun Heo char empty_str[] = "--------"; 1329fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0; 1330fd1e8a1fSTejun Heo int group, v; 1331fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */ 1332033e48fbSTejun Heo 1333fd1e8a1fSTejun Heo v = ai->nr_groups; 1334033e48fbSTejun Heo while (v /= 10) 1335fd1e8a1fSTejun Heo group_width++; 1336033e48fbSTejun Heo 1337fd1e8a1fSTejun Heo v = num_possible_cpus(); 1338fd1e8a1fSTejun Heo while (v /= 10) 1339fd1e8a1fSTejun Heo cpu_width++; 1340fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 1341033e48fbSTejun Heo 1342fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size; 1343fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3; 1344fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1)); 1345033e48fbSTejun Heo 1346fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 1347fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 1348fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 1349fd1e8a1fSTejun Heo 1350fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) { 1351fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 1352fd1e8a1fSTejun Heo int unit = 0, unit_end = 0; 1353fd1e8a1fSTejun Heo 1354fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa); 1355fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa; 1356fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) { 1357fd1e8a1fSTejun Heo if (!(alloc % apl)) { 1358cb129820STejun Heo printk(KERN_CONT "\n"); 1359fd1e8a1fSTejun Heo printk("%spcpu-alloc: ", lvl); 1360033e48fbSTejun Heo } 1361cb129820STejun Heo printk(KERN_CONT "[%0*d] ", group_width, group); 1362fd1e8a1fSTejun Heo 1363fd1e8a1fSTejun Heo for (unit_end += upa; unit < unit_end; unit++) 1364fd1e8a1fSTejun Heo if (gi->cpu_map[unit] != NR_CPUS) 1365cb129820STejun Heo printk(KERN_CONT "%0*d ", cpu_width, 1366fd1e8a1fSTejun Heo gi->cpu_map[unit]); 1367033e48fbSTejun Heo else 1368cb129820STejun Heo printk(KERN_CONT "%s ", empty_str); 1369033e48fbSTejun Heo } 1370fd1e8a1fSTejun Heo } 1371cb129820STejun Heo printk(KERN_CONT "\n"); 1372033e48fbSTejun Heo } 1373033e48fbSTejun Heo 1374fbf59bc9STejun Heo /** 13758d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 1376fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how the percpu area is shaped 137738a6be52STejun Heo * @base_addr: mapped address 1378fbf59bc9STejun Heo * 13798d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 13808d408b4bSTejun Heo * percpu area. This function is to be called from the arch percpu area 138138a6be52STejun Heo * setup path. 13828d408b4bSTejun Heo * 1383fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first 1384fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator. 13858d408b4bSTejun Heo * 1386fd1e8a1fSTejun Heo * @ai->static_size is the size of the static percpu area. 1387fd1e8a1fSTejun Heo * 1388fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the amount of bytes to 1389edcb4639STejun Heo * reserve after the static area in the first chunk.
This reserves 1390edcb4639STejun Heo * the first chunk such that it's available only through reserved 1391edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 1392edcb4639STejun Heo * static areas on architectures where the addressing model has 1393edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 1394edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 1395edcb4639STejun Heo * 1396fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 1397fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 1398fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 13996074d5b0STejun Heo * 1400fd1e8a1fSTejun Heo * @ai->unit_size specifies the unit size and must be aligned to PAGE_SIZE 1401fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 1402fd1e8a1fSTejun Heo * @ai->dyn_size. 14038d408b4bSTejun Heo * 1404fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and is used as the alignment 1405fd1e8a1fSTejun Heo * for vm areas. 14068d408b4bSTejun Heo * 1407fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and is always a multiple of 1408fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 1409fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 1410fd1e8a1fSTejun Heo * 1411fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe the virtual memory layout of 1412fd1e8a1fSTejun Heo * the percpu areas. Units which should be colocated are put into the 1413fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 1414fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 1415fd1e8a1fSTejun Heo * all units is assumed. 14168d408b4bSTejun Heo * 141738a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 141838a6be52STejun Heo * copied the static data to each unit. 1419fbf59bc9STejun Heo * 1420edcb4639STejun Heo * If the first chunk ends up with both reserved and dynamic areas, it 1421edcb4639STejun Heo * is served by two chunks - one to serve the core static and reserved 1422edcb4639STejun Heo * areas and the other for the dynamic area. They share the same vm 1423edcb4639STejun Heo * and page map but use different area allocation maps to stay away 1424edcb4639STejun Heo * from each other. The latter chunk is circulated in the chunk slots 1425edcb4639STejun Heo * and available for dynamic allocation like any other chunk. 1426edcb4639STejun Heo * 1427fbf59bc9STejun Heo * RETURNS: 1428fb435d52STejun Heo * 0 on success, -errno on failure.
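 *
 * Editor's note (not part of the original source): the expected
 * arch-side sequence, in sketch form (the embed/page helpers below do
 * exactly this on the arch's behalf):
 *
 *	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
 *	... fill @ai, allocate and map the chunk at base_addr,
 *	... copy the static percpu section into each unit
 *	rc = pcpu_setup_first_chunk(ai, base_addr);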
1429fbf59bc9STejun Heo */ 1430fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1431fd1e8a1fSTejun Heo void *base_addr) 1432fbf59bc9STejun Heo { 1433635b75fcSTejun Heo static char cpus_buf[4096] __initdata; 1434099a19d9STejun Heo static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1435099a19d9STejun Heo static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1436fd1e8a1fSTejun Heo size_t dyn_size = ai->dyn_size; 1437fd1e8a1fSTejun Heo size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; 1438edcb4639STejun Heo struct pcpu_chunk *schunk, *dchunk = NULL; 14396563297cSTejun Heo unsigned long *group_offsets; 14406563297cSTejun Heo size_t *group_sizes; 1441fb435d52STejun Heo unsigned long *unit_off; 1442fbf59bc9STejun Heo unsigned int cpu; 1443fd1e8a1fSTejun Heo int *unit_map; 1444fd1e8a1fSTejun Heo int group, unit, i; 1445fbf59bc9STejun Heo 1446635b75fcSTejun Heo cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask); 1447635b75fcSTejun Heo 1448635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 1449635b75fcSTejun Heo if (unlikely(cond)) { \ 1450635b75fcSTejun Heo pr_emerg("PERCPU: failed to initialize, %s", #cond); \ 1451635b75fcSTejun Heo pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \ 1452635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 1453635b75fcSTejun Heo BUG(); \ 1454635b75fcSTejun Heo } \ 1455635b75fcSTejun Heo } while (0) 1456635b75fcSTejun Heo 14572f39e637STejun Heo /* sanity checks */ 1458635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1459bbddff05STejun Heo #ifdef CONFIG_SMP 1460635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 14610415b00dSTejun Heo PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK); 1462bbddff05STejun Heo #endif 1463635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 14640415b00dSTejun Heo PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK); 1465635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1466635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); 1467635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 1468099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 14699f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 14708d408b4bSTejun Heo 14716563297cSTejun Heo /* process group information and build config tables accordingly */ 1472999c17e3SSantosh Shilimkar group_offsets = memblock_virt_alloc(ai->nr_groups * 1473999c17e3SSantosh Shilimkar sizeof(group_offsets[0]), 0); 1474999c17e3SSantosh Shilimkar group_sizes = memblock_virt_alloc(ai->nr_groups * 1475999c17e3SSantosh Shilimkar sizeof(group_sizes[0]), 0); 1476999c17e3SSantosh Shilimkar unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); 1477999c17e3SSantosh Shilimkar unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); 14782f39e637STejun Heo 1479fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1480ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 1481a855b84cSTejun Heo 1482a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 1483a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 14842f39e637STejun Heo 1485fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1486fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 14872f39e637STejun Heo 14886563297cSTejun Heo group_offsets[group] = gi->base_offset; 14896563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 14906563297cSTejun Heo 1491fd1e8a1fSTejun Heo 
for (i = 0; i < gi->nr_units; i++) { 1492fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 1493fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 1494fd1e8a1fSTejun Heo continue; 1495fd1e8a1fSTejun Heo 1496635b75fcSTejun Heo PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); 1497635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1498635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1499fd1e8a1fSTejun Heo 1500fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 1501fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1502fb435d52STejun Heo 1503a855b84cSTejun Heo /* determine low/high unit_cpu */ 1504a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 1505a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 1506a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 1507a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 1508a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 1509a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 15100fc0531eSLinus Torvalds } 15110fc0531eSLinus Torvalds } 1512fd1e8a1fSTejun Heo pcpu_nr_units = unit; 15132f39e637STejun Heo 15142f39e637STejun Heo for_each_possible_cpu(cpu) 1515635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 1516635b75fcSTejun Heo 1517635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 1518635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 1519bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 15202f39e637STejun Heo 15216563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 15226563297cSTejun Heo pcpu_group_offsets = group_offsets; 15236563297cSTejun Heo pcpu_group_sizes = group_sizes; 1524fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 1525fb435d52STejun Heo pcpu_unit_offsets = unit_off; 15262f39e637STejun Heo 15272f39e637STejun Heo /* determine basic parameters */ 1528fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 1529d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 15306563297cSTejun Heo pcpu_atom_size = ai->atom_size; 1531ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1532ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1533cafe8816STejun Heo 1534d9b55eebSTejun Heo /* 1535d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 1536d9b55eebSTejun Heo * empty chunks. 1537d9b55eebSTejun Heo */ 1538d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1539999c17e3SSantosh Shilimkar pcpu_slot = memblock_virt_alloc( 1540999c17e3SSantosh Shilimkar pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); 1541fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 1542fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 1543fbf59bc9STejun Heo 1544edcb4639STejun Heo /* 1545edcb4639STejun Heo * Initialize static chunk. If reserved_size is zero, the 1546edcb4639STejun Heo * static chunk covers static area + dynamic allocation area 1547edcb4639STejun Heo * in the first chunk. If reserved_size is not zero, it 1548edcb4639STejun Heo * covers static area + reserved area (mostly used for module 1549edcb4639STejun Heo * static percpu allocation). 
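 *
 * Editor's note (not part of the original source): resulting layout
 * of the first chunk, with the unused tail up to unit_size omitted:
 *
 *	base_addr
 *	|  static  | reserved |  dynamic  |
 *	|<------ schunk ----->|<-dchunk ->|	(ai->reserved_size != 0)
 *	|<------------ schunk ----------->|	(ai->reserved_size == 0)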
1550edcb4639STejun Heo */ 1551999c17e3SSantosh Shilimkar schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 15522441d15cSTejun Heo INIT_LIST_HEAD(&schunk->list); 15539c824b6aSTejun Heo INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn); 1554bba174f5STejun Heo schunk->base_addr = base_addr; 155561ace7faSTejun Heo schunk->map = smap; 155661ace7faSTejun Heo schunk->map_alloc = ARRAY_SIZE(smap); 155738a6be52STejun Heo schunk->immutable = true; 1558ce3141a2STejun Heo bitmap_fill(schunk->populated, pcpu_unit_pages); 1559b539b87fSTejun Heo schunk->nr_populated = pcpu_unit_pages; 1560edcb4639STejun Heo 1561fd1e8a1fSTejun Heo if (ai->reserved_size) { 1562fd1e8a1fSTejun Heo schunk->free_size = ai->reserved_size; 1563ae9e6bc9STejun Heo pcpu_reserved_chunk = schunk; 1564fd1e8a1fSTejun Heo pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; 1565edcb4639STejun Heo } else { 15662441d15cSTejun Heo schunk->free_size = dyn_size; 1567edcb4639STejun Heo dyn_size = 0; /* dynamic area covered */ 1568edcb4639STejun Heo } 15692441d15cSTejun Heo schunk->contig_hint = schunk->free_size; 1570fbf59bc9STejun Heo 1571723ad1d9SAl Viro schunk->map[0] = 1; 1572723ad1d9SAl Viro schunk->map[1] = ai->static_size; 1573723ad1d9SAl Viro schunk->map_used = 1; 157461ace7faSTejun Heo if (schunk->free_size) 1575723ad1d9SAl Viro schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size); 1576723ad1d9SAl Viro else 1577723ad1d9SAl Viro schunk->map[1] |= 1; 157861ace7faSTejun Heo 1579edcb4639STejun Heo /* init dynamic chunk if necessary */ 1580edcb4639STejun Heo if (dyn_size) { 1581999c17e3SSantosh Shilimkar dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 1582edcb4639STejun Heo INIT_LIST_HEAD(&dchunk->list); 15839c824b6aSTejun Heo INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn); 1584bba174f5STejun Heo dchunk->base_addr = base_addr; 1585edcb4639STejun Heo dchunk->map = dmap; 1586edcb4639STejun Heo dchunk->map_alloc = ARRAY_SIZE(dmap); 158738a6be52STejun Heo dchunk->immutable = true; 1588ce3141a2STejun Heo bitmap_fill(dchunk->populated, pcpu_unit_pages); 1589b539b87fSTejun Heo dchunk->nr_populated = pcpu_unit_pages; 1590edcb4639STejun Heo 1591edcb4639STejun Heo dchunk->contig_hint = dchunk->free_size = dyn_size; 1592723ad1d9SAl Viro dchunk->map[0] = 1; 1593723ad1d9SAl Viro dchunk->map[1] = pcpu_reserved_chunk_limit; 1594723ad1d9SAl Viro dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1; 1595723ad1d9SAl Viro dchunk->map_used = 2; 1596edcb4639STejun Heo } 1597edcb4639STejun Heo 15982441d15cSTejun Heo /* link the first chunk in */ 1599ae9e6bc9STejun Heo pcpu_first_chunk = dchunk ?: schunk; 1600b539b87fSTejun Heo pcpu_nr_empty_pop_pages += 1601b539b87fSTejun Heo pcpu_count_occupied_pages(pcpu_first_chunk, 1); 1602ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 1603fbf59bc9STejun Heo 1604fbf59bc9STejun Heo /* we're done */ 1605bba174f5STejun Heo pcpu_base_addr = base_addr; 1606fb435d52STejun Heo return 0; 1607fbf59bc9STejun Heo } 160866c3a757STejun Heo 1609bbddff05STejun Heo #ifdef CONFIG_SMP 1610bbddff05STejun Heo 161117f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 1612f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 1613f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 1614f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 1615f58dc01bSTejun Heo }; 161666c3a757STejun Heo 1617f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 1618f58dc01bSTejun Heo 1619f58dc01bSTejun Heo static int __init 
percpu_alloc_setup(char *str) 162066c3a757STejun Heo { 16215479c78aSCyrill Gorcunov if (!str) 16225479c78aSCyrill Gorcunov return -EINVAL; 16235479c78aSCyrill Gorcunov 1624f58dc01bSTejun Heo if (0) 1625f58dc01bSTejun Heo /* nada */; 1626f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 1627f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 1628f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 1629f58dc01bSTejun Heo #endif 1630f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1631f58dc01bSTejun Heo else if (!strcmp(str, "page")) 1632f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 1633f58dc01bSTejun Heo #endif 1634f58dc01bSTejun Heo else 1635f58dc01bSTejun Heo pr_warning("PERCPU: unknown allocator %s specified\n", str); 163666c3a757STejun Heo 1637f58dc01bSTejun Heo return 0; 163866c3a757STejun Heo } 1639f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup); 164066c3a757STejun Heo 16413c9a024fSTejun Heo /* 16423c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup. 16433c9a024fSTejun Heo * Build it if it's needed by the arch config or if the generic setup 16443c9a024fSTejun Heo * is going to be used. 16453c9a024fSTejun Heo */ 164608fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 164708fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 16483c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 16493c9a024fSTejun Heo #endif 16503c9a024fSTejun Heo 16513c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 16523c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 16533c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 16543c9a024fSTejun Heo #endif 16553c9a024fSTejun Heo 16563c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 16573c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 16583c9a024fSTejun Heo /** 1659fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 1660fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1661fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 1662fbf59bc9STejun Heo * @atom_size: allocation atom size 1663fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1664fbf59bc9STejun Heo * 1665fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 1666fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 1667fbf59bc9STejun Heo * atom size and distances between CPUs. 1668fbf59bc9STejun Heo * 1669fbf59bc9STejun Heo * Groups are always multiples of atom size and CPUs which are of 1670fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 1671fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 1672fbf59bc9STejun Heo * to have CPUs on different nodes in different groups and >=75% usage 1673fbf59bc9STejun Heo * of the allocated virtual address space. 1674fbf59bc9STejun Heo * 1675fbf59bc9STejun Heo * RETURNS: 1676fbf59bc9STejun Heo * On success, a pointer to the new allocation_info is returned. On 1677fbf59bc9STejun Heo * failure, an ERR_PTR value is returned.
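 *
 * Editor's note (not part of the original source): a worked example
 * with made-up numbers: for size_sum = 61440 bytes and a 2M atom,
 * alloc_size rounds up to 2097152 and upa starts at 2097152 / 61440 =
 * 34; the loop below then lowers it to 32, the largest value that both
 * divides alloc_size evenly and keeps the resulting 65536-byte unit
 * page-aligned.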
1678fbf59bc9STejun Heo */ 1679fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 1680fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 1681fbf59bc9STejun Heo size_t atom_size, 1682fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1683fbf59bc9STejun Heo { 1684fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 1685fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 1686fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 1687fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 1688fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 1689fbf59bc9STejun Heo int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 1690fbf59bc9STejun Heo int last_allocs, group, unit; 1691fbf59bc9STejun Heo unsigned int cpu, tcpu; 1692fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 1693fbf59bc9STejun Heo unsigned int *cpu_map; 1694fbf59bc9STejun Heo 1695fbf59bc9STejun Heo /* this function may be called multiple times */ 1696fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 1697fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 1698fbf59bc9STejun Heo 1699fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 1700fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 1701fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 1702fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 1703fbf59bc9STejun Heo 1704fbf59bc9STejun Heo /* 1705fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 1706fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 170725985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 1708fbf59bc9STejun Heo * or larger than min_unit_size. 1709fbf59bc9STejun Heo */ 1710fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1711fbf59bc9STejun Heo 1712fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 1713fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 1714fbf59bc9STejun Heo while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1715fbf59bc9STejun Heo upa--; 1716fbf59bc9STejun Heo max_upa = upa; 1717fbf59bc9STejun Heo 1718fbf59bc9STejun Heo /* group cpus according to their proximity */ 1719fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 1720fbf59bc9STejun Heo group = 0; 1721fbf59bc9STejun Heo next_group: 1722fbf59bc9STejun Heo for_each_possible_cpu(tcpu) { 1723fbf59bc9STejun Heo if (cpu == tcpu) 1724fbf59bc9STejun Heo break; 1725fbf59bc9STejun Heo if (group_map[tcpu] == group && cpu_distance_fn && 1726fbf59bc9STejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 1727fbf59bc9STejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 1728fbf59bc9STejun Heo group++; 1729fbf59bc9STejun Heo nr_groups = max(nr_groups, group + 1); 1730fbf59bc9STejun Heo goto next_group; 1731fbf59bc9STejun Heo } 1732fbf59bc9STejun Heo } 1733fbf59bc9STejun Heo group_map[cpu] = group; 1734fbf59bc9STejun Heo group_cnt[group]++; 1735fbf59bc9STejun Heo } 1736fbf59bc9STejun Heo 1737fbf59bc9STejun Heo /* 1738fbf59bc9STejun Heo * Expand unit size until address space usage goes over 75% 1739fbf59bc9STejun Heo * and then as much as possible without using more address 1740fbf59bc9STejun Heo * space. 
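 *
 * Editor's note (not part of the original source): the
 * "wasted > num_possible_cpus() / 3" cutoff below is where the ">=75%
 * usage" guarantee in the function comment comes from: with n used
 * units and at most n/3 wasted ones, usage is n / (n + n/3) = 3/4.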
1741fbf59bc9STejun Heo */ 1742fbf59bc9STejun Heo last_allocs = INT_MAX; 1743fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 1744fbf59bc9STejun Heo int allocs = 0, wasted = 0; 1745fbf59bc9STejun Heo 1746fbf59bc9STejun Heo if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1747fbf59bc9STejun Heo continue; 1748fbf59bc9STejun Heo 1749fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 1750fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1751fbf59bc9STejun Heo allocs += this_allocs; 1752fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 1753fbf59bc9STejun Heo } 1754fbf59bc9STejun Heo 1755fbf59bc9STejun Heo /* 1756fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 1757fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 1758fbf59bc9STejun Heo * passes the following check. 1759fbf59bc9STejun Heo */ 1760fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 1761fbf59bc9STejun Heo continue; 1762fbf59bc9STejun Heo 1763fbf59bc9STejun Heo /* and then don't consume more memory */ 1764fbf59bc9STejun Heo if (allocs > last_allocs) 1765fbf59bc9STejun Heo break; 1766fbf59bc9STejun Heo last_allocs = allocs; 1767fbf59bc9STejun Heo best_upa = upa; 1768fbf59bc9STejun Heo } 1769fbf59bc9STejun Heo upa = best_upa; 1770fbf59bc9STejun Heo 1771fbf59bc9STejun Heo /* allocate and fill alloc_info */ 1772fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 1773fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 1774fbf59bc9STejun Heo 1775fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 1776fbf59bc9STejun Heo if (!ai) 1777fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 1778fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 1779fbf59bc9STejun Heo 1780fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 1781fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 1782fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 1783fbf59bc9STejun Heo } 1784fbf59bc9STejun Heo 1785fbf59bc9STejun Heo ai->static_size = static_size; 1786fbf59bc9STejun Heo ai->reserved_size = reserved_size; 1787fbf59bc9STejun Heo ai->dyn_size = dyn_size; 1788fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 1789fbf59bc9STejun Heo ai->atom_size = atom_size; 1790fbf59bc9STejun Heo ai->alloc_size = alloc_size; 1791fbf59bc9STejun Heo 1792fbf59bc9STejun Heo for (group = 0, unit = 0; group_cnt[group]; group++) { 1793fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1794fbf59bc9STejun Heo 1795fbf59bc9STejun Heo /* 1796fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 1797fbf59bc9STejun Heo * back-to-back. The caller should update this to 1798fbf59bc9STejun Heo * reflect actual allocation. 
1799fbf59bc9STejun Heo */ 1800fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 1801fbf59bc9STejun Heo 1802fbf59bc9STejun Heo for_each_possible_cpu(cpu) 1803fbf59bc9STejun Heo if (group_map[cpu] == group) 1804fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 1805fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 1806fbf59bc9STejun Heo unit += gi->nr_units; 1807fbf59bc9STejun Heo } 1808fbf59bc9STejun Heo BUG_ON(unit != nr_units); 1809fbf59bc9STejun Heo 1810fbf59bc9STejun Heo return ai; 1811fbf59bc9STejun Heo } 18123c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 1813fbf59bc9STejun Heo 18143c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 181566c3a757STejun Heo /** 181666c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 181766c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 18184ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 1819c8826dd5STejun Heo * @atom_size: allocation atom size 1820c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1821c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 182225985edcSLucas De Marchi * @free_fn: function to free percpu page 182366c3a757STejun Heo * 182466c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 182566c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 182666c3a757STejun Heo * 182766c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 1828c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 1829c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 1830c8826dd5STejun Heo * aligned to @atom_size. 1831c8826dd5STejun Heo * 1832c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 1833c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 1834c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 1835c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 1836c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 1837c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 183866c3a757STejun Heo * 18394ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 184066c3a757STejun Heo * 184166c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 1842c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 184366c3a757STejun Heo * 184466c3a757STejun Heo * RETURNS: 1845fb435d52STejun Heo * 0 on success, -errno on failure. 
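 *
 * Editor's note (not part of the original source): a typical call,
 * mirroring the generic setup_per_cpu_areas() near the end of this
 * file; my_fc_alloc/my_fc_free stand in for arch-provided callbacks:
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    NULL, my_fc_alloc, my_fc_free);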
184666c3a757STejun Heo */ 18474ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 1848c8826dd5STejun Heo size_t atom_size, 1849c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 1850c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1851c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 185266c3a757STejun Heo { 1853c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 1854c8826dd5STejun Heo void **areas = NULL; 1855fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 18566ea529a2STejun Heo size_t size_sum, areas_size, max_distance; 1857c8826dd5STejun Heo int group, i, rc; 185866c3a757STejun Heo 1859c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 1860c8826dd5STejun Heo cpu_distance_fn); 1861fd1e8a1fSTejun Heo if (IS_ERR(ai)) 1862fd1e8a1fSTejun Heo return PTR_ERR(ai); 186366c3a757STejun Heo 1864fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 1865c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 186666c3a757STejun Heo 1867999c17e3SSantosh Shilimkar areas = memblock_virt_alloc_nopanic(areas_size, 0); 1868c8826dd5STejun Heo if (!areas) { 1869fb435d52STejun Heo rc = -ENOMEM; 1870c8826dd5STejun Heo goto out_free; 1871fa8a7094STejun Heo } 187266c3a757STejun Heo 1873c8826dd5STejun Heo /* allocate, copy and determine base address */ 1874c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 1875c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1876c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 1877c8826dd5STejun Heo void *ptr; 187866c3a757STejun Heo 1879c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 1880c8826dd5STejun Heo cpu = gi->cpu_map[i]; 1881c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 1882c8826dd5STejun Heo 1883c8826dd5STejun Heo /* allocate space for the whole group */ 1884c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 1885c8826dd5STejun Heo if (!ptr) { 1886c8826dd5STejun Heo rc = -ENOMEM; 1887c8826dd5STejun Heo goto out_free_areas; 1888c8826dd5STejun Heo } 1889f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 1890f528f0b8SCatalin Marinas kmemleak_free(ptr); 1891c8826dd5STejun Heo areas[group] = ptr; 1892c8826dd5STejun Heo 1893c8826dd5STejun Heo base = min(ptr, base); 189442b64281STejun Heo } 189542b64281STejun Heo 189642b64281STejun Heo /* 189742b64281STejun Heo * Copy data and free unused parts. This should happen after all 189842b64281STejun Heo * allocations are complete; otherwise, we may end up with 189942b64281STejun Heo * overlapping groups. 
190042b64281STejun Heo */ 190142b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 190242b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 190342b64281STejun Heo void *ptr = areas[group]; 1904c8826dd5STejun Heo 1905c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 1906c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 1907c8826dd5STejun Heo /* unused unit, free whole */ 1908c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 1909c8826dd5STejun Heo continue; 1910c8826dd5STejun Heo } 1911c8826dd5STejun Heo /* copy and return the unused part */ 1912fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 1913c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 1914c8826dd5STejun Heo } 191566c3a757STejun Heo } 191666c3a757STejun Heo 1917c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 19186ea529a2STejun Heo max_distance = 0; 19196ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 1920c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 19211a0c3298STejun Heo max_distance = max_t(size_t, max_distance, 19221a0c3298STejun Heo ai->groups[group].base_offset); 19236ea529a2STejun Heo } 19246ea529a2STejun Heo max_distance += ai->unit_size; 19256ea529a2STejun Heo 19266ea529a2STejun Heo /* warn if maximum distance is further than 75% of vmalloc space */ 19278a092171SLaura Abbott if (max_distance > VMALLOC_TOTAL * 3 / 4) { 19281a0c3298STejun Heo pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " 1929787e5b06SMike Frysinger "space 0x%lx\n", max_distance, 19308a092171SLaura Abbott VMALLOC_TOTAL); 19316ea529a2STejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 19326ea529a2STejun Heo /* and fail if we have fallback */ 19336ea529a2STejun Heo rc = -EINVAL; 19346ea529a2STejun Heo goto out_free; 19356ea529a2STejun Heo #endif 19366ea529a2STejun Heo } 1937c8826dd5STejun Heo 1938004018e2STejun Heo pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 1939fd1e8a1fSTejun Heo PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 1940fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 194166c3a757STejun Heo 1942fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, base); 1943c8826dd5STejun Heo goto out_free; 1944c8826dd5STejun Heo 1945c8826dd5STejun Heo out_free_areas: 1946c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 1947f851c8d8SMichael Holzheu if (areas[group]) 1948c8826dd5STejun Heo free_fn(areas[group], 1949c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 1950c8826dd5STejun Heo out_free: 1951fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 1952c8826dd5STejun Heo if (areas) 1953999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 1954fb435d52STejun Heo return rc; 1955d4b95f80STejun Heo } 19563c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 1957d4b95f80STejun Heo 19583c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 1959d4b95f80STejun Heo /** 196000ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 1961d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1962d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 196325985edcSLucas De Marchi * @free_fn: function to free percpu page, always called with PAGE_SIZE 1964d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 1965d4b95f80STejun Heo * 196600ae4064STejun Heo * This is a helper to ease setting up page-remapped first 
percpu 196700ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 1968d4b95f80STejun Heo * 1969d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 1970d4b95f80STejun Heo * page-by-page into vmalloc area. 1971d4b95f80STejun Heo * 1972d4b95f80STejun Heo * RETURNS: 1973fb435d52STejun Heo * 0 on success, -errno on failure. 1974d4b95f80STejun Heo */ 1975fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 1976d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1977d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 1978d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 1979d4b95f80STejun Heo { 19808f05a6a6STejun Heo static struct vm_struct vm; 1981fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 198200ae4064STejun Heo char psize_str[16]; 1983ce3141a2STejun Heo int unit_pages; 1984d4b95f80STejun Heo size_t pages_size; 1985ce3141a2STejun Heo struct page **pages; 1986fb435d52STejun Heo int unit, i, j, rc; 1987d4b95f80STejun Heo 198800ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 198900ae4064STejun Heo 19904ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 1991fd1e8a1fSTejun Heo if (IS_ERR(ai)) 1992fd1e8a1fSTejun Heo return PTR_ERR(ai); 1993fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 1994fd1e8a1fSTejun Heo BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); 1995fd1e8a1fSTejun Heo 1996fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 1997d4b95f80STejun Heo 1998d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 1999fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2000fd1e8a1fSTejun Heo sizeof(pages[0])); 2001999c17e3SSantosh Shilimkar pages = memblock_virt_alloc(pages_size, 0); 2002d4b95f80STejun Heo 20038f05a6a6STejun Heo /* allocate pages */ 2004d4b95f80STejun Heo j = 0; 2005fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) 2006ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) { 2007fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 2008d4b95f80STejun Heo void *ptr; 2009d4b95f80STejun Heo 20103cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 2011d4b95f80STejun Heo if (!ptr) { 201200ae4064STejun Heo pr_warning("PERCPU: failed to allocate %s page " 201300ae4064STejun Heo "for cpu%u\n", psize_str, cpu); 2014d4b95f80STejun Heo goto enomem; 2015d4b95f80STejun Heo } 2016f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2017f528f0b8SCatalin Marinas kmemleak_free(ptr); 2018ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 2019d4b95f80STejun Heo } 2020d4b95f80STejun Heo 20218f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 20228f05a6a6STejun Heo vm.flags = VM_ALLOC; 2023fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 20248f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 20258f05a6a6STejun Heo 2026fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 20271d9d3257STejun Heo unsigned long unit_addr = 2028fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 20298f05a6a6STejun Heo 2030ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 20318f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 20328f05a6a6STejun Heo 20338f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 2034fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 2035ce3141a2STejun Heo 
unit_pages); 2036fb435d52STejun Heo if (rc < 0) 2037fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 20388f05a6a6STejun Heo 20398f05a6a6STejun Heo /* 20408f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 20418f05a6a6STejun Heo * cache for the linear mapping here - something 20428f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 20438f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 20448f05a6a6STejun Heo * data structures are not set up yet. 20458f05a6a6STejun Heo */ 20468f05a6a6STejun Heo 20478f05a6a6STejun Heo /* copy static data */ 2048fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 204966c3a757STejun Heo } 205066c3a757STejun Heo 205166c3a757STejun Heo /* we're ready, commit */ 20521d9d3257STejun Heo pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", 2053fd1e8a1fSTejun Heo unit_pages, psize_str, vm.addr, ai->static_size, 2054fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 205566c3a757STejun Heo 2056fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, vm.addr); 2057d4b95f80STejun Heo goto out_free_ar; 2058d4b95f80STejun Heo 2059d4b95f80STejun Heo enomem: 2060d4b95f80STejun Heo while (--j >= 0) 2061ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 2062fb435d52STejun Heo rc = -ENOMEM; 2063d4b95f80STejun Heo out_free_ar: 2064999c17e3SSantosh Shilimkar memblock_free_early(__pa(pages), pages_size); 2065fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2066fb435d52STejun Heo return rc; 206766c3a757STejun Heo } 20683c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */ 2069d4b95f80STejun Heo 2070bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 20718c4bfc6eSTejun Heo /* 2072bbddff05STejun Heo * Generic SMP percpu area setup. 2073e74e3962STejun Heo * 2074e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 2075e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 2076e74e3962STejun Heo * important because many archs have addressing restrictions and might 2077e74e3962STejun Heo * fail if the percpu area is located far away from the previous 2078e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 2079e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 2080e74e3962STejun Heo * on the physical linear memory mapping which uses large page 2081e74e3962STejun Heo * mappings on applicable archs. 
2082e74e3962STejun Heo */ 2083e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 2084e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 2085e74e3962STejun Heo 2086c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 2087c8826dd5STejun Heo size_t align) 2088c8826dd5STejun Heo { 2089999c17e3SSantosh Shilimkar return memblock_virt_alloc_from_nopanic( 2090999c17e3SSantosh Shilimkar size, align, __pa(MAX_DMA_ADDRESS)); 2091c8826dd5STejun Heo } 2092c8826dd5STejun Heo 2093c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 2094c8826dd5STejun Heo { 2095999c17e3SSantosh Shilimkar memblock_free_early(__pa(ptr), size); 2096c8826dd5STejun Heo } 2097c8826dd5STejun Heo 2098e74e3962STejun Heo void __init setup_per_cpu_areas(void) 2099e74e3962STejun Heo { 2100e74e3962STejun Heo unsigned long delta; 2101e74e3962STejun Heo unsigned int cpu; 2102fb435d52STejun Heo int rc; 2103e74e3962STejun Heo 2104e74e3962STejun Heo /* 2105e74e3962STejun Heo * Always reserve area for module percpu variables. That's 2106e74e3962STejun Heo * what the legacy allocator did. 2107e74e3962STejun Heo */ 2108fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 2109c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 2110c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 2111fb435d52STejun Heo if (rc < 0) 2112bbddff05STejun Heo panic("Failed to initialize percpu areas."); 2113e74e3962STejun Heo 2114e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 2115e74e3962STejun Heo for_each_possible_cpu(cpu) 2116fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 2117e74e3962STejun Heo } 2118e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 2119099a19d9STejun Heo 2120bbddff05STejun Heo #else /* CONFIG_SMP */ 2121bbddff05STejun Heo 2122bbddff05STejun Heo /* 2123bbddff05STejun Heo * UP percpu area setup. 2124bbddff05STejun Heo * 2125bbddff05STejun Heo * UP always uses km-based percpu allocator with identity mapping. 2126bbddff05STejun Heo * Static percpu variables are indistinguishable from the usual static 2127bbddff05STejun Heo * variables and don't require any special preparation. 
2128bbddff05STejun Heo */ 2129bbddff05STejun Heo void __init setup_per_cpu_areas(void) 2130bbddff05STejun Heo { 2131bbddff05STejun Heo const size_t unit_size = 2132bbddff05STejun Heo roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 2133bbddff05STejun Heo PERCPU_DYNAMIC_RESERVE)); 2134bbddff05STejun Heo struct pcpu_alloc_info *ai; 2135bbddff05STejun Heo void *fc; 2136bbddff05STejun Heo 2137bbddff05STejun Heo ai = pcpu_alloc_alloc_info(1, 1); 2138999c17e3SSantosh Shilimkar fc = memblock_virt_alloc_from_nopanic(unit_size, 2139999c17e3SSantosh Shilimkar PAGE_SIZE, 2140999c17e3SSantosh Shilimkar __pa(MAX_DMA_ADDRESS)); 2141bbddff05STejun Heo if (!ai || !fc) 2142bbddff05STejun Heo panic("Failed to allocate memory for percpu areas."); 2143100d13c3SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2144100d13c3SCatalin Marinas kmemleak_free(fc); 2145bbddff05STejun Heo 2146bbddff05STejun Heo ai->dyn_size = unit_size; 2147bbddff05STejun Heo ai->unit_size = unit_size; 2148bbddff05STejun Heo ai->atom_size = unit_size; 2149bbddff05STejun Heo ai->alloc_size = unit_size; 2150bbddff05STejun Heo ai->groups[0].nr_units = 1; 2151bbddff05STejun Heo ai->groups[0].cpu_map[0] = 0; 2152bbddff05STejun Heo 2153bbddff05STejun Heo if (pcpu_setup_first_chunk(ai, fc) < 0) 2154bbddff05STejun Heo panic("Failed to initialize percpu areas."); 21553189eddbSHonggang Li 21563189eddbSHonggang Li pcpu_free_alloc_info(ai); 2157bbddff05STejun Heo } 2158bbddff05STejun Heo 2159bbddff05STejun Heo #endif /* CONFIG_SMP */ 2160bbddff05STejun Heo 2161099a19d9STejun Heo /* 2162099a19d9STejun Heo * First and reserved chunks are initialized with temporary allocation 2163099a19d9STejun Heo * map in initdata so that they can be used before slab is online. 2164099a19d9STejun Heo * This function is called after slab is brought up and replaces those 2165099a19d9STejun Heo * with properly allocated maps. 2166099a19d9STejun Heo */ 2167099a19d9STejun Heo void __init percpu_init_late(void) 2168099a19d9STejun Heo { 2169099a19d9STejun Heo struct pcpu_chunk *target_chunks[] = 2170099a19d9STejun Heo { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; 2171099a19d9STejun Heo struct pcpu_chunk *chunk; 2172099a19d9STejun Heo unsigned long flags; 2173099a19d9STejun Heo int i; 2174099a19d9STejun Heo 2175099a19d9STejun Heo for (i = 0; (chunk = target_chunks[i]); i++) { 2176099a19d9STejun Heo int *map; 2177099a19d9STejun Heo const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); 2178099a19d9STejun Heo 2179099a19d9STejun Heo BUILD_BUG_ON(size > PAGE_SIZE); 2180099a19d9STejun Heo 218190459ce0SBob Liu map = pcpu_mem_zalloc(size); 2182099a19d9STejun Heo BUG_ON(!map); 2183099a19d9STejun Heo 2184099a19d9STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 2185099a19d9STejun Heo memcpy(map, chunk->map, size); 2186099a19d9STejun Heo chunk->map = map; 2187099a19d9STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 2188099a19d9STejun Heo } 2189099a19d9STejun Heo } 2190