1fbf59bc9STejun Heo /*
288999a89STejun Heo * mm/percpu.c - percpu memory allocator
3fbf59bc9STejun Heo *
4fbf59bc9STejun Heo * Copyright (C) 2009 SUSE Linux Products GmbH
5fbf59bc9STejun Heo * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6fbf59bc9STejun Heo *
79c015162SDennis Zhou (Facebook) * This file is released under the GPLv2 license.
8fbf59bc9STejun Heo *
99c015162SDennis Zhou (Facebook) * The percpu allocator handles both static and dynamic areas. Percpu
109c015162SDennis Zhou (Facebook) * areas are allocated in chunks which are divided into units. There is
119c015162SDennis Zhou (Facebook) * a 1-to-1 mapping for units to possible cpus. These units are grouped
129c015162SDennis Zhou (Facebook) * based on NUMA properties of the machine.
13fbf59bc9STejun Heo *
14fbf59bc9STejun Heo * c0 c1 c2
15fbf59bc9STejun Heo * ------------------- ------------------- ------------
16fbf59bc9STejun Heo * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
17fbf59bc9STejun Heo * ------------------- ...... ------------------- .... ------------
18fbf59bc9STejun Heo *
199c015162SDennis Zhou (Facebook) * Allocation is done by offsets into a unit's address space. I.e., an
209c015162SDennis Zhou (Facebook) * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
219c015162SDennis Zhou (Facebook) * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
229c015162SDennis Zhou (Facebook) * and even sparse. Access is handled by configuring percpu base
239c015162SDennis Zhou (Facebook) * registers according to the cpu to unit mappings and offsetting the
249c015162SDennis Zhou (Facebook) * base address using pcpu_unit_size.
25fbf59bc9STejun Heo *
269c015162SDennis Zhou (Facebook) * There is special consideration for the first chunk which must handle
279c015162SDennis Zhou (Facebook) * the static percpu variables in the kernel image as allocation services
289c015162SDennis Zhou (Facebook) * are not online yet. In short, the first chunk is structured like so:
299c015162SDennis Zhou (Facebook) *
309c015162SDennis Zhou (Facebook) * <Static | [Reserved] | Dynamic>
319c015162SDennis Zhou (Facebook) *
329c015162SDennis Zhou (Facebook) * The static data is copied from the original section managed by the
339c015162SDennis Zhou (Facebook) * linker. The reserved section, if non-zero, primarily manages static
349c015162SDennis Zhou (Facebook) * percpu variables from kernel modules. Finally, the dynamic section
359c015162SDennis Zhou (Facebook) * takes care of normal allocations.
36fbf59bc9STejun Heo *
37fbf59bc9STejun Heo * Allocation state in each chunk is kept using an array of integers
38fbf59bc9STejun Heo * on chunk->map. Each entry is the offset at which an area starts and
39fbf59bc9STejun Heo * its low bit is set iff the area is allocated. Allocation inside a
40fbf59bc9STejun Heo * chunk is done by scanning this map sequentially and serving the
41fbf59bc9STejun Heo * first matching entry. This is mostly copied from the percpu_modalloc() allocator.
42e1b9aa3fSChristoph Lameter * Chunks can be determined from the address using the index field
43e1b9aa3fSChristoph Lameter * in the page struct. The index field contains a pointer to the chunk.
44fbf59bc9STejun Heo *
459c015162SDennis Zhou (Facebook) * These chunks are organized into lists according to free_size, and the
469c015162SDennis Zhou (Facebook) * allocator tries to allocate from the fullest chunk first. Each chunk maintains
479c015162SDennis Zhou (Facebook) * a maximum contiguous area size hint which is guaranteed to be equal
489c015162SDennis Zhou (Facebook) * to or larger than the maximum contiguous area in the chunk. This
499c015162SDennis Zhou (Facebook) * helps prevent the allocator from iterating over chunks unnecessarily.
509c015162SDennis Zhou (Facebook) *
514091fb95SMasahiro Yamada * To use this allocator, arch code should do the following:
52fbf59bc9STejun Heo *
53fbf59bc9STejun Heo * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
54e0100983STejun Heo * regular address to percpu pointer and back if they need to be
55e0100983STejun Heo * different from the default
56fbf59bc9STejun Heo *
578d408b4bSTejun Heo * - use pcpu_setup_first_chunk() during percpu area initialization to
588d408b4bSTejun Heo * set up the first chunk containing the kernel static percpu area
59fbf59bc9STejun Heo */
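/*
 * Worked example (editor's sketch, not part of the original file): with
 * the layout above, one offset fully describes an allocation for every
 * cpu. If an area sits at offset 0x1800 of a chunk, then cpu c's copy
 * lives at
 *
 *	chunk->base_addr + pcpu_unit_offsets[c] + 0x1800
 *
 * which is why a single percpu pointer can serve all possible cpus.
 */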
60fbf59bc9STejun Heo
61870d4b12SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
62870d4b12SJoe Perches
63fbf59bc9STejun Heo #include <linux/bitmap.h>
64fbf59bc9STejun Heo #include <linux/bootmem.h>
65fd1e8a1fSTejun Heo #include <linux/err.h>
66fbf59bc9STejun Heo #include <linux/list.h>
67a530b795STejun Heo #include <linux/log2.h>
68fbf59bc9STejun Heo #include <linux/mm.h>
69fbf59bc9STejun Heo #include <linux/module.h>
70fbf59bc9STejun Heo #include <linux/mutex.h>
71fbf59bc9STejun Heo #include <linux/percpu.h>
72fbf59bc9STejun Heo #include <linux/pfn.h>
73fbf59bc9STejun Heo #include <linux/slab.h>
74ccea34b5STejun Heo #include <linux/spinlock.h>
75fbf59bc9STejun Heo #include <linux/vmalloc.h>
76a56dbddfSTejun Heo #include <linux/workqueue.h>
77f528f0b8SCatalin Marinas #include <linux/kmemleak.h>
78fbf59bc9STejun Heo
79fbf59bc9STejun Heo #include <asm/cacheflush.h>
80e0100983STejun Heo #include <asm/sections.h>
81fbf59bc9STejun Heo #include <asm/tlbflush.h>
823b034b0dSVivek Goyal #include <asm/io.h>
83fbf59bc9STejun Heo
84df95e795SDennis Zhou #define CREATE_TRACE_POINTS
85df95e795SDennis Zhou #include <trace/events/percpu.h>
86df95e795SDennis Zhou
878fa3ed80SDennis Zhou #include "percpu-internal.h"
888fa3ed80SDennis Zhou
89fbf59bc9STejun Heo #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
90fbf59bc9STejun Heo #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
919c824b6aSTejun Heo #define PCPU_ATOMIC_MAP_MARGIN_LOW 32
929c824b6aSTejun Heo #define PCPU_ATOMIC_MAP_MARGIN_HIGH 64
931a4d7607STejun Heo #define PCPU_EMPTY_POP_PAGES_LOW 2
941a4d7607STejun Heo #define PCPU_EMPTY_POP_PAGES_HIGH 4
95fbf59bc9STejun Heo
96bbddff05STejun Heo #ifdef CONFIG_SMP
97e0100983STejun Heo /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
98e0100983STejun Heo #ifndef __addr_to_pcpu_ptr
99e0100983STejun Heo #define __addr_to_pcpu_ptr(addr) \
10043cf38ebSTejun Heo (void __percpu *)((unsigned long)(addr) - \
10143cf38ebSTejun Heo (unsigned long)pcpu_base_addr + \
10243cf38ebSTejun Heo (unsigned long)__per_cpu_start)
103e0100983STejun Heo #endif
104e0100983STejun Heo #ifndef __pcpu_ptr_to_addr
105e0100983STejun Heo #define __pcpu_ptr_to_addr(ptr) \
10643cf38ebSTejun Heo (void __force *)((unsigned long)(ptr) + \
10743cf38ebSTejun Heo (unsigned long)pcpu_base_addr - \
10843cf38ebSTejun Heo (unsigned long)__per_cpu_start)
109e0100983STejun Heo #endif
110bbddff05STejun Heo #else /* CONFIG_SMP */
111bbddff05STejun Heo /* on UP, it's always identity mapped */
112bbddff05STejun Heo #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
113bbddff05STejun Heo #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
114bbddff05STejun Heo #endif /* CONFIG_SMP */
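/*
 * Illustrative note (editor's addition): the SMP macros above are
 * inverses of each other, so for a dynamic percpu pointer @p and a
 * possible cpu @c,
 *
 *	__pcpu_ptr_to_addr(p) + pcpu_unit_offsets[c]
 *
 * yields the kernel address of @c's copy -- the value per_cpu_ptr(p, c)
 * computes once the arch has derived its percpu offsets from these
 * tables. On UP both macros degenerate to casts.
 */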
115e0100983STejun Heo
1161328710bSDaniel Micay static int pcpu_unit_pages __ro_after_init;
1171328710bSDaniel Micay static int pcpu_unit_size __ro_after_init;
1181328710bSDaniel Micay static int pcpu_nr_units __ro_after_init;
1191328710bSDaniel Micay static int pcpu_atom_size __ro_after_init;
1208fa3ed80SDennis Zhou int pcpu_nr_slots __ro_after_init;
1211328710bSDaniel Micay static size_t pcpu_chunk_struct_size __ro_after_init;
122fbf59bc9STejun Heo
123a855b84cSTejun Heo /* cpus with the lowest and highest unit addresses */
1241328710bSDaniel Micay static unsigned int pcpu_low_unit_cpu __ro_after_init;
1251328710bSDaniel Micay static unsigned int pcpu_high_unit_cpu __ro_after_init;
1262f39e637STejun Heo
127fbf59bc9STejun Heo /* the address of the first chunk which starts with the kernel static area */
1281328710bSDaniel Micay void *pcpu_base_addr __ro_after_init;
129fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(pcpu_base_addr);
130fbf59bc9STejun Heo
1311328710bSDaniel Micay static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
1321328710bSDaniel Micay const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
1332f39e637STejun Heo
1346563297cSTejun Heo /* group information, used for vm allocation */
1351328710bSDaniel Micay static int pcpu_nr_groups __ro_after_init;
1361328710bSDaniel Micay static const unsigned long *pcpu_group_offsets __ro_after_init;
1371328710bSDaniel Micay static const size_t *pcpu_group_sizes __ro_after_init;
1386563297cSTejun Heo
139ae9e6bc9STejun Heo /*
140ae9e6bc9STejun Heo * The first chunk which always exists. Note that unlike other
141ae9e6bc9STejun Heo * chunks, this one can be allocated and mapped in several different
142ae9e6bc9STejun Heo * ways and thus often doesn't live in the vmalloc area.
143ae9e6bc9STejun Heo */
1448fa3ed80SDennis Zhou struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
145ae9e6bc9STejun Heo
146ae9e6bc9STejun Heo /*
147ae9e6bc9STejun Heo * Optional reserved chunk. This chunk reserves part of the first
148e2266705SDennis Zhou (Facebook) * chunk and serves it for reserved allocations. When the reserved
149e2266705SDennis Zhou (Facebook) * region doesn't exist, the following variable is NULL.
150ae9e6bc9STejun Heo */
1518fa3ed80SDennis Zhou struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
152edcb4639STejun Heo
1538fa3ed80SDennis Zhou DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
1546710e594STejun Heo static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
155fbf59bc9STejun Heo
1568fa3ed80SDennis Zhou struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
157fbf59bc9STejun Heo
1584f996e23STejun Heo /* chunks which need their map areas extended, protected by pcpu_lock */
1594f996e23STejun Heo static LIST_HEAD(pcpu_map_extend_chunks);
1604f996e23STejun Heo
161b539b87fSTejun Heo /*
162b539b87fSTejun Heo * The number of empty populated pages, protected by pcpu_lock. The
163b539b87fSTejun Heo * reserved chunk doesn't contribute to the count.
164b539b87fSTejun Heo */
1656b9b6f39SDennis Zhou (Facebook) int pcpu_nr_empty_pop_pages;
166b539b87fSTejun Heo
1671a4d7607STejun Heo /*
1681a4d7607STejun Heo * Balance work is used to populate or destroy chunks asynchronously. We
1691a4d7607STejun Heo * try to keep the number of populated free pages between
1701a4d7607STejun Heo * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
1711a4d7607STejun Heo * empty chunk.
1721a4d7607STejun Heo */
173fe6bd8c3STejun Heo static void pcpu_balance_workfn(struct work_struct *work);
174fe6bd8c3STejun Heo static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
1751a4d7607STejun Heo static bool pcpu_async_enabled __read_mostly;
1761a4d7607STejun Heo static bool pcpu_atomic_alloc_failed;
1771a4d7607STejun Heo
1781a4d7607STejun Heo static void pcpu_schedule_balance_work(void)
1791a4d7607STejun Heo {
1801a4d7607STejun Heo if (pcpu_async_enabled)
1811a4d7607STejun Heo schedule_work(&pcpu_balance_work);
1821a4d7607STejun Heo }
183a56dbddfSTejun Heo
184*c0ebfdc3SDennis Zhou (Facebook) /**
185*c0ebfdc3SDennis Zhou (Facebook) * pcpu_addr_in_first_chunk - address check for first chunk's dynamic region
186*c0ebfdc3SDennis Zhou (Facebook) * @addr: percpu address of interest
187*c0ebfdc3SDennis Zhou (Facebook) *
188*c0ebfdc3SDennis Zhou (Facebook) * Here the first chunk is taken to be just its dynamic region.
189*c0ebfdc3SDennis Zhou (Facebook) * While the true first chunk is composed of the static, dynamic, and
190*c0ebfdc3SDennis Zhou (Facebook) * reserved regions, it is the chunk that serves the dynamic region that is
191*c0ebfdc3SDennis Zhou (Facebook) * circulated in the chunk slots.
192*c0ebfdc3SDennis Zhou (Facebook) *
193*c0ebfdc3SDennis Zhou (Facebook) * The reserved chunk has a separate check and the static region addresses
194*c0ebfdc3SDennis Zhou (Facebook) * should never be passed into the percpu allocator.
195*c0ebfdc3SDennis Zhou (Facebook) *
196*c0ebfdc3SDennis Zhou (Facebook) * RETURNS:
197*c0ebfdc3SDennis Zhou (Facebook) * True if the address is in the dynamic region of the first chunk.
198*c0ebfdc3SDennis Zhou (Facebook) */
199020ec653STejun Heo static bool pcpu_addr_in_first_chunk(void *addr)
200020ec653STejun Heo {
201*c0ebfdc3SDennis Zhou (Facebook) void *start_addr = pcpu_first_chunk->base_addr +
202*c0ebfdc3SDennis Zhou (Facebook) pcpu_first_chunk->start_offset;
203*c0ebfdc3SDennis Zhou (Facebook) void *end_addr = pcpu_first_chunk->base_addr +
204*c0ebfdc3SDennis Zhou (Facebook) pcpu_first_chunk->nr_pages * PAGE_SIZE -
205*c0ebfdc3SDennis Zhou (Facebook) pcpu_first_chunk->end_offset;
206020ec653STejun Heo
207*c0ebfdc3SDennis Zhou (Facebook) return addr >= start_addr && addr < end_addr;
208020ec653STejun Heo }
209020ec653STejun Heo
210*c0ebfdc3SDennis Zhou (Facebook) /**
211*c0ebfdc3SDennis Zhou (Facebook) * pcpu_addr_in_reserved_chunk - address check for reserved region
212*c0ebfdc3SDennis Zhou (Facebook) *
213*c0ebfdc3SDennis Zhou (Facebook) * The reserved region is a part of the first chunk and primarily serves
214*c0ebfdc3SDennis Zhou (Facebook) * static percpu variables from kernel modules.
215*c0ebfdc3SDennis Zhou (Facebook) *
216*c0ebfdc3SDennis Zhou (Facebook) * RETURNS:
217*c0ebfdc3SDennis Zhou (Facebook) * True if the address is in the reserved region.
218*c0ebfdc3SDennis Zhou (Facebook) */ 219020ec653STejun Heo static bool pcpu_addr_in_reserved_chunk(void *addr) 220020ec653STejun Heo { 221*c0ebfdc3SDennis Zhou (Facebook) void *start_addr, *end_addr; 222020ec653STejun Heo 223*c0ebfdc3SDennis Zhou (Facebook) if (!pcpu_reserved_chunk) 224*c0ebfdc3SDennis Zhou (Facebook) return false; 225*c0ebfdc3SDennis Zhou (Facebook) 226*c0ebfdc3SDennis Zhou (Facebook) start_addr = pcpu_reserved_chunk->base_addr + 227*c0ebfdc3SDennis Zhou (Facebook) pcpu_reserved_chunk->start_offset; 228*c0ebfdc3SDennis Zhou (Facebook) end_addr = pcpu_reserved_chunk->base_addr + 229*c0ebfdc3SDennis Zhou (Facebook) pcpu_reserved_chunk->nr_pages * PAGE_SIZE - 230*c0ebfdc3SDennis Zhou (Facebook) pcpu_reserved_chunk->end_offset; 231*c0ebfdc3SDennis Zhou (Facebook) 232*c0ebfdc3SDennis Zhou (Facebook) return addr >= start_addr && addr < end_addr; 233020ec653STejun Heo } 234020ec653STejun Heo 235d9b55eebSTejun Heo static int __pcpu_size_to_slot(int size) 236fbf59bc9STejun Heo { 237cae3aeb8STejun Heo int highbit = fls(size); /* size is in bytes */ 238fbf59bc9STejun Heo return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 239fbf59bc9STejun Heo } 240fbf59bc9STejun Heo 241d9b55eebSTejun Heo static int pcpu_size_to_slot(int size) 242d9b55eebSTejun Heo { 243d9b55eebSTejun Heo if (size == pcpu_unit_size) 244d9b55eebSTejun Heo return pcpu_nr_slots - 1; 245d9b55eebSTejun Heo return __pcpu_size_to_slot(size); 246d9b55eebSTejun Heo } 247d9b55eebSTejun Heo 248fbf59bc9STejun Heo static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 249fbf59bc9STejun Heo { 250fbf59bc9STejun Heo if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) 251fbf59bc9STejun Heo return 0; 252fbf59bc9STejun Heo 253fbf59bc9STejun Heo return pcpu_size_to_slot(chunk->free_size); 254fbf59bc9STejun Heo } 255fbf59bc9STejun Heo 25688999a89STejun Heo /* set the pointer to a chunk in a page struct */ 25788999a89STejun Heo static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) 25888999a89STejun Heo { 25988999a89STejun Heo page->index = (unsigned long)pcpu; 26088999a89STejun Heo } 26188999a89STejun Heo 26288999a89STejun Heo /* obtain pointer to a chunk from a page struct */ 26388999a89STejun Heo static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) 26488999a89STejun Heo { 26588999a89STejun Heo return (struct pcpu_chunk *)page->index; 26688999a89STejun Heo } 26788999a89STejun Heo 26888999a89STejun Heo static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) 269fbf59bc9STejun Heo { 2702f39e637STejun Heo return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; 271fbf59bc9STejun Heo } 272fbf59bc9STejun Heo 273*c0ebfdc3SDennis Zhou (Facebook) static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx) 274*c0ebfdc3SDennis Zhou (Facebook) { 275*c0ebfdc3SDennis Zhou (Facebook) return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT); 276*c0ebfdc3SDennis Zhou (Facebook) } 277*c0ebfdc3SDennis Zhou (Facebook) 2789983b6f0STejun Heo static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, 279fbf59bc9STejun Heo unsigned int cpu, int page_idx) 280fbf59bc9STejun Heo { 281*c0ebfdc3SDennis Zhou (Facebook) return (unsigned long)chunk->base_addr + 282*c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(cpu, page_idx); 283fbf59bc9STejun Heo } 284fbf59bc9STejun Heo 28588999a89STejun Heo static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk, 28688999a89STejun Heo int *rs, int *re, int end) 287ce3141a2STejun Heo { 288ce3141a2STejun Heo *rs = 
find_next_zero_bit(chunk->populated, end, *rs); 289ce3141a2STejun Heo *re = find_next_bit(chunk->populated, end, *rs + 1); 290ce3141a2STejun Heo } 291ce3141a2STejun Heo 29288999a89STejun Heo static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, 29388999a89STejun Heo int *rs, int *re, int end) 294ce3141a2STejun Heo { 295ce3141a2STejun Heo *rs = find_next_bit(chunk->populated, end, *rs); 296ce3141a2STejun Heo *re = find_next_zero_bit(chunk->populated, end, *rs + 1); 297ce3141a2STejun Heo } 298ce3141a2STejun Heo 299ce3141a2STejun Heo /* 300ce3141a2STejun Heo * (Un)populated page region iterators. Iterate over (un)populated 301b595076aSUwe Kleine-König * page regions between @start and @end in @chunk. @rs and @re should 302ce3141a2STejun Heo * be integer variables and will be set to start and end page index of 303ce3141a2STejun Heo * the current region. 304ce3141a2STejun Heo */ 305ce3141a2STejun Heo #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ 306ce3141a2STejun Heo for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \ 307ce3141a2STejun Heo (rs) < (re); \ 308ce3141a2STejun Heo (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end))) 309ce3141a2STejun Heo 310ce3141a2STejun Heo #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ 311ce3141a2STejun Heo for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \ 312ce3141a2STejun Heo (rs) < (re); \ 313ce3141a2STejun Heo (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) 314ce3141a2STejun Heo 315fbf59bc9STejun Heo /** 31690459ce0SBob Liu * pcpu_mem_zalloc - allocate memory 3171880d93bSTejun Heo * @size: bytes to allocate 318fbf59bc9STejun Heo * 3191880d93bSTejun Heo * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 32090459ce0SBob Liu * kzalloc() is used; otherwise, vzalloc() is used. The returned 3211880d93bSTejun Heo * memory is always zeroed. 322fbf59bc9STejun Heo * 323ccea34b5STejun Heo * CONTEXT: 324ccea34b5STejun Heo * Does GFP_KERNEL allocation. 325ccea34b5STejun Heo * 326fbf59bc9STejun Heo * RETURNS: 3271880d93bSTejun Heo * Pointer to the allocated area on success, NULL on failure. 328fbf59bc9STejun Heo */ 32990459ce0SBob Liu static void *pcpu_mem_zalloc(size_t size) 330fbf59bc9STejun Heo { 331099a19d9STejun Heo if (WARN_ON_ONCE(!slab_is_available())) 332099a19d9STejun Heo return NULL; 333099a19d9STejun Heo 334fbf59bc9STejun Heo if (size <= PAGE_SIZE) 3351880d93bSTejun Heo return kzalloc(size, GFP_KERNEL); 3367af4c093SJesper Juhl else 3377af4c093SJesper Juhl return vzalloc(size); 3381880d93bSTejun Heo } 339fbf59bc9STejun Heo 3401880d93bSTejun Heo /** 3411880d93bSTejun Heo * pcpu_mem_free - free memory 3421880d93bSTejun Heo * @ptr: memory to free 3431880d93bSTejun Heo * 34490459ce0SBob Liu * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). 3451880d93bSTejun Heo */ 3461d5cfdb0STetsuo Handa static void pcpu_mem_free(void *ptr) 3471880d93bSTejun Heo { 3481d5cfdb0STetsuo Handa kvfree(ptr); 349fbf59bc9STejun Heo } 350fbf59bc9STejun Heo 351fbf59bc9STejun Heo /** 352b539b87fSTejun Heo * pcpu_count_occupied_pages - count the number of pages an area occupies 353b539b87fSTejun Heo * @chunk: chunk of interest 354b539b87fSTejun Heo * @i: index of the area in question 355b539b87fSTejun Heo * 356b539b87fSTejun Heo * Count the number of pages chunk's @i'th area occupies. 
When the area's
357b539b87fSTejun Heo * start and/or end address isn't aligned to page boundary, the straddled
358b539b87fSTejun Heo * page is included in the count iff the rest of the page is free.
359b539b87fSTejun Heo */
360b539b87fSTejun Heo static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
361b539b87fSTejun Heo {
362b539b87fSTejun Heo int off = chunk->map[i] & ~1;
363b539b87fSTejun Heo int end = chunk->map[i + 1] & ~1;
364b539b87fSTejun Heo
365b539b87fSTejun Heo if (!PAGE_ALIGNED(off) && i > 0) {
366b539b87fSTejun Heo int prev = chunk->map[i - 1];
367b539b87fSTejun Heo
368b539b87fSTejun Heo if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
369b539b87fSTejun Heo off = round_down(off, PAGE_SIZE);
370b539b87fSTejun Heo }
371b539b87fSTejun Heo
372b539b87fSTejun Heo if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
373b539b87fSTejun Heo int next = chunk->map[i + 1];
374b539b87fSTejun Heo int nend = chunk->map[i + 2] & ~1;
375b539b87fSTejun Heo
376b539b87fSTejun Heo if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
377b539b87fSTejun Heo end = round_up(end, PAGE_SIZE);
378b539b87fSTejun Heo }
379b539b87fSTejun Heo
380b539b87fSTejun Heo return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
381b539b87fSTejun Heo }
382b539b87fSTejun Heo
383b539b87fSTejun Heo /**
384fbf59bc9STejun Heo * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
385fbf59bc9STejun Heo * @chunk: chunk of interest
386fbf59bc9STejun Heo * @oslot: the previous slot it was on
387fbf59bc9STejun Heo *
388fbf59bc9STejun Heo * This function is called after an allocation or free changed @chunk.
389fbf59bc9STejun Heo * New slot according to the changed state is determined and @chunk is
390edcb4639STejun Heo * moved to the slot. Note that the reserved chunk is never put on
391edcb4639STejun Heo * chunk slots.
392ccea34b5STejun Heo *
393ccea34b5STejun Heo * CONTEXT:
394ccea34b5STejun Heo * pcpu_lock.
395fbf59bc9STejun Heo */
396fbf59bc9STejun Heo static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
397fbf59bc9STejun Heo {
398fbf59bc9STejun Heo int nslot = pcpu_chunk_slot(chunk);
399fbf59bc9STejun Heo
400edcb4639STejun Heo if (chunk != pcpu_reserved_chunk && oslot != nslot) {
401fbf59bc9STejun Heo if (oslot < nslot)
402fbf59bc9STejun Heo list_move(&chunk->list, &pcpu_slot[nslot]);
403fbf59bc9STejun Heo else
404fbf59bc9STejun Heo list_move_tail(&chunk->list, &pcpu_slot[nslot]);
405fbf59bc9STejun Heo }
406fbf59bc9STejun Heo }
407fbf59bc9STejun Heo
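/*
 * Worked example (editor's note): with PCPU_SLOT_BASE_SHIFT == 5,
 * __pcpu_size_to_slot() maps a size to max(fls(size) - 5 + 2, 1). A
 * chunk with 1024 bytes free (fls == 11) therefore sits on slot 8,
 * while a chunk whose free_size or contig_hint can't hold even an int
 * is parked on slot 0 by pcpu_chunk_slot(). pcpu_chunk_relocate()
 * above is what moves a chunk between these lists as its free_size
 * changes.
 */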
408fbf59bc9STejun Heo /**
409833af842STejun Heo * pcpu_need_to_extend - determine whether chunk area map needs to be extended
410833af842STejun Heo * @chunk: chunk of interest
4119c824b6aSTejun Heo * @is_atomic: the allocation context
4129f7dcf22STejun Heo *
4139c824b6aSTejun Heo * Determine whether area map of @chunk needs to be extended. If
4149c824b6aSTejun Heo * @is_atomic, only the amount necessary for a new allocation is
4159c824b6aSTejun Heo * considered; however, async extension is scheduled if the left amount is
4169c824b6aSTejun Heo * low. If !@is_atomic, it aims for more empty space. Combined, this
4179c824b6aSTejun Heo * ensures that the map is likely to have enough available space to
4189c824b6aSTejun Heo * accommodate atomic allocations which can't extend maps directly.
4199f7dcf22STejun Heo *
420ccea34b5STejun Heo * CONTEXT:
421833af842STejun Heo * pcpu_lock.
422ccea34b5STejun Heo *
4239f7dcf22STejun Heo * RETURNS:
424833af842STejun Heo * New target map allocation length if extension is necessary, 0
425833af842STejun Heo * otherwise.
4269f7dcf22STejun Heo */
4279c824b6aSTejun Heo static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
4289f7dcf22STejun Heo {
4299c824b6aSTejun Heo int margin, new_alloc;
4309f7dcf22STejun Heo
4314f996e23STejun Heo lockdep_assert_held(&pcpu_lock);
4324f996e23STejun Heo
4339c824b6aSTejun Heo if (is_atomic) {
4349c824b6aSTejun Heo margin = 3;
4359c824b6aSTejun Heo
4369c824b6aSTejun Heo if (chunk->map_alloc <
4374f996e23STejun Heo chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
4384f996e23STejun Heo if (list_empty(&chunk->map_extend_list)) {
4394f996e23STejun Heo list_add_tail(&chunk->map_extend_list,
4404f996e23STejun Heo &pcpu_map_extend_chunks);
4414f996e23STejun Heo pcpu_schedule_balance_work();
4424f996e23STejun Heo }
4434f996e23STejun Heo }
4449c824b6aSTejun Heo } else {
4459c824b6aSTejun Heo margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
4469c824b6aSTejun Heo }
4479c824b6aSTejun Heo
4489c824b6aSTejun Heo if (chunk->map_alloc >= chunk->map_used + margin)
4499f7dcf22STejun Heo return 0;
4509f7dcf22STejun Heo
4519f7dcf22STejun Heo new_alloc = PCPU_DFL_MAP_ALLOC;
4529c824b6aSTejun Heo while (new_alloc < chunk->map_used + margin)
4539f7dcf22STejun Heo new_alloc *= 2;
4549f7dcf22STejun Heo
455833af842STejun Heo return new_alloc;
456ccea34b5STejun Heo }
457ccea34b5STejun Heo
458833af842STejun Heo /**
459833af842STejun Heo * pcpu_extend_area_map - extend area map of a chunk
460833af842STejun Heo * @chunk: chunk of interest
461833af842STejun Heo * @new_alloc: new target allocation length of the area map
462833af842STejun Heo *
463833af842STejun Heo * Extend area map of @chunk to have @new_alloc entries.
464833af842STejun Heo *
465833af842STejun Heo * CONTEXT:
466833af842STejun Heo * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
467833af842STejun Heo *
468833af842STejun Heo * RETURNS:
469833af842STejun Heo * 0 on success, -errno on failure.
470ccea34b5STejun Heo */ 471833af842STejun Heo static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) 472833af842STejun Heo { 473833af842STejun Heo int *old = NULL, *new = NULL; 474833af842STejun Heo size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); 475833af842STejun Heo unsigned long flags; 4769f7dcf22STejun Heo 4776710e594STejun Heo lockdep_assert_held(&pcpu_alloc_mutex); 4786710e594STejun Heo 47990459ce0SBob Liu new = pcpu_mem_zalloc(new_size); 480833af842STejun Heo if (!new) 481833af842STejun Heo return -ENOMEM; 482833af842STejun Heo 483833af842STejun Heo /* acquire pcpu_lock and switch to new area map */ 484833af842STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 485833af842STejun Heo 486833af842STejun Heo if (new_alloc <= chunk->map_alloc) 487833af842STejun Heo goto out_unlock; 488833af842STejun Heo 489833af842STejun Heo old_size = chunk->map_alloc * sizeof(chunk->map[0]); 490a002d148SHuang Shijie old = chunk->map; 491a002d148SHuang Shijie 492a002d148SHuang Shijie memcpy(new, old, old_size); 4939f7dcf22STejun Heo 4949f7dcf22STejun Heo chunk->map_alloc = new_alloc; 4959f7dcf22STejun Heo chunk->map = new; 496833af842STejun Heo new = NULL; 497833af842STejun Heo 498833af842STejun Heo out_unlock: 499833af842STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 500833af842STejun Heo 501833af842STejun Heo /* 502833af842STejun Heo * pcpu_mem_free() might end up calling vfree() which uses 503833af842STejun Heo * IRQ-unsafe lock and thus can't be called under pcpu_lock. 504833af842STejun Heo */ 5051d5cfdb0STetsuo Handa pcpu_mem_free(old); 5061d5cfdb0STetsuo Handa pcpu_mem_free(new); 507833af842STejun Heo 5089f7dcf22STejun Heo return 0; 5099f7dcf22STejun Heo } 5109f7dcf22STejun Heo 5119f7dcf22STejun Heo /** 512a16037c8STejun Heo * pcpu_fit_in_area - try to fit the requested allocation in a candidate area 513a16037c8STejun Heo * @chunk: chunk the candidate area belongs to 514a16037c8STejun Heo * @off: the offset to the start of the candidate area 515a16037c8STejun Heo * @this_size: the size of the candidate area 516a16037c8STejun Heo * @size: the size of the target allocation 517a16037c8STejun Heo * @align: the alignment of the target allocation 518a16037c8STejun Heo * @pop_only: only allocate from already populated region 519a16037c8STejun Heo * 520a16037c8STejun Heo * We're trying to allocate @size bytes aligned at @align. @chunk's area 521a16037c8STejun Heo * at @off sized @this_size is a candidate. This function determines 522a16037c8STejun Heo * whether the target allocation fits in the candidate area and returns the 523a16037c8STejun Heo * number of bytes to pad after @off. If the target area doesn't fit, -1 524a16037c8STejun Heo * is returned. 525a16037c8STejun Heo * 526a16037c8STejun Heo * If @pop_only is %true, this function only considers the already 527a16037c8STejun Heo * populated part of the candidate area. 
528a16037c8STejun Heo */ 529a16037c8STejun Heo static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size, 530a16037c8STejun Heo int size, int align, bool pop_only) 531a16037c8STejun Heo { 532a16037c8STejun Heo int cand_off = off; 533a16037c8STejun Heo 534a16037c8STejun Heo while (true) { 535a16037c8STejun Heo int head = ALIGN(cand_off, align) - off; 536a16037c8STejun Heo int page_start, page_end, rs, re; 537a16037c8STejun Heo 538a16037c8STejun Heo if (this_size < head + size) 539a16037c8STejun Heo return -1; 540a16037c8STejun Heo 541a16037c8STejun Heo if (!pop_only) 542a16037c8STejun Heo return head; 543a16037c8STejun Heo 544a16037c8STejun Heo /* 545a16037c8STejun Heo * If the first unpopulated page is beyond the end of the 546a16037c8STejun Heo * allocation, the whole allocation is populated; 547a16037c8STejun Heo * otherwise, retry from the end of the unpopulated area. 548a16037c8STejun Heo */ 549a16037c8STejun Heo page_start = PFN_DOWN(head + off); 550a16037c8STejun Heo page_end = PFN_UP(head + off + size); 551a16037c8STejun Heo 552a16037c8STejun Heo rs = page_start; 553a16037c8STejun Heo pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size)); 554a16037c8STejun Heo if (rs >= page_end) 555a16037c8STejun Heo return head; 556a16037c8STejun Heo cand_off = re * PAGE_SIZE; 557a16037c8STejun Heo } 558a16037c8STejun Heo } 559a16037c8STejun Heo 560a16037c8STejun Heo /** 561fbf59bc9STejun Heo * pcpu_alloc_area - allocate area from a pcpu_chunk 562fbf59bc9STejun Heo * @chunk: chunk of interest 563cae3aeb8STejun Heo * @size: wanted size in bytes 564fbf59bc9STejun Heo * @align: wanted align 565a16037c8STejun Heo * @pop_only: allocate only from the populated area 566b539b87fSTejun Heo * @occ_pages_p: out param for the number of pages the area occupies 567fbf59bc9STejun Heo * 568fbf59bc9STejun Heo * Try to allocate @size bytes area aligned at @align from @chunk. 569fbf59bc9STejun Heo * Note that this function only allocates the offset. It doesn't 570fbf59bc9STejun Heo * populate or map the area. 571fbf59bc9STejun Heo * 5729f7dcf22STejun Heo * @chunk->map must have at least two free slots. 5739f7dcf22STejun Heo * 574ccea34b5STejun Heo * CONTEXT: 575ccea34b5STejun Heo * pcpu_lock. 576ccea34b5STejun Heo * 577fbf59bc9STejun Heo * RETURNS: 5789f7dcf22STejun Heo * Allocated offset in @chunk on success, -1 if no matching area is 5799f7dcf22STejun Heo * found. 
580fbf59bc9STejun Heo */ 581a16037c8STejun Heo static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align, 582b539b87fSTejun Heo bool pop_only, int *occ_pages_p) 583fbf59bc9STejun Heo { 584fbf59bc9STejun Heo int oslot = pcpu_chunk_slot(chunk); 585fbf59bc9STejun Heo int max_contig = 0; 586fbf59bc9STejun Heo int i, off; 5873d331ad7SAl Viro bool seen_free = false; 588723ad1d9SAl Viro int *p; 589fbf59bc9STejun Heo 5903d331ad7SAl Viro for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) { 591fbf59bc9STejun Heo int head, tail; 592723ad1d9SAl Viro int this_size; 593723ad1d9SAl Viro 594723ad1d9SAl Viro off = *p; 595723ad1d9SAl Viro if (off & 1) 596723ad1d9SAl Viro continue; 597fbf59bc9STejun Heo 598723ad1d9SAl Viro this_size = (p[1] & ~1) - off; 599a16037c8STejun Heo 600a16037c8STejun Heo head = pcpu_fit_in_area(chunk, off, this_size, size, align, 601a16037c8STejun Heo pop_only); 602a16037c8STejun Heo if (head < 0) { 6033d331ad7SAl Viro if (!seen_free) { 6043d331ad7SAl Viro chunk->first_free = i; 6053d331ad7SAl Viro seen_free = true; 6063d331ad7SAl Viro } 607723ad1d9SAl Viro max_contig = max(this_size, max_contig); 608fbf59bc9STejun Heo continue; 609fbf59bc9STejun Heo } 610fbf59bc9STejun Heo 611fbf59bc9STejun Heo /* 612fbf59bc9STejun Heo * If head is small or the previous block is free, 613fbf59bc9STejun Heo * merge'em. Note that 'small' is defined as smaller 614fbf59bc9STejun Heo * than sizeof(int), which is very small but isn't too 615fbf59bc9STejun Heo * uncommon for percpu allocations. 616fbf59bc9STejun Heo */ 617723ad1d9SAl Viro if (head && (head < sizeof(int) || !(p[-1] & 1))) { 61821ddfd38SJianyu Zhan *p = off += head; 619723ad1d9SAl Viro if (p[-1] & 1) 620fbf59bc9STejun Heo chunk->free_size -= head; 62121ddfd38SJianyu Zhan else 62221ddfd38SJianyu Zhan max_contig = max(*p - p[-1], max_contig); 623723ad1d9SAl Viro this_size -= head; 624fbf59bc9STejun Heo head = 0; 625fbf59bc9STejun Heo } 626fbf59bc9STejun Heo 627fbf59bc9STejun Heo /* if tail is small, just keep it around */ 628723ad1d9SAl Viro tail = this_size - head - size; 629723ad1d9SAl Viro if (tail < sizeof(int)) { 630fbf59bc9STejun Heo tail = 0; 631723ad1d9SAl Viro size = this_size - head; 632723ad1d9SAl Viro } 633fbf59bc9STejun Heo 634fbf59bc9STejun Heo /* split if warranted */ 635fbf59bc9STejun Heo if (head || tail) { 636706c16f2SAl Viro int nr_extra = !!head + !!tail; 637706c16f2SAl Viro 638706c16f2SAl Viro /* insert new subblocks */ 639723ad1d9SAl Viro memmove(p + nr_extra + 1, p + 1, 640706c16f2SAl Viro sizeof(chunk->map[0]) * (chunk->map_used - i)); 641706c16f2SAl Viro chunk->map_used += nr_extra; 642706c16f2SAl Viro 643fbf59bc9STejun Heo if (head) { 6443d331ad7SAl Viro if (!seen_free) { 6453d331ad7SAl Viro chunk->first_free = i; 6463d331ad7SAl Viro seen_free = true; 6473d331ad7SAl Viro } 648723ad1d9SAl Viro *++p = off += head; 649723ad1d9SAl Viro ++i; 650706c16f2SAl Viro max_contig = max(head, max_contig); 651fbf59bc9STejun Heo } 652706c16f2SAl Viro if (tail) { 653723ad1d9SAl Viro p[1] = off + size; 654706c16f2SAl Viro max_contig = max(tail, max_contig); 655706c16f2SAl Viro } 656fbf59bc9STejun Heo } 657fbf59bc9STejun Heo 6583d331ad7SAl Viro if (!seen_free) 6593d331ad7SAl Viro chunk->first_free = i + 1; 6603d331ad7SAl Viro 661fbf59bc9STejun Heo /* update hint and mark allocated */ 662723ad1d9SAl Viro if (i + 1 == chunk->map_used) 663fbf59bc9STejun Heo chunk->contig_hint = max_contig; /* fully scanned */ 664fbf59bc9STejun Heo else 665fbf59bc9STejun Heo chunk->contig_hint = 
max(chunk->contig_hint, 666fbf59bc9STejun Heo max_contig); 667fbf59bc9STejun Heo 668723ad1d9SAl Viro chunk->free_size -= size; 669723ad1d9SAl Viro *p |= 1; 670fbf59bc9STejun Heo 671b539b87fSTejun Heo *occ_pages_p = pcpu_count_occupied_pages(chunk, i); 672fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 673fbf59bc9STejun Heo return off; 674fbf59bc9STejun Heo } 675fbf59bc9STejun Heo 676fbf59bc9STejun Heo chunk->contig_hint = max_contig; /* fully scanned */ 677fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 678fbf59bc9STejun Heo 6799f7dcf22STejun Heo /* tell the upper layer that this chunk has no matching area */ 6809f7dcf22STejun Heo return -1; 681fbf59bc9STejun Heo } 682fbf59bc9STejun Heo 683fbf59bc9STejun Heo /** 684fbf59bc9STejun Heo * pcpu_free_area - free area to a pcpu_chunk 685fbf59bc9STejun Heo * @chunk: chunk of interest 686fbf59bc9STejun Heo * @freeme: offset of area to free 687b539b87fSTejun Heo * @occ_pages_p: out param for the number of pages the area occupies 688fbf59bc9STejun Heo * 689fbf59bc9STejun Heo * Free area starting from @freeme to @chunk. Note that this function 690fbf59bc9STejun Heo * only modifies the allocation map. It doesn't depopulate or unmap 691fbf59bc9STejun Heo * the area. 692ccea34b5STejun Heo * 693ccea34b5STejun Heo * CONTEXT: 694ccea34b5STejun Heo * pcpu_lock. 695fbf59bc9STejun Heo */ 696b539b87fSTejun Heo static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme, 697b539b87fSTejun Heo int *occ_pages_p) 698fbf59bc9STejun Heo { 699fbf59bc9STejun Heo int oslot = pcpu_chunk_slot(chunk); 700723ad1d9SAl Viro int off = 0; 701723ad1d9SAl Viro unsigned i, j; 702723ad1d9SAl Viro int to_free = 0; 703723ad1d9SAl Viro int *p; 704fbf59bc9STejun Heo 7055ccd30e4SDennis Zhou lockdep_assert_held(&pcpu_lock); 70630a5b536SDennis Zhou pcpu_stats_area_dealloc(chunk); 7075ccd30e4SDennis Zhou 708723ad1d9SAl Viro freeme |= 1; /* we are searching for <given offset, in use> pair */ 709723ad1d9SAl Viro 710723ad1d9SAl Viro i = 0; 711723ad1d9SAl Viro j = chunk->map_used; 712723ad1d9SAl Viro while (i != j) { 713723ad1d9SAl Viro unsigned k = (i + j) / 2; 714723ad1d9SAl Viro off = chunk->map[k]; 715723ad1d9SAl Viro if (off < freeme) 716723ad1d9SAl Viro i = k + 1; 717723ad1d9SAl Viro else if (off > freeme) 718723ad1d9SAl Viro j = k; 719723ad1d9SAl Viro else 720723ad1d9SAl Viro i = j = k; 721723ad1d9SAl Viro } 722fbf59bc9STejun Heo BUG_ON(off != freeme); 723fbf59bc9STejun Heo 7243d331ad7SAl Viro if (i < chunk->first_free) 7253d331ad7SAl Viro chunk->first_free = i; 7263d331ad7SAl Viro 727723ad1d9SAl Viro p = chunk->map + i; 728723ad1d9SAl Viro *p = off &= ~1; 729723ad1d9SAl Viro chunk->free_size += (p[1] & ~1) - off; 730fbf59bc9STejun Heo 731b539b87fSTejun Heo *occ_pages_p = pcpu_count_occupied_pages(chunk, i); 732b539b87fSTejun Heo 733fbf59bc9STejun Heo /* merge with next? */ 734723ad1d9SAl Viro if (!(p[1] & 1)) 735723ad1d9SAl Viro to_free++; 736723ad1d9SAl Viro /* merge with previous? 
*/ 737723ad1d9SAl Viro if (i > 0 && !(p[-1] & 1)) { 738723ad1d9SAl Viro to_free++; 739723ad1d9SAl Viro i--; 740723ad1d9SAl Viro p--; 741723ad1d9SAl Viro } 742723ad1d9SAl Viro if (to_free) { 743723ad1d9SAl Viro chunk->map_used -= to_free; 744723ad1d9SAl Viro memmove(p + 1, p + 1 + to_free, 745723ad1d9SAl Viro (chunk->map_used - i) * sizeof(chunk->map[0])); 746fbf59bc9STejun Heo } 747fbf59bc9STejun Heo 748723ad1d9SAl Viro chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint); 749fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 750fbf59bc9STejun Heo } 751fbf59bc9STejun Heo 752*c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, 75310edf5b0SDennis Zhou (Facebook) int map_size, 75410edf5b0SDennis Zhou (Facebook) int *map, 75510edf5b0SDennis Zhou (Facebook) int init_map_size) 75610edf5b0SDennis Zhou (Facebook) { 75710edf5b0SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 758*c0ebfdc3SDennis Zhou (Facebook) unsigned long aligned_addr; 759*c0ebfdc3SDennis Zhou (Facebook) int start_offset, region_size; 760*c0ebfdc3SDennis Zhou (Facebook) 761*c0ebfdc3SDennis Zhou (Facebook) /* region calculations */ 762*c0ebfdc3SDennis Zhou (Facebook) aligned_addr = tmp_addr & PAGE_MASK; 763*c0ebfdc3SDennis Zhou (Facebook) 764*c0ebfdc3SDennis Zhou (Facebook) start_offset = tmp_addr - aligned_addr; 7656b9d7c8eSDennis Zhou (Facebook) 7666b9d7c8eSDennis Zhou (Facebook) region_size = PFN_ALIGN(start_offset + map_size); 76710edf5b0SDennis Zhou (Facebook) 768*c0ebfdc3SDennis Zhou (Facebook) /* allocate chunk */ 76910edf5b0SDennis Zhou (Facebook) chunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 770*c0ebfdc3SDennis Zhou (Facebook) 77110edf5b0SDennis Zhou (Facebook) INIT_LIST_HEAD(&chunk->list); 77210edf5b0SDennis Zhou (Facebook) INIT_LIST_HEAD(&chunk->map_extend_list); 773*c0ebfdc3SDennis Zhou (Facebook) 774*c0ebfdc3SDennis Zhou (Facebook) chunk->base_addr = (void *)aligned_addr; 77510edf5b0SDennis Zhou (Facebook) chunk->start_offset = start_offset; 7766b9d7c8eSDennis Zhou (Facebook) chunk->end_offset = region_size - chunk->start_offset - map_size; 777*c0ebfdc3SDennis Zhou (Facebook) 778*c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages; 779*c0ebfdc3SDennis Zhou (Facebook) 78010edf5b0SDennis Zhou (Facebook) chunk->map = map; 78110edf5b0SDennis Zhou (Facebook) chunk->map_alloc = init_map_size; 78210edf5b0SDennis Zhou (Facebook) 78310edf5b0SDennis Zhou (Facebook) /* manage populated page bitmap */ 78410edf5b0SDennis Zhou (Facebook) chunk->immutable = true; 78510edf5b0SDennis Zhou (Facebook) bitmap_fill(chunk->populated, pcpu_unit_pages); 78610edf5b0SDennis Zhou (Facebook) chunk->nr_populated = pcpu_unit_pages; 78710edf5b0SDennis Zhou (Facebook) 78810edf5b0SDennis Zhou (Facebook) chunk->contig_hint = chunk->free_size = map_size; 789*c0ebfdc3SDennis Zhou (Facebook) 790*c0ebfdc3SDennis Zhou (Facebook) if (chunk->start_offset) { 791*c0ebfdc3SDennis Zhou (Facebook) /* hide the beginning of the bitmap */ 79210edf5b0SDennis Zhou (Facebook) chunk->map[0] = 1; 79310edf5b0SDennis Zhou (Facebook) chunk->map[1] = chunk->start_offset; 794*c0ebfdc3SDennis Zhou (Facebook) chunk->map_used = 1; 795*c0ebfdc3SDennis Zhou (Facebook) } 796*c0ebfdc3SDennis Zhou (Facebook) 797*c0ebfdc3SDennis Zhou (Facebook) /* set chunk's free region */ 798*c0ebfdc3SDennis Zhou (Facebook) chunk->map[++chunk->map_used] = 799*c0ebfdc3SDennis Zhou (Facebook) (chunk->start_offset + chunk->free_size) | 1; 80010edf5b0SDennis Zhou (Facebook) 
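/*
 * Worked example (editor's note): say start_offset is 0x1000, map_size
 * is 0x3000 and the region is exactly page aligned so end_offset is 0.
 * After the two steps above the map is { 0|1, 0x1000, 0x4000|1 } with
 * map_used == 2: the static region [0, 0x1000) is hidden behind an
 * in-use entry, [0x1000, 0x4000) is the free dynamic region, and the
 * final in-use entry doubles as the end sentinel. A non-zero
 * end_offset would add one more in-use entry below to hide the tail.
 */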
8016b9d7c8eSDennis Zhou (Facebook) if (chunk->end_offset) { 8026b9d7c8eSDennis Zhou (Facebook) /* hide the end of the bitmap */ 8036b9d7c8eSDennis Zhou (Facebook) chunk->map[++chunk->map_used] = region_size | 1; 8046b9d7c8eSDennis Zhou (Facebook) } 8056b9d7c8eSDennis Zhou (Facebook) 80610edf5b0SDennis Zhou (Facebook) return chunk; 80710edf5b0SDennis Zhou (Facebook) } 80810edf5b0SDennis Zhou (Facebook) 8096081089fSTejun Heo static struct pcpu_chunk *pcpu_alloc_chunk(void) 8106081089fSTejun Heo { 8116081089fSTejun Heo struct pcpu_chunk *chunk; 8126081089fSTejun Heo 81390459ce0SBob Liu chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size); 8146081089fSTejun Heo if (!chunk) 8156081089fSTejun Heo return NULL; 8166081089fSTejun Heo 81790459ce0SBob Liu chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * 81890459ce0SBob Liu sizeof(chunk->map[0])); 8196081089fSTejun Heo if (!chunk->map) { 8201d5cfdb0STetsuo Handa pcpu_mem_free(chunk); 8216081089fSTejun Heo return NULL; 8226081089fSTejun Heo } 8236081089fSTejun Heo 8246081089fSTejun Heo chunk->map_alloc = PCPU_DFL_MAP_ALLOC; 825723ad1d9SAl Viro chunk->map[0] = 0; 826723ad1d9SAl Viro chunk->map[1] = pcpu_unit_size | 1; 827723ad1d9SAl Viro chunk->map_used = 1; 8286081089fSTejun Heo 8296081089fSTejun Heo INIT_LIST_HEAD(&chunk->list); 8304f996e23STejun Heo INIT_LIST_HEAD(&chunk->map_extend_list); 8316081089fSTejun Heo chunk->free_size = pcpu_unit_size; 8326081089fSTejun Heo chunk->contig_hint = pcpu_unit_size; 8336081089fSTejun Heo 834*c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages; 835*c0ebfdc3SDennis Zhou (Facebook) 8366081089fSTejun Heo return chunk; 8376081089fSTejun Heo } 8386081089fSTejun Heo 8396081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk) 8406081089fSTejun Heo { 8416081089fSTejun Heo if (!chunk) 8426081089fSTejun Heo return; 8431d5cfdb0STetsuo Handa pcpu_mem_free(chunk->map); 8441d5cfdb0STetsuo Handa pcpu_mem_free(chunk); 8456081089fSTejun Heo } 8466081089fSTejun Heo 847b539b87fSTejun Heo /** 848b539b87fSTejun Heo * pcpu_chunk_populated - post-population bookkeeping 849b539b87fSTejun Heo * @chunk: pcpu_chunk which got populated 850b539b87fSTejun Heo * @page_start: the start page 851b539b87fSTejun Heo * @page_end: the end page 852b539b87fSTejun Heo * 853b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been populated to @chunk. Update 854b539b87fSTejun Heo * the bookkeeping information accordingly. Must be called after each 855b539b87fSTejun Heo * successful population. 856b539b87fSTejun Heo */ 857b539b87fSTejun Heo static void pcpu_chunk_populated(struct pcpu_chunk *chunk, 858b539b87fSTejun Heo int page_start, int page_end) 859b539b87fSTejun Heo { 860b539b87fSTejun Heo int nr = page_end - page_start; 861b539b87fSTejun Heo 862b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 863b539b87fSTejun Heo 864b539b87fSTejun Heo bitmap_set(chunk->populated, page_start, nr); 865b539b87fSTejun Heo chunk->nr_populated += nr; 866b539b87fSTejun Heo pcpu_nr_empty_pop_pages += nr; 867b539b87fSTejun Heo } 868b539b87fSTejun Heo 869b539b87fSTejun Heo /** 870b539b87fSTejun Heo * pcpu_chunk_depopulated - post-depopulation bookkeeping 871b539b87fSTejun Heo * @chunk: pcpu_chunk which got depopulated 872b539b87fSTejun Heo * @page_start: the start page 873b539b87fSTejun Heo * @page_end: the end page 874b539b87fSTejun Heo * 875b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been depopulated from @chunk. 876b539b87fSTejun Heo * Update the bookkeeping information accordingly. 
Must be called after 877b539b87fSTejun Heo * each successful depopulation. 878b539b87fSTejun Heo */ 879b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, 880b539b87fSTejun Heo int page_start, int page_end) 881b539b87fSTejun Heo { 882b539b87fSTejun Heo int nr = page_end - page_start; 883b539b87fSTejun Heo 884b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 885b539b87fSTejun Heo 886b539b87fSTejun Heo bitmap_clear(chunk->populated, page_start, nr); 887b539b87fSTejun Heo chunk->nr_populated -= nr; 888b539b87fSTejun Heo pcpu_nr_empty_pop_pages -= nr; 889b539b87fSTejun Heo } 890b539b87fSTejun Heo 891fbf59bc9STejun Heo /* 8929f645532STejun Heo * Chunk management implementation. 893fbf59bc9STejun Heo * 8949f645532STejun Heo * To allow different implementations, chunk alloc/free and 8959f645532STejun Heo * [de]population are implemented in a separate file which is pulled 8969f645532STejun Heo * into this file and compiled together. The following functions 8979f645532STejun Heo * should be implemented. 898ccea34b5STejun Heo * 8999f645532STejun Heo * pcpu_populate_chunk - populate the specified range of a chunk 9009f645532STejun Heo * pcpu_depopulate_chunk - depopulate the specified range of a chunk 9019f645532STejun Heo * pcpu_create_chunk - create a new chunk 9029f645532STejun Heo * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop 9039f645532STejun Heo * pcpu_addr_to_page - translate address to physical address 9049f645532STejun Heo * pcpu_verify_alloc_info - check alloc_info is acceptable during init 905fbf59bc9STejun Heo */ 9069f645532STejun Heo static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size); 9079f645532STejun Heo static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size); 9089f645532STejun Heo static struct pcpu_chunk *pcpu_create_chunk(void); 9099f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); 9109f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr); 9119f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); 912fbf59bc9STejun Heo 913b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM 914b0c9778bSTejun Heo #include "percpu-km.c" 915b0c9778bSTejun Heo #else 9169f645532STejun Heo #include "percpu-vm.c" 917b0c9778bSTejun Heo #endif 918fbf59bc9STejun Heo 919fbf59bc9STejun Heo /** 92088999a89STejun Heo * pcpu_chunk_addr_search - determine chunk containing specified address 92188999a89STejun Heo * @addr: address for which the chunk needs to be determined. 92288999a89STejun Heo * 923*c0ebfdc3SDennis Zhou (Facebook) * This is an internal function that handles all but static allocations. 924*c0ebfdc3SDennis Zhou (Facebook) * Static percpu address values should never be passed into the allocator. 925*c0ebfdc3SDennis Zhou (Facebook) * 92688999a89STejun Heo * RETURNS: 92788999a89STejun Heo * The address of the found chunk. 92888999a89STejun Heo */ 92988999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 93088999a89STejun Heo { 931*c0ebfdc3SDennis Zhou (Facebook) /* is it in the dynamic region (first chunk)? */ 932*c0ebfdc3SDennis Zhou (Facebook) if (pcpu_addr_in_first_chunk(addr)) 933*c0ebfdc3SDennis Zhou (Facebook) return pcpu_first_chunk; 934*c0ebfdc3SDennis Zhou (Facebook) 935*c0ebfdc3SDennis Zhou (Facebook) /* is it in the reserved region? 
*/ 93688999a89STejun Heo if (pcpu_addr_in_reserved_chunk(addr)) 93788999a89STejun Heo return pcpu_reserved_chunk; 93888999a89STejun Heo 93988999a89STejun Heo /* 94088999a89STejun Heo * The address is relative to unit0 which might be unused and 94188999a89STejun Heo * thus unmapped. Offset the address to the unit space of the 94288999a89STejun Heo * current processor before looking it up in the vmalloc 94388999a89STejun Heo * space. Note that any possible cpu id can be used here, so 94488999a89STejun Heo * there's no need to worry about preemption or cpu hotplug. 94588999a89STejun Heo */ 94688999a89STejun Heo addr += pcpu_unit_offsets[raw_smp_processor_id()]; 9479f645532STejun Heo return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 94888999a89STejun Heo } 94988999a89STejun Heo 95088999a89STejun Heo /** 951edcb4639STejun Heo * pcpu_alloc - the percpu allocator 952cae3aeb8STejun Heo * @size: size of area to allocate in bytes 953fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE) 954edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available 9555835d96eSTejun Heo * @gfp: allocation flags 956fbf59bc9STejun Heo * 9575835d96eSTejun Heo * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 9585835d96eSTejun Heo * contain %GFP_KERNEL, the allocation is atomic. 959fbf59bc9STejun Heo * 960fbf59bc9STejun Heo * RETURNS: 961fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 962fbf59bc9STejun Heo */ 9635835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 9645835d96eSTejun Heo gfp_t gfp) 965fbf59bc9STejun Heo { 966f2badb0cSTejun Heo static int warn_limit = 10; 967fbf59bc9STejun Heo struct pcpu_chunk *chunk; 968f2badb0cSTejun Heo const char *err; 9696ae833c7STejun Heo bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 970b539b87fSTejun Heo int occ_pages = 0; 971b38d08f3STejun Heo int slot, off, new_alloc, cpu, ret; 972403a91b1SJiri Kosina unsigned long flags; 973f528f0b8SCatalin Marinas void __percpu *ptr; 974fbf59bc9STejun Heo 975723ad1d9SAl Viro /* 976723ad1d9SAl Viro * We want the lowest bit of offset available for in-use/free 9772f69fa82SViro * indicator, so force >= 16bit alignment and make size even. 
978723ad1d9SAl Viro */ 979723ad1d9SAl Viro if (unlikely(align < 2)) 980723ad1d9SAl Viro align = 2; 981723ad1d9SAl Viro 982fb009e3aSChristoph Lameter size = ALIGN(size, 2); 9832f69fa82SViro 9843ca45a46Szijun_hu if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 9853ca45a46Szijun_hu !is_power_of_2(align))) { 986756a025fSJoe Perches WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n", 987756a025fSJoe Perches size, align); 988fbf59bc9STejun Heo return NULL; 989fbf59bc9STejun Heo } 990fbf59bc9STejun Heo 9916710e594STejun Heo if (!is_atomic) 9926710e594STejun Heo mutex_lock(&pcpu_alloc_mutex); 9936710e594STejun Heo 994403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 995fbf59bc9STejun Heo 996edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */ 997edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) { 998edcb4639STejun Heo chunk = pcpu_reserved_chunk; 999833af842STejun Heo 1000833af842STejun Heo if (size > chunk->contig_hint) { 1001833af842STejun Heo err = "alloc from reserved chunk failed"; 1002ccea34b5STejun Heo goto fail_unlock; 1003f2badb0cSTejun Heo } 1004833af842STejun Heo 10059c824b6aSTejun Heo while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) { 1006833af842STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 10075835d96eSTejun Heo if (is_atomic || 10085835d96eSTejun Heo pcpu_extend_area_map(chunk, new_alloc) < 0) { 1009833af842STejun Heo err = "failed to extend area map of reserved chunk"; 1010b38d08f3STejun Heo goto fail; 1011833af842STejun Heo } 1012833af842STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1013833af842STejun Heo } 1014833af842STejun Heo 1015b539b87fSTejun Heo off = pcpu_alloc_area(chunk, size, align, is_atomic, 1016b539b87fSTejun Heo &occ_pages); 1017edcb4639STejun Heo if (off >= 0) 1018edcb4639STejun Heo goto area_found; 1019833af842STejun Heo 1020f2badb0cSTejun Heo err = "alloc from reserved chunk failed"; 1021ccea34b5STejun Heo goto fail_unlock; 1022edcb4639STejun Heo } 1023edcb4639STejun Heo 1024ccea34b5STejun Heo restart: 1025edcb4639STejun Heo /* search through normal chunks */ 1026fbf59bc9STejun Heo for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 1027fbf59bc9STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 1028fbf59bc9STejun Heo if (size > chunk->contig_hint) 1029fbf59bc9STejun Heo continue; 1030ccea34b5STejun Heo 10319c824b6aSTejun Heo new_alloc = pcpu_need_to_extend(chunk, is_atomic); 1032833af842STejun Heo if (new_alloc) { 10335835d96eSTejun Heo if (is_atomic) 10345835d96eSTejun Heo continue; 1035833af842STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1036833af842STejun Heo if (pcpu_extend_area_map(chunk, 1037833af842STejun Heo new_alloc) < 0) { 1038f2badb0cSTejun Heo err = "failed to extend area map"; 1039b38d08f3STejun Heo goto fail; 1040833af842STejun Heo } 1041833af842STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1042833af842STejun Heo /* 1043833af842STejun Heo * pcpu_lock has been dropped, need to 1044833af842STejun Heo * restart cpu_slot list walking. 
1045833af842STejun Heo */ 1046833af842STejun Heo goto restart; 1047ccea34b5STejun Heo } 1048ccea34b5STejun Heo 1049b539b87fSTejun Heo off = pcpu_alloc_area(chunk, size, align, is_atomic, 1050b539b87fSTejun Heo &occ_pages); 1051fbf59bc9STejun Heo if (off >= 0) 1052fbf59bc9STejun Heo goto area_found; 1053fbf59bc9STejun Heo } 1054fbf59bc9STejun Heo } 1055fbf59bc9STejun Heo 1056403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1057ccea34b5STejun Heo 1058b38d08f3STejun Heo /* 1059b38d08f3STejun Heo * No space left. Create a new chunk. We don't want multiple 1060b38d08f3STejun Heo * tasks to create chunks simultaneously. Serialize and create iff 1061b38d08f3STejun Heo * there's still no empty chunk after grabbing the mutex. 1062b38d08f3STejun Heo */ 106311df02bfSDennis Zhou if (is_atomic) { 106411df02bfSDennis Zhou err = "atomic alloc failed, no space left"; 10655835d96eSTejun Heo goto fail; 106611df02bfSDennis Zhou } 10675835d96eSTejun Heo 1068b38d08f3STejun Heo if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { 10696081089fSTejun Heo chunk = pcpu_create_chunk(); 1070f2badb0cSTejun Heo if (!chunk) { 1071f2badb0cSTejun Heo err = "failed to allocate new chunk"; 1072b38d08f3STejun Heo goto fail; 1073f2badb0cSTejun Heo } 1074ccea34b5STejun Heo 1075403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1076fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1077b38d08f3STejun Heo } else { 1078b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1079b38d08f3STejun Heo } 1080b38d08f3STejun Heo 1081ccea34b5STejun Heo goto restart; 1082fbf59bc9STejun Heo 1083fbf59bc9STejun Heo area_found: 108430a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size); 1085403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1086ccea34b5STejun Heo 1087dca49645STejun Heo /* populate if not all pages are already there */ 10885835d96eSTejun Heo if (!is_atomic) { 1089e04d3208STejun Heo int page_start, page_end, rs, re; 1090e04d3208STejun Heo 1091dca49645STejun Heo page_start = PFN_DOWN(off); 1092dca49645STejun Heo page_end = PFN_UP(off + size); 1093dca49645STejun Heo 1094a93ace48STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 1095dca49645STejun Heo WARN_ON(chunk->immutable); 1096dca49645STejun Heo 1097b38d08f3STejun Heo ret = pcpu_populate_chunk(chunk, rs, re); 1098b38d08f3STejun Heo 1099403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1100b38d08f3STejun Heo if (ret) { 1101b539b87fSTejun Heo pcpu_free_area(chunk, off, &occ_pages); 1102f2badb0cSTejun Heo err = "failed to populate"; 1103ccea34b5STejun Heo goto fail_unlock; 1104fbf59bc9STejun Heo } 1105b539b87fSTejun Heo pcpu_chunk_populated(chunk, rs, re); 1106b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1107dca49645STejun Heo } 1108dca49645STejun Heo 1109ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1110e04d3208STejun Heo } 1111ccea34b5STejun Heo 1112320661b0STahsin Erdogan if (chunk != pcpu_reserved_chunk) { 1113320661b0STahsin Erdogan spin_lock_irqsave(&pcpu_lock, flags); 1114b539b87fSTejun Heo pcpu_nr_empty_pop_pages -= occ_pages; 1115320661b0STahsin Erdogan spin_unlock_irqrestore(&pcpu_lock, flags); 1116320661b0STahsin Erdogan } 1117b539b87fSTejun Heo 11181a4d7607STejun Heo if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 11191a4d7607STejun Heo pcpu_schedule_balance_work(); 11201a4d7607STejun Heo 1121dca49645STejun Heo /* clear the areas and return address relative to base address */ 1122dca49645STejun Heo for_each_possible_cpu(cpu) 1123dca49645STejun Heo memset((void 
*)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1124dca49645STejun Heo
1125f528f0b8SCatalin Marinas ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
11268a8c35faSLarry Finger kmemleak_alloc_percpu(ptr, size, gfp);
1127df95e795SDennis Zhou
1128df95e795SDennis Zhou trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1129df95e795SDennis Zhou chunk->base_addr, off, ptr);
1130df95e795SDennis Zhou
1131f528f0b8SCatalin Marinas return ptr;
1132ccea34b5STejun Heo
1133ccea34b5STejun Heo fail_unlock:
1134403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags);
1135b38d08f3STejun Heo fail:
1136df95e795SDennis Zhou trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1137df95e795SDennis Zhou
11385835d96eSTejun Heo if (!is_atomic && warn_limit) {
1139870d4b12SJoe Perches pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
11405835d96eSTejun Heo size, align, is_atomic, err);
1141f2badb0cSTejun Heo dump_stack();
1142f2badb0cSTejun Heo if (!--warn_limit)
1143870d4b12SJoe Perches pr_info("limit reached, disable warning\n");
1144f2badb0cSTejun Heo }
11451a4d7607STejun Heo if (is_atomic) {
11461a4d7607STejun Heo /* see the flag handling in pcpu_balance_workfn() */
11471a4d7607STejun Heo pcpu_atomic_alloc_failed = true;
11481a4d7607STejun Heo pcpu_schedule_balance_work();
11496710e594STejun Heo } else {
11506710e594STejun Heo mutex_unlock(&pcpu_alloc_mutex);
11511a4d7607STejun Heo }
1152ccea34b5STejun Heo return NULL;
1153fbf59bc9STejun Heo }
1154edcb4639STejun Heo
1155edcb4639STejun Heo /**
11565835d96eSTejun Heo * __alloc_percpu_gfp - allocate dynamic percpu area
1157edcb4639STejun Heo * @size: size of area to allocate in bytes
1158edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE)
11595835d96eSTejun Heo * @gfp: allocation flags
1160edcb4639STejun Heo *
11615835d96eSTejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align. If
11625835d96eSTejun Heo * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
11635835d96eSTejun Heo * be called from any context but is a lot more likely to fail.
1164ccea34b5STejun Heo *
1165edcb4639STejun Heo * RETURNS:
1166edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure.
1167edcb4639STejun Heo */
11685835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
11695835d96eSTejun Heo {
11705835d96eSTejun Heo return pcpu_alloc(size, align, false, gfp);
11715835d96eSTejun Heo }
11725835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
11735835d96eSTejun Heo
11745835d96eSTejun Heo /**
11755835d96eSTejun Heo * __alloc_percpu - allocate dynamic percpu area
11765835d96eSTejun Heo * @size: size of area to allocate in bytes
11775835d96eSTejun Heo * @align: alignment of area (max PAGE_SIZE)
11785835d96eSTejun Heo *
11795835d96eSTejun Heo * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
11805835d96eSTejun Heo */
118143cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align)
1182edcb4639STejun Heo {
11835835d96eSTejun Heo return pcpu_alloc(size, align, false, GFP_KERNEL);
1184edcb4639STejun Heo }
1185fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu);
1186fbf59bc9STejun Heo
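/*
 * Example usage (editor's sketch): the typical client-side pattern for
 * the allocators above. This helper is hypothetical and purely
 * illustrative; most callers go through the alloc_percpu() and
 * alloc_percpu_gfp() wrappers in linux/percpu.h instead.
 */
static int __maybe_unused pcpu_alloc_example(void)
{
	int __percpu *cnt;
	int val;

	/* one zeroed int per possible cpu */
	cnt = __alloc_percpu(sizeof(*cnt), __alignof__(*cnt));
	if (!cnt)
		return -ENOMEM;

	/* accesses must pin the cpu */
	preempt_disable();
	val = ++*per_cpu_ptr(cnt, smp_processor_id());
	preempt_enable();

	free_percpu(cnt);
	return val;
}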
11805835d96eSTejun Heo */ 118143cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align) 1182edcb4639STejun Heo { 11835835d96eSTejun Heo return pcpu_alloc(size, align, false, GFP_KERNEL); 1184edcb4639STejun Heo } 1185fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 1186fbf59bc9STejun Heo 1187edcb4639STejun Heo /** 1188edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 1189edcb4639STejun Heo * @size: size of area to allocate in bytes 1190edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1191edcb4639STejun Heo * 11929329ba97STejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align 11939329ba97STejun Heo * from reserved percpu area if arch has set it up; otherwise, 11949329ba97STejun Heo * allocation is served from the same dynamic area. Might sleep. 11959329ba97STejun Heo * Might trigger writeouts. 1196edcb4639STejun Heo * 1197ccea34b5STejun Heo * CONTEXT: 1198ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1199ccea34b5STejun Heo * 1200edcb4639STejun Heo * RETURNS: 1201edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1202edcb4639STejun Heo */ 120343cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1204edcb4639STejun Heo { 12055835d96eSTejun Heo return pcpu_alloc(size, align, true, GFP_KERNEL); 1206edcb4639STejun Heo } 1207edcb4639STejun Heo 1208a56dbddfSTejun Heo /** 12091a4d7607STejun Heo * pcpu_balance_workfn - manage the amount of free chunks and populated pages 1210a56dbddfSTejun Heo * @work: unused 1211a56dbddfSTejun Heo * 1212a56dbddfSTejun Heo * Reclaim all fully free chunks except for the first one. 1213a56dbddfSTejun Heo */ 1214fe6bd8c3STejun Heo static void pcpu_balance_workfn(struct work_struct *work) 1215fbf59bc9STejun Heo { 1216fe6bd8c3STejun Heo LIST_HEAD(to_free); 1217fe6bd8c3STejun Heo struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; 1218a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 12191a4d7607STejun Heo int slot, nr_to_pop, ret; 1220a56dbddfSTejun Heo 12211a4d7607STejun Heo /* 12221a4d7607STejun Heo * There's no reason to keep around multiple unused chunks and VM 12231a4d7607STejun Heo * areas can be scarce. Destroy all free chunks except for one. 
12241a4d7607STejun Heo */ 1225ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 1226ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1227a56dbddfSTejun Heo 1228fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, free_head, list) { 12298d408b4bSTejun Heo WARN_ON(chunk->immutable); 1230a56dbddfSTejun Heo 1231a56dbddfSTejun Heo /* spare the first one */ 1232fe6bd8c3STejun Heo if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 1233a56dbddfSTejun Heo continue; 1234a56dbddfSTejun Heo 12354f996e23STejun Heo list_del_init(&chunk->map_extend_list); 1236fe6bd8c3STejun Heo list_move(&chunk->list, &to_free); 1237a56dbddfSTejun Heo } 1238a56dbddfSTejun Heo 1239ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1240a56dbddfSTejun Heo 1241fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, &to_free, list) { 1242a93ace48STejun Heo int rs, re; 1243dca49645STejun Heo 1244a93ace48STejun Heo pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) { 1245a93ace48STejun Heo pcpu_depopulate_chunk(chunk, rs, re); 1246b539b87fSTejun Heo spin_lock_irq(&pcpu_lock); 1247b539b87fSTejun Heo pcpu_chunk_depopulated(chunk, rs, re); 1248b539b87fSTejun Heo spin_unlock_irq(&pcpu_lock); 1249a93ace48STejun Heo } 12506081089fSTejun Heo pcpu_destroy_chunk(chunk); 1251fbf59bc9STejun Heo } 1252971f3918STejun Heo 12534f996e23STejun Heo /* service chunks which requested async area map extension */ 12544f996e23STejun Heo do { 12554f996e23STejun Heo int new_alloc = 0; 12564f996e23STejun Heo 12574f996e23STejun Heo spin_lock_irq(&pcpu_lock); 12584f996e23STejun Heo 12594f996e23STejun Heo chunk = list_first_entry_or_null(&pcpu_map_extend_chunks, 12604f996e23STejun Heo struct pcpu_chunk, map_extend_list); 12614f996e23STejun Heo if (chunk) { 12624f996e23STejun Heo list_del_init(&chunk->map_extend_list); 12634f996e23STejun Heo new_alloc = pcpu_need_to_extend(chunk, false); 12644f996e23STejun Heo } 12654f996e23STejun Heo 12664f996e23STejun Heo spin_unlock_irq(&pcpu_lock); 12674f996e23STejun Heo 12684f996e23STejun Heo if (new_alloc) 12694f996e23STejun Heo pcpu_extend_area_map(chunk, new_alloc); 12704f996e23STejun Heo } while (chunk); 12714f996e23STejun Heo 12721a4d7607STejun Heo /* 12731a4d7607STejun Heo * Ensure there are certain number of free populated pages for 12741a4d7607STejun Heo * atomic allocs. Fill up from the most packed so that atomic 12751a4d7607STejun Heo * allocs don't increase fragmentation. If atomic allocation 12761a4d7607STejun Heo * failed previously, always populate the maximum amount. This 12771a4d7607STejun Heo * should prevent atomic allocs larger than PAGE_SIZE from keeping 12781a4d7607STejun Heo * failing indefinitely; however, large atomic allocs are not 12791a4d7607STejun Heo * something we support properly and can be highly unreliable and 12801a4d7607STejun Heo * inefficient. 
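 *
 * As a worked example using the defaults at the top of this file:
 * with PCPU_EMPTY_POP_PAGES_HIGH == 4 and 3 empty populated pages
 * currently available, the code below populates
 * clamp(4 - 3, 0, 4) == 1 additional page.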
12811a4d7607STejun Heo */ 12821a4d7607STejun Heo retry_pop: 12831a4d7607STejun Heo if (pcpu_atomic_alloc_failed) { 12841a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 12851a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */ 12861a4d7607STejun Heo pcpu_atomic_alloc_failed = false; 12871a4d7607STejun Heo } else { 12881a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 12891a4d7607STejun Heo pcpu_nr_empty_pop_pages, 12901a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH); 12911a4d7607STejun Heo } 12921a4d7607STejun Heo 12931a4d7607STejun Heo for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { 12941a4d7607STejun Heo int nr_unpop = 0, rs, re; 12951a4d7607STejun Heo 12961a4d7607STejun Heo if (!nr_to_pop) 12971a4d7607STejun Heo break; 12981a4d7607STejun Heo 12991a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 13001a4d7607STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 13011a4d7607STejun Heo nr_unpop = pcpu_unit_pages - chunk->nr_populated; 13021a4d7607STejun Heo if (nr_unpop) 13031a4d7607STejun Heo break; 13041a4d7607STejun Heo } 13051a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 13061a4d7607STejun Heo 13071a4d7607STejun Heo if (!nr_unpop) 13081a4d7607STejun Heo continue; 13091a4d7607STejun Heo 13101a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */ 13111a4d7607STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) { 13121a4d7607STejun Heo int nr = min(re - rs, nr_to_pop); 13131a4d7607STejun Heo 13141a4d7607STejun Heo ret = pcpu_populate_chunk(chunk, rs, rs + nr); 13151a4d7607STejun Heo if (!ret) { 13161a4d7607STejun Heo nr_to_pop -= nr; 13171a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 13181a4d7607STejun Heo pcpu_chunk_populated(chunk, rs, rs + nr); 13191a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 13201a4d7607STejun Heo } else { 13211a4d7607STejun Heo nr_to_pop = 0; 13221a4d7607STejun Heo } 13231a4d7607STejun Heo 13241a4d7607STejun Heo if (!nr_to_pop) 13251a4d7607STejun Heo break; 13261a4d7607STejun Heo } 13271a4d7607STejun Heo } 13281a4d7607STejun Heo 13291a4d7607STejun Heo if (nr_to_pop) { 13301a4d7607STejun Heo /* ran out of chunks to populate, create a new one and retry */ 13311a4d7607STejun Heo chunk = pcpu_create_chunk(); 13321a4d7607STejun Heo if (chunk) { 13331a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 13341a4d7607STejun Heo pcpu_chunk_relocate(chunk, -1); 13351a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 13361a4d7607STejun Heo goto retry_pop; 13371a4d7607STejun Heo } 13381a4d7607STejun Heo } 13391a4d7607STejun Heo 1340971f3918STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1341a56dbddfSTejun Heo } 1342fbf59bc9STejun Heo 1343fbf59bc9STejun Heo /** 1344fbf59bc9STejun Heo * free_percpu - free percpu area 1345fbf59bc9STejun Heo * @ptr: pointer to area to free 1346fbf59bc9STejun Heo * 1347ccea34b5STejun Heo * Free percpu area @ptr. 1348ccea34b5STejun Heo * 1349ccea34b5STejun Heo * CONTEXT: 1350ccea34b5STejun Heo * Can be called from atomic context. 
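 *
 * Like kfree(), passing NULL is a no-op, so a teardown sketch such as
 * the following is safe even if p was never allocated:
 *
 *	free_percpu(p);
 *	p = NULL;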
1351fbf59bc9STejun Heo */
135243cf38ebSTejun Heo void free_percpu(void __percpu *ptr)
1353fbf59bc9STejun Heo {
1354129182e5SAndrew Morton void *addr;
1355fbf59bc9STejun Heo struct pcpu_chunk *chunk;
1356ccea34b5STejun Heo unsigned long flags;
1357b539b87fSTejun Heo int off, occ_pages;
1358fbf59bc9STejun Heo
1359fbf59bc9STejun Heo if (!ptr)
1360fbf59bc9STejun Heo return;
1361fbf59bc9STejun Heo
1362f528f0b8SCatalin Marinas kmemleak_free_percpu(ptr);
1363f528f0b8SCatalin Marinas
1364129182e5SAndrew Morton addr = __pcpu_ptr_to_addr(ptr);
1365129182e5SAndrew Morton
1366ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags);
1367fbf59bc9STejun Heo
1368fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr);
1369bba174f5STejun Heo off = addr - chunk->base_addr;
1370fbf59bc9STejun Heo
1371b539b87fSTejun Heo pcpu_free_area(chunk, off, &occ_pages);
1372b539b87fSTejun Heo
1373b539b87fSTejun Heo if (chunk != pcpu_reserved_chunk)
1374b539b87fSTejun Heo pcpu_nr_empty_pop_pages += occ_pages;
1375fbf59bc9STejun Heo
1376a56dbddfSTejun Heo /* if there is more than one fully free chunk, wake up the grim reaper */
1377fbf59bc9STejun Heo if (chunk->free_size == pcpu_unit_size) {
1378fbf59bc9STejun Heo struct pcpu_chunk *pos;
1379fbf59bc9STejun Heo
1380a56dbddfSTejun Heo list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1381fbf59bc9STejun Heo if (pos != chunk) {
13821a4d7607STejun Heo pcpu_schedule_balance_work();
1383fbf59bc9STejun Heo break;
1384fbf59bc9STejun Heo }
1385fbf59bc9STejun Heo }
1386fbf59bc9STejun Heo
1387df95e795SDennis Zhou trace_percpu_free_percpu(chunk->base_addr, off, ptr);
1388df95e795SDennis Zhou
1389ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags);
1390fbf59bc9STejun Heo }
1391fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu);
1392fbf59bc9STejun Heo
1393383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1394383776faSThomas Gleixner {
1395383776faSThomas Gleixner #ifdef CONFIG_SMP
1396383776faSThomas Gleixner const size_t static_size = __per_cpu_end - __per_cpu_start;
1397383776faSThomas Gleixner void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1398383776faSThomas Gleixner unsigned int cpu;
1399383776faSThomas Gleixner
1400383776faSThomas Gleixner for_each_possible_cpu(cpu) {
1401383776faSThomas Gleixner void *start = per_cpu_ptr(base, cpu);
1402383776faSThomas Gleixner void *va = (void *)addr;
1403383776faSThomas Gleixner
1404383776faSThomas Gleixner if (va >= start && va < start + static_size) {
14058ce371f9SPeter Zijlstra if (can_addr) {
1406383776faSThomas Gleixner *can_addr = (unsigned long) (va - start);
14078ce371f9SPeter Zijlstra *can_addr += (unsigned long)
14088ce371f9SPeter Zijlstra per_cpu_ptr(base, get_boot_cpu_id());
14098ce371f9SPeter Zijlstra }
1410383776faSThomas Gleixner return true;
1411383776faSThomas Gleixner }
1412383776faSThomas Gleixner }
1413383776faSThomas Gleixner #endif
1414383776faSThomas Gleixner /* on UP, can't distinguish from other static vars, always false */
1415383776faSThomas Gleixner return false;
1416383776faSThomas Gleixner }
1417383776faSThomas Gleixner
14183b034b0dSVivek Goyal /**
141910fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area
142010fad5e4STejun Heo * @addr: address to test
142110fad5e4STejun Heo *
142210fad5e4STejun Heo * Test whether @addr belongs to the in-kernel static percpu area. Module
142310fad5e4STejun Heo * static percpu areas are not considered.
For those, use
142410fad5e4STejun Heo * is_module_percpu_address().
142510fad5e4STejun Heo *
142610fad5e4STejun Heo * RETURNS:
142710fad5e4STejun Heo * %true if @addr is from the in-kernel static percpu area, %false otherwise.
142810fad5e4STejun Heo */
142910fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr)
143010fad5e4STejun Heo {
1431383776faSThomas Gleixner return __is_kernel_percpu_address(addr, NULL);
143210fad5e4STejun Heo }
143310fad5e4STejun Heo
143410fad5e4STejun Heo /**
14353b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address
14363b034b0dSVivek Goyal * @addr: the address to be converted to physical address
14373b034b0dSVivek Goyal *
14383b034b0dSVivek Goyal * Given @addr which is a dereferenceable address obtained via one of
14393b034b0dSVivek Goyal * the percpu access macros, this function translates it into its physical
14403b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid
14413b034b0dSVivek Goyal * until this function finishes.
14423b034b0dSVivek Goyal *
144367589c71SDave Young * The percpu allocator has special setup for the first chunk, which currently
144467589c71SDave Young * supports either embedding in linear address space or vmalloc mapping,
144567589c71SDave Young * and, from the second one, the backing allocator (currently either vm or
144667589c71SDave Young * km) provides translation.
144767589c71SDave Young *
1448bffc4375SYannick Guerrini * The addr can be translated simply without checking if it falls into the
144967589c71SDave Young * first chunk. But the current code better reflects how the percpu
145067589c71SDave Young * allocator actually works, and the verification can discover both bugs
145167589c71SDave Young * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
145267589c71SDave Young * So we keep the current code.
145367589c71SDave Young *
14543b034b0dSVivek Goyal * RETURNS:
14553b034b0dSVivek Goyal * The physical address for @addr.
14563b034b0dSVivek Goyal */
14573b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr)
14583b034b0dSVivek Goyal {
14599983b6f0STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
14609983b6f0STejun Heo bool in_first_chunk = false;
1461a855b84cSTejun Heo unsigned long first_low, first_high;
14629983b6f0STejun Heo unsigned int cpu;
14639983b6f0STejun Heo
14649983b6f0STejun Heo /*
1465a855b84cSTejun Heo * The following test on unit_low/high isn't strictly
14669983b6f0STejun Heo * necessary but will speed up lookups of addresses which
14679983b6f0STejun Heo * aren't in the first chunk.
1468*c0ebfdc3SDennis Zhou (Facebook) *
1469*c0ebfdc3SDennis Zhou (Facebook) * The address check is against full chunk sizes. pcpu_base_addr
1470*c0ebfdc3SDennis Zhou (Facebook) * points to the beginning of the first chunk including the
1471*c0ebfdc3SDennis Zhou (Facebook) * static region. Assumes good intent as the first chunk may
1472*c0ebfdc3SDennis Zhou (Facebook) * not be full (ie. < pcpu_unit_pages in size).
14739983b6f0STejun Heo */ 1474*c0ebfdc3SDennis Zhou (Facebook) first_low = (unsigned long)pcpu_base_addr + 1475*c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 1476*c0ebfdc3SDennis Zhou (Facebook) first_high = (unsigned long)pcpu_base_addr + 1477*c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 1478a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 1479a855b84cSTejun Heo (unsigned long)addr < first_high) { 14809983b6f0STejun Heo for_each_possible_cpu(cpu) { 14819983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 14829983b6f0STejun Heo 14839983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 14849983b6f0STejun Heo in_first_chunk = true; 14859983b6f0STejun Heo break; 14869983b6f0STejun Heo } 14879983b6f0STejun Heo } 14889983b6f0STejun Heo } 14899983b6f0STejun Heo 14909983b6f0STejun Heo if (in_first_chunk) { 1491eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 14923b034b0dSVivek Goyal return __pa(addr); 14933b034b0dSVivek Goyal else 14949f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 14959f57bd4dSEugene Surovegin offset_in_page(addr); 1496020ec653STejun Heo } else 14979f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 14989f57bd4dSEugene Surovegin offset_in_page(addr); 14993b034b0dSVivek Goyal } 15003b034b0dSVivek Goyal 1501fbf59bc9STejun Heo /** 1502fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 1503fd1e8a1fSTejun Heo * @nr_groups: the number of groups 1504fd1e8a1fSTejun Heo * @nr_units: the number of units 1505033e48fbSTejun Heo * 1506fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 1507fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 1508fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 1509fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 1510fd1e8a1fSTejun Heo * pointer of other groups. 1511033e48fbSTejun Heo * 1512033e48fbSTejun Heo * RETURNS: 1513fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 1514fd1e8a1fSTejun Heo * failure. 
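 *
 * For example, the UP setup_per_cpu_areas() later in this file builds
 * a single-group, single-unit descriptor and fills it in by hand:
 *
 *	ai = pcpu_alloc_alloc_info(1, 1);
 *	ai->dyn_size = unit_size;
 *	ai->unit_size = unit_size;
 *	ai->groups[0].nr_units = 1;
 *	ai->groups[0].cpu_map[0] = 0;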
1515033e48fbSTejun Heo */ 1516fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 1517fd1e8a1fSTejun Heo int nr_units) 1518fd1e8a1fSTejun Heo { 1519fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 1520fd1e8a1fSTejun Heo size_t base_size, ai_size; 1521fd1e8a1fSTejun Heo void *ptr; 1522fd1e8a1fSTejun Heo int unit; 1523fd1e8a1fSTejun Heo 1524fd1e8a1fSTejun Heo base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), 1525fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 1526fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 1527fd1e8a1fSTejun Heo 1528999c17e3SSantosh Shilimkar ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); 1529fd1e8a1fSTejun Heo if (!ptr) 1530fd1e8a1fSTejun Heo return NULL; 1531fd1e8a1fSTejun Heo ai = ptr; 1532fd1e8a1fSTejun Heo ptr += base_size; 1533fd1e8a1fSTejun Heo 1534fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 1535fd1e8a1fSTejun Heo 1536fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 1537fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 1538fd1e8a1fSTejun Heo 1539fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 1540fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 1541fd1e8a1fSTejun Heo 1542fd1e8a1fSTejun Heo return ai; 1543fd1e8a1fSTejun Heo } 1544fd1e8a1fSTejun Heo 1545fd1e8a1fSTejun Heo /** 1546fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 1547fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 1548fd1e8a1fSTejun Heo * 1549fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 1550fd1e8a1fSTejun Heo */ 1551fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 1552fd1e8a1fSTejun Heo { 1553999c17e3SSantosh Shilimkar memblock_free_early(__pa(ai), ai->__ai_size); 1554fd1e8a1fSTejun Heo } 1555fd1e8a1fSTejun Heo 1556fd1e8a1fSTejun Heo /** 1557fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1558fd1e8a1fSTejun Heo * @lvl: loglevel 1559fd1e8a1fSTejun Heo * @ai: allocation info to dump 1560fd1e8a1fSTejun Heo * 1561fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 
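 *
 * The resulting lines look like the following (the numbers are
 * illustrative, taken from a hypothetical 4-cpu configuration):
 *
 *	pcpu-alloc: s98200 r8192 d28776 u135168 alloc=33*4096
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3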
1562fd1e8a1fSTejun Heo */
1563fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
1564fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai)
1565033e48fbSTejun Heo {
1566fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width;
1567033e48fbSTejun Heo char empty_str[] = "--------";
1568fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0;
1569fd1e8a1fSTejun Heo int group, v;
1570fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */
1571033e48fbSTejun Heo
1572fd1e8a1fSTejun Heo v = ai->nr_groups;
1573033e48fbSTejun Heo while (v /= 10)
1574fd1e8a1fSTejun Heo group_width++;
1575033e48fbSTejun Heo
1576fd1e8a1fSTejun Heo v = num_possible_cpus();
1577fd1e8a1fSTejun Heo while (v /= 10)
1578fd1e8a1fSTejun Heo cpu_width++;
1579fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1580033e48fbSTejun Heo
1581fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size;
1582fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3;
1583fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1));
1584033e48fbSTejun Heo
1585fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1586fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1587fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1588fd1e8a1fSTejun Heo
1589fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) {
1590fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group];
1591fd1e8a1fSTejun Heo int unit = 0, unit_end = 0;
1592fd1e8a1fSTejun Heo
1593fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa);
1594fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa;
1595fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) {
1596fd1e8a1fSTejun Heo if (!(alloc % apl)) {
15971170532bSJoe Perches pr_cont("\n");
1598fd1e8a1fSTejun Heo printk("%spcpu-alloc: ", lvl);
1599033e48fbSTejun Heo }
16001170532bSJoe Perches pr_cont("[%0*d] ", group_width, group);
1601fd1e8a1fSTejun Heo
1602fd1e8a1fSTejun Heo for (unit_end += upa; unit < unit_end; unit++)
1603fd1e8a1fSTejun Heo if (gi->cpu_map[unit] != NR_CPUS)
16041170532bSJoe Perches pr_cont("%0*d ",
16051170532bSJoe Perches cpu_width, gi->cpu_map[unit]);
1606033e48fbSTejun Heo else
16071170532bSJoe Perches pr_cont("%s ", empty_str);
1608033e48fbSTejun Heo }
1609fd1e8a1fSTejun Heo }
16101170532bSJoe Perches pr_cont("\n");
1611033e48fbSTejun Heo }
1612033e48fbSTejun Heo
1613fbf59bc9STejun Heo /**
16148d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk
1615fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how the percpu area is shaped
161638a6be52STejun Heo * @base_addr: mapped address
1617fbf59bc9STejun Heo *
16188d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static
16198d408b4bSTejun Heo * percpu area. This function is to be called from the arch percpu area
162038a6be52STejun Heo * setup path.
16218d408b4bSTejun Heo *
1622fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first
1623fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator.
16248d408b4bSTejun Heo *
1625fd1e8a1fSTejun Heo * @ai->static_size is the size of static percpu area.
1626fd1e8a1fSTejun Heo *
1627fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1628edcb4639STejun Heo * reserve after the static area in the first chunk.
This reserves 1629edcb4639STejun Heo * the first chunk such that it's available only through reserved 1630edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 1631edcb4639STejun Heo * static areas on architectures where the addressing model has 1632edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 1633edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 1634edcb4639STejun Heo * 1635fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 1636fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 1637fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 16386074d5b0STejun Heo * 1639fd1e8a1fSTejun Heo * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 1640fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 1641fd1e8a1fSTejun Heo * @ai->dyn_size. 16428d408b4bSTejun Heo * 1643fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and used as alignment 1644fd1e8a1fSTejun Heo * for vm areas. 16458d408b4bSTejun Heo * 1646fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and always multiple of 1647fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 1648fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 1649fd1e8a1fSTejun Heo * 1650fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe virtual memory layout of 1651fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the 1652fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 1653fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 1654fd1e8a1fSTejun Heo * all units is assumed. 16558d408b4bSTejun Heo * 165638a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 165738a6be52STejun Heo * copied static data to each unit. 1658fbf59bc9STejun Heo * 1659*c0ebfdc3SDennis Zhou (Facebook) * The first chunk will always contain a static and a dynamic region. 1660*c0ebfdc3SDennis Zhou (Facebook) * However, the static region is not managed by any chunk. If the first 1661*c0ebfdc3SDennis Zhou (Facebook) * chunk also contains a reserved region, it is served by two chunks - 1662*c0ebfdc3SDennis Zhou (Facebook) * one for the reserved region and one for the dynamic region. They 1663*c0ebfdc3SDennis Zhou (Facebook) * share the same vm, but use offset regions in the area allocation map. 1664*c0ebfdc3SDennis Zhou (Facebook) * The chunk serving the dynamic region is circulated in the chunk slots 1665*c0ebfdc3SDennis Zhou (Facebook) * and available for dynamic allocation like any other chunk. 1666edcb4639STejun Heo * 1667fbf59bc9STejun Heo * RETURNS: 1668fb435d52STejun Heo * 0 on success, -errno on failure. 
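 *
 * A rough arch-side sketch (how @base_addr is allocated, mapped and
 * filled is arch-specific and elided here):
 *
 *	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
 *	... fill ai->static_size, ai->unit_size, ai->groups[], etc. ...
 *	... allocate and map base_addr, copy static data per unit ...
 *	rc = pcpu_setup_first_chunk(ai, base_addr);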
1669fbf59bc9STejun Heo */ 1670fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1671fd1e8a1fSTejun Heo void *base_addr) 1672fbf59bc9STejun Heo { 1673099a19d9STejun Heo static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1674099a19d9STejun Heo static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1675b9c39442SDennis Zhou (Facebook) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 16760c4169c3SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 16776563297cSTejun Heo unsigned long *group_offsets; 16786563297cSTejun Heo size_t *group_sizes; 1679fb435d52STejun Heo unsigned long *unit_off; 1680fbf59bc9STejun Heo unsigned int cpu; 1681fd1e8a1fSTejun Heo int *unit_map; 1682fd1e8a1fSTejun Heo int group, unit, i; 1683*c0ebfdc3SDennis Zhou (Facebook) int map_size; 1684*c0ebfdc3SDennis Zhou (Facebook) unsigned long tmp_addr; 1685fbf59bc9STejun Heo 1686635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 1687635b75fcSTejun Heo if (unlikely(cond)) { \ 1688870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \ 1689870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \ 1690807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \ 1691635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 1692635b75fcSTejun Heo BUG(); \ 1693635b75fcSTejun Heo } \ 1694635b75fcSTejun Heo } while (0) 1695635b75fcSTejun Heo 16962f39e637STejun Heo /* sanity checks */ 1697635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1698bbddff05STejun Heo #ifdef CONFIG_SMP 1699635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 1700f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 1701bbddff05STejun Heo #endif 1702635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 1703f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 1704635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1705f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 1706635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 1707099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 1708fb29a2ccSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!ai->dyn_size); 17099f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 17108d408b4bSTejun Heo 17116563297cSTejun Heo /* process group information and build config tables accordingly */ 1712999c17e3SSantosh Shilimkar group_offsets = memblock_virt_alloc(ai->nr_groups * 1713999c17e3SSantosh Shilimkar sizeof(group_offsets[0]), 0); 1714999c17e3SSantosh Shilimkar group_sizes = memblock_virt_alloc(ai->nr_groups * 1715999c17e3SSantosh Shilimkar sizeof(group_sizes[0]), 0); 1716999c17e3SSantosh Shilimkar unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); 1717999c17e3SSantosh Shilimkar unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); 17182f39e637STejun Heo 1719fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1720ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 1721a855b84cSTejun Heo 1722a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 1723a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 17242f39e637STejun Heo 1725fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1726fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 17272f39e637STejun Heo 17286563297cSTejun Heo group_offsets[group] = gi->base_offset; 17296563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 17306563297cSTejun Heo 
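		/* map each unit in this group to a cpu and record its offset */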
1731fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 1732fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 1733fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 1734fd1e8a1fSTejun Heo continue; 1735fd1e8a1fSTejun Heo 17369f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 1737635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1738635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1739fd1e8a1fSTejun Heo 1740fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 1741fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1742fb435d52STejun Heo 1743a855b84cSTejun Heo /* determine low/high unit_cpu */ 1744a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 1745a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 1746a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 1747a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 1748a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 1749a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 17500fc0531eSLinus Torvalds } 17510fc0531eSLinus Torvalds } 1752fd1e8a1fSTejun Heo pcpu_nr_units = unit; 17532f39e637STejun Heo 17542f39e637STejun Heo for_each_possible_cpu(cpu) 1755635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 1756635b75fcSTejun Heo 1757635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 1758635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 1759bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 17602f39e637STejun Heo 17616563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 17626563297cSTejun Heo pcpu_group_offsets = group_offsets; 17636563297cSTejun Heo pcpu_group_sizes = group_sizes; 1764fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 1765fb435d52STejun Heo pcpu_unit_offsets = unit_off; 17662f39e637STejun Heo 17672f39e637STejun Heo /* determine basic parameters */ 1768fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 1769d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 17706563297cSTejun Heo pcpu_atom_size = ai->atom_size; 1771ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1772ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1773cafe8816STejun Heo 177430a5b536SDennis Zhou pcpu_stats_save_ai(ai); 177530a5b536SDennis Zhou 1776d9b55eebSTejun Heo /* 1777d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 1778d9b55eebSTejun Heo * empty chunks. 1779d9b55eebSTejun Heo */ 1780d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1781999c17e3SSantosh Shilimkar pcpu_slot = memblock_virt_alloc( 1782999c17e3SSantosh Shilimkar pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); 1783fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 1784fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 1785fbf59bc9STejun Heo 1786edcb4639STejun Heo /* 1787*c0ebfdc3SDennis Zhou (Facebook) * Initialize first chunk. 1788*c0ebfdc3SDennis Zhou (Facebook) * If the reserved_size is non-zero, this initializes the reserved 1789*c0ebfdc3SDennis Zhou (Facebook) * chunk. If the reserved_size is zero, the reserved chunk is NULL 1790*c0ebfdc3SDennis Zhou (Facebook) * and the dynamic region is initialized here. The first chunk, 1791*c0ebfdc3SDennis Zhou (Facebook) * pcpu_first_chunk, will always point to the chunk that serves 1792*c0ebfdc3SDennis Zhou (Facebook) * the dynamic region. 
1793edcb4639STejun Heo */ 1794*c0ebfdc3SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + ai->static_size; 179510edf5b0SDennis Zhou (Facebook) map_size = ai->reserved_size ?: ai->dyn_size; 1796*c0ebfdc3SDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size, smap, 17970c4169c3SDennis Zhou (Facebook) ARRAY_SIZE(smap)); 179861ace7faSTejun Heo 1799edcb4639STejun Heo /* init dynamic chunk if necessary */ 1800b9c39442SDennis Zhou (Facebook) if (ai->reserved_size) { 18010c4169c3SDennis Zhou (Facebook) pcpu_reserved_chunk = chunk; 1802b9c39442SDennis Zhou (Facebook) 1803*c0ebfdc3SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + ai->static_size + 1804*c0ebfdc3SDennis Zhou (Facebook) ai->reserved_size; 180510edf5b0SDennis Zhou (Facebook) map_size = ai->dyn_size; 1806*c0ebfdc3SDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size, dmap, 180710edf5b0SDennis Zhou (Facebook) ARRAY_SIZE(dmap)); 1808edcb4639STejun Heo } 1809edcb4639STejun Heo 18102441d15cSTejun Heo /* link the first chunk in */ 18110c4169c3SDennis Zhou (Facebook) pcpu_first_chunk = chunk; 1812e2266705SDennis Zhou (Facebook) i = (pcpu_first_chunk->start_offset) ? 1 : 0; 1813b539b87fSTejun Heo pcpu_nr_empty_pop_pages += 1814e2266705SDennis Zhou (Facebook) pcpu_count_occupied_pages(pcpu_first_chunk, i); 1815ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 1816fbf59bc9STejun Heo 181730a5b536SDennis Zhou pcpu_stats_chunk_alloc(); 1818df95e795SDennis Zhou trace_percpu_create_chunk(base_addr); 181930a5b536SDennis Zhou 1820fbf59bc9STejun Heo /* we're done */ 1821bba174f5STejun Heo pcpu_base_addr = base_addr; 1822fb435d52STejun Heo return 0; 1823fbf59bc9STejun Heo } 182466c3a757STejun Heo 1825bbddff05STejun Heo #ifdef CONFIG_SMP 1826bbddff05STejun Heo 182717f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 1828f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 1829f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 1830f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 1831f58dc01bSTejun Heo }; 183266c3a757STejun Heo 1833f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 1834f58dc01bSTejun Heo 1835f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 183666c3a757STejun Heo { 18375479c78aSCyrill Gorcunov if (!str) 18385479c78aSCyrill Gorcunov return -EINVAL; 18395479c78aSCyrill Gorcunov 1840f58dc01bSTejun Heo if (0) 1841f58dc01bSTejun Heo /* nada */; 1842f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 1843f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 1844f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 1845f58dc01bSTejun Heo #endif 1846f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1847f58dc01bSTejun Heo else if (!strcmp(str, "page")) 1848f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 1849f58dc01bSTejun Heo #endif 1850f58dc01bSTejun Heo else 1851870d4b12SJoe Perches pr_warn("unknown allocator %s specified\n", str); 185266c3a757STejun Heo 1853f58dc01bSTejun Heo return 0; 185466c3a757STejun Heo } 1855f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup); 185666c3a757STejun Heo 18573c9a024fSTejun Heo /* 18583c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup. 18593c9a024fSTejun Heo * Build it if needed by the arch config or the generic setup is going 18603c9a024fSTejun Heo * to be used. 
18613c9a024fSTejun Heo */ 186208fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 186308fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 18643c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 18653c9a024fSTejun Heo #endif 18663c9a024fSTejun Heo 18673c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 18683c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 18693c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 18703c9a024fSTejun Heo #endif 18713c9a024fSTejun Heo 18723c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 18733c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 18743c9a024fSTejun Heo /** 1875fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 1876fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1877fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 1878fbf59bc9STejun Heo * @atom_size: allocation atom size 1879fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1880fbf59bc9STejun Heo * 1881fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 1882fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 1883fbf59bc9STejun Heo * atom size and distances between CPUs. 1884fbf59bc9STejun Heo * 1885bffc4375SYannick Guerrini * Groups are always multiples of atom size and CPUs which are of 1886fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 1887fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 1888fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage 1889fbf59bc9STejun Heo * of allocated virtual address space. 1890fbf59bc9STejun Heo * 1891fbf59bc9STejun Heo * RETURNS: 1892fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On 1893fbf59bc9STejun Heo * failure, ERR_PTR value is returned. 
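 *
 * For example (sketch), on a two-node machine with cpus 0-1 on node A
 * and cpus 2-3 on node B, where @cpu_distance_fn reports more than
 * LOCAL_DISTANCE between the nodes, two groups are built, {0, 1} and
 * {2, 3}, and each group's space is a multiple of @atom_size.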
1894fbf59bc9STejun Heo */ 1895fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 1896fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 1897fbf59bc9STejun Heo size_t atom_size, 1898fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1899fbf59bc9STejun Heo { 1900fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 1901fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 1902fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 1903fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 1904fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 1905fbf59bc9STejun Heo int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 1906fbf59bc9STejun Heo int last_allocs, group, unit; 1907fbf59bc9STejun Heo unsigned int cpu, tcpu; 1908fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 1909fbf59bc9STejun Heo unsigned int *cpu_map; 1910fbf59bc9STejun Heo 1911fbf59bc9STejun Heo /* this function may be called multiple times */ 1912fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 1913fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 1914fbf59bc9STejun Heo 1915fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 1916fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 1917fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 1918fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 1919fbf59bc9STejun Heo 1920fbf59bc9STejun Heo /* 1921fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 1922fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 192325985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 1924fbf59bc9STejun Heo * or larger than min_unit_size. 1925fbf59bc9STejun Heo */ 1926fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1927fbf59bc9STejun Heo 19289c015162SDennis Zhou (Facebook) /* determine the maximum # of units that can fit in an allocation */ 1929fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 1930fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 1931f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 1932fbf59bc9STejun Heo upa--; 1933fbf59bc9STejun Heo max_upa = upa; 1934fbf59bc9STejun Heo 1935fbf59bc9STejun Heo /* group cpus according to their proximity */ 1936fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 1937fbf59bc9STejun Heo group = 0; 1938fbf59bc9STejun Heo next_group: 1939fbf59bc9STejun Heo for_each_possible_cpu(tcpu) { 1940fbf59bc9STejun Heo if (cpu == tcpu) 1941fbf59bc9STejun Heo break; 1942fbf59bc9STejun Heo if (group_map[tcpu] == group && cpu_distance_fn && 1943fbf59bc9STejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 1944fbf59bc9STejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 1945fbf59bc9STejun Heo group++; 1946fbf59bc9STejun Heo nr_groups = max(nr_groups, group + 1); 1947fbf59bc9STejun Heo goto next_group; 1948fbf59bc9STejun Heo } 1949fbf59bc9STejun Heo } 1950fbf59bc9STejun Heo group_map[cpu] = group; 1951fbf59bc9STejun Heo group_cnt[group]++; 1952fbf59bc9STejun Heo } 1953fbf59bc9STejun Heo 1954fbf59bc9STejun Heo /* 19559c015162SDennis Zhou (Facebook) * Wasted space is caused by a ratio imbalance of upa to group_cnt. 19569c015162SDennis Zhou (Facebook) * Expand the unit_size until we use >= 75% of the units allocated. 
19579c015162SDennis Zhou (Facebook) * Related to atom_size, which could be much larger than the unit_size. 1958fbf59bc9STejun Heo */ 1959fbf59bc9STejun Heo last_allocs = INT_MAX; 1960fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 1961fbf59bc9STejun Heo int allocs = 0, wasted = 0; 1962fbf59bc9STejun Heo 1963f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 1964fbf59bc9STejun Heo continue; 1965fbf59bc9STejun Heo 1966fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 1967fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1968fbf59bc9STejun Heo allocs += this_allocs; 1969fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 1970fbf59bc9STejun Heo } 1971fbf59bc9STejun Heo 1972fbf59bc9STejun Heo /* 1973fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 1974fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 1975fbf59bc9STejun Heo * passes the following check. 1976fbf59bc9STejun Heo */ 1977fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 1978fbf59bc9STejun Heo continue; 1979fbf59bc9STejun Heo 1980fbf59bc9STejun Heo /* and then don't consume more memory */ 1981fbf59bc9STejun Heo if (allocs > last_allocs) 1982fbf59bc9STejun Heo break; 1983fbf59bc9STejun Heo last_allocs = allocs; 1984fbf59bc9STejun Heo best_upa = upa; 1985fbf59bc9STejun Heo } 1986fbf59bc9STejun Heo upa = best_upa; 1987fbf59bc9STejun Heo 1988fbf59bc9STejun Heo /* allocate and fill alloc_info */ 1989fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 1990fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 1991fbf59bc9STejun Heo 1992fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 1993fbf59bc9STejun Heo if (!ai) 1994fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 1995fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 1996fbf59bc9STejun Heo 1997fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 1998fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 1999fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 2000fbf59bc9STejun Heo } 2001fbf59bc9STejun Heo 2002fbf59bc9STejun Heo ai->static_size = static_size; 2003fbf59bc9STejun Heo ai->reserved_size = reserved_size; 2004fbf59bc9STejun Heo ai->dyn_size = dyn_size; 2005fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 2006fbf59bc9STejun Heo ai->atom_size = atom_size; 2007fbf59bc9STejun Heo ai->alloc_size = alloc_size; 2008fbf59bc9STejun Heo 2009fbf59bc9STejun Heo for (group = 0, unit = 0; group_cnt[group]; group++) { 2010fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2011fbf59bc9STejun Heo 2012fbf59bc9STejun Heo /* 2013fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 2014fbf59bc9STejun Heo * back-to-back. The caller should update this to 2015fbf59bc9STejun Heo * reflect actual allocation. 
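 *
 * For example, if group 0 holds four units with a 64k unit_size,
 * group 1 is initialized with base_offset 4 * 64k == 256k.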
2016fbf59bc9STejun Heo */ 2017fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 2018fbf59bc9STejun Heo 2019fbf59bc9STejun Heo for_each_possible_cpu(cpu) 2020fbf59bc9STejun Heo if (group_map[cpu] == group) 2021fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 2022fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 2023fbf59bc9STejun Heo unit += gi->nr_units; 2024fbf59bc9STejun Heo } 2025fbf59bc9STejun Heo BUG_ON(unit != nr_units); 2026fbf59bc9STejun Heo 2027fbf59bc9STejun Heo return ai; 2028fbf59bc9STejun Heo } 20293c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 2030fbf59bc9STejun Heo 20313c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 203266c3a757STejun Heo /** 203366c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 203466c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 20354ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2036c8826dd5STejun Heo * @atom_size: allocation atom size 2037c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2038c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 203925985edcSLucas De Marchi * @free_fn: function to free percpu page 204066c3a757STejun Heo * 204166c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 204266c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 204366c3a757STejun Heo * 204466c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 2045c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 2046c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 2047c8826dd5STejun Heo * aligned to @atom_size. 2048c8826dd5STejun Heo * 2049c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 2050c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 2051c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 2052c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 2053c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 2054c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 205566c3a757STejun Heo * 20564ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 205766c3a757STejun Heo * 205866c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 2059c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 206066c3a757STejun Heo * 206166c3a757STejun Heo * RETURNS: 2062fb435d52STejun Heo * 0 on success, -errno on failure. 
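 *
 * The generic setup_per_cpu_areas() at the bottom of this file calls
 * it as:
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
 *				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);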
206366c3a757STejun Heo */ 20644ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 2065c8826dd5STejun Heo size_t atom_size, 2066c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 2067c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2068c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 206966c3a757STejun Heo { 2070c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 2071c8826dd5STejun Heo void **areas = NULL; 2072fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 207393c76b6bSzijun_hu size_t size_sum, areas_size; 207493c76b6bSzijun_hu unsigned long max_distance; 20759b739662Szijun_hu int group, i, highest_group, rc; 207666c3a757STejun Heo 2077c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 2078c8826dd5STejun Heo cpu_distance_fn); 2079fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2080fd1e8a1fSTejun Heo return PTR_ERR(ai); 208166c3a757STejun Heo 2082fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2083c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 208466c3a757STejun Heo 2085999c17e3SSantosh Shilimkar areas = memblock_virt_alloc_nopanic(areas_size, 0); 2086c8826dd5STejun Heo if (!areas) { 2087fb435d52STejun Heo rc = -ENOMEM; 2088c8826dd5STejun Heo goto out_free; 2089fa8a7094STejun Heo } 209066c3a757STejun Heo 20919b739662Szijun_hu /* allocate, copy and determine base address & max_distance */ 20929b739662Szijun_hu highest_group = 0; 2093c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2094c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2095c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 2096c8826dd5STejun Heo void *ptr; 209766c3a757STejun Heo 2098c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 2099c8826dd5STejun Heo cpu = gi->cpu_map[i]; 2100c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 2101c8826dd5STejun Heo 2102c8826dd5STejun Heo /* allocate space for the whole group */ 2103c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 2104c8826dd5STejun Heo if (!ptr) { 2105c8826dd5STejun Heo rc = -ENOMEM; 2106c8826dd5STejun Heo goto out_free_areas; 2107c8826dd5STejun Heo } 2108f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2109f528f0b8SCatalin Marinas kmemleak_free(ptr); 2110c8826dd5STejun Heo areas[group] = ptr; 2111c8826dd5STejun Heo 2112c8826dd5STejun Heo base = min(ptr, base); 21139b739662Szijun_hu if (ptr > areas[highest_group]) 21149b739662Szijun_hu highest_group = group; 21159b739662Szijun_hu } 21169b739662Szijun_hu max_distance = areas[highest_group] - base; 21179b739662Szijun_hu max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 21189b739662Szijun_hu 21199b739662Szijun_hu /* warn if maximum distance is further than 75% of vmalloc space */ 21209b739662Szijun_hu if (max_distance > VMALLOC_TOTAL * 3 / 4) { 21219b739662Szijun_hu pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 21229b739662Szijun_hu max_distance, VMALLOC_TOTAL); 21239b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 21249b739662Szijun_hu /* and fail if we have fallback */ 21259b739662Szijun_hu rc = -EINVAL; 21269b739662Szijun_hu goto out_free_areas; 21279b739662Szijun_hu #endif 212842b64281STejun Heo } 212942b64281STejun Heo 213042b64281STejun Heo /* 213142b64281STejun Heo * Copy data and free unused parts. 
This should happen after all 213242b64281STejun Heo * allocations are complete; otherwise, we may end up with 213342b64281STejun Heo * overlapping groups. 213442b64281STejun Heo */ 213542b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 213642b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 213742b64281STejun Heo void *ptr = areas[group]; 2138c8826dd5STejun Heo 2139c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 2140c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 2141c8826dd5STejun Heo /* unused unit, free whole */ 2142c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 2143c8826dd5STejun Heo continue; 2144c8826dd5STejun Heo } 2145c8826dd5STejun Heo /* copy and return the unused part */ 2146fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 2147c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 2148c8826dd5STejun Heo } 214966c3a757STejun Heo } 215066c3a757STejun Heo 2151c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 21526ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2153c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 21546ea529a2STejun Heo } 2155c8826dd5STejun Heo 2156870d4b12SJoe Perches pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 2157fd1e8a1fSTejun Heo PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 2158fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 215966c3a757STejun Heo 2160fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, base); 2161c8826dd5STejun Heo goto out_free; 2162c8826dd5STejun Heo 2163c8826dd5STejun Heo out_free_areas: 2164c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 2165f851c8d8SMichael Holzheu if (areas[group]) 2166c8826dd5STejun Heo free_fn(areas[group], 2167c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 2168c8826dd5STejun Heo out_free: 2169fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2170c8826dd5STejun Heo if (areas) 2171999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 2172fb435d52STejun Heo return rc; 2173d4b95f80STejun Heo } 21743c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 2175d4b95f80STejun Heo 21763c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 2177d4b95f80STejun Heo /** 217800ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 2179d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2180d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 218125985edcSLucas De Marchi * @free_fn: function to free percpu page, always called with PAGE_SIZE 2182d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 2183d4b95f80STejun Heo * 218400ae4064STejun Heo * This is a helper to ease setting up page-remapped first percpu 218500ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 2186d4b95f80STejun Heo * 2187d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 2188d4b95f80STejun Heo * page-by-page into vmalloc area. 2189d4b95f80STejun Heo * 2190d4b95f80STejun Heo * RETURNS: 2191fb435d52STejun Heo * 0 on success, -errno on failure. 
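 *
 * A sketch of an arch-side call (the callback names are illustrative
 * and not defined in this file):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   pcpu_fc_alloc, pcpu_fc_free,
 *				   pcpu_populate_pte);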
2192d4b95f80STejun Heo */ 2193fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 2194d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2195d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 2196d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 2197d4b95f80STejun Heo { 21988f05a6a6STejun Heo static struct vm_struct vm; 2199fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 220000ae4064STejun Heo char psize_str[16]; 2201ce3141a2STejun Heo int unit_pages; 2202d4b95f80STejun Heo size_t pages_size; 2203ce3141a2STejun Heo struct page **pages; 2204fb435d52STejun Heo int unit, i, j, rc; 22058f606604Szijun_hu int upa; 22068f606604Szijun_hu int nr_g0_units; 2207d4b95f80STejun Heo 220800ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 220900ae4064STejun Heo 22104ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 2211fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2212fd1e8a1fSTejun Heo return PTR_ERR(ai); 2213fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 22148f606604Szijun_hu upa = ai->alloc_size/ai->unit_size; 22158f606604Szijun_hu nr_g0_units = roundup(num_possible_cpus(), upa); 22168f606604Szijun_hu if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) { 22178f606604Szijun_hu pcpu_free_alloc_info(ai); 22188f606604Szijun_hu return -EINVAL; 22198f606604Szijun_hu } 2220fd1e8a1fSTejun Heo 2221fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 2222d4b95f80STejun Heo 2223d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 2224fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2225fd1e8a1fSTejun Heo sizeof(pages[0])); 2226999c17e3SSantosh Shilimkar pages = memblock_virt_alloc(pages_size, 0); 2227d4b95f80STejun Heo 22288f05a6a6STejun Heo /* allocate pages */ 2229d4b95f80STejun Heo j = 0; 22308f606604Szijun_hu for (unit = 0; unit < num_possible_cpus(); unit++) { 2231fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 22328f606604Szijun_hu for (i = 0; i < unit_pages; i++) { 2233d4b95f80STejun Heo void *ptr; 2234d4b95f80STejun Heo 22353cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 2236d4b95f80STejun Heo if (!ptr) { 2237870d4b12SJoe Perches pr_warn("failed to allocate %s page for cpu%u\n", 2238598d8091SJoe Perches psize_str, cpu); 2239d4b95f80STejun Heo goto enomem; 2240d4b95f80STejun Heo } 2241f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2242f528f0b8SCatalin Marinas kmemleak_free(ptr); 2243ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 2244d4b95f80STejun Heo } 22458f606604Szijun_hu } 2246d4b95f80STejun Heo 22478f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 22488f05a6a6STejun Heo vm.flags = VM_ALLOC; 2249fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 22508f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 22518f05a6a6STejun Heo 2252fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 22531d9d3257STejun Heo unsigned long unit_addr = 2254fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 22558f05a6a6STejun Heo 2256ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 22578f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 22588f05a6a6STejun Heo 22598f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 2260fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 2261ce3141a2STejun Heo unit_pages); 2262fb435d52STejun Heo if (rc < 0) 
2263fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 22648f05a6a6STejun Heo 22658f05a6a6STejun Heo /* 22668f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 22678f05a6a6STejun Heo * cache for the linear mapping here - something 22688f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 22698f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 22708f05a6a6STejun Heo * data structures are not set up yet. 22718f05a6a6STejun Heo */ 22728f05a6a6STejun Heo 22738f05a6a6STejun Heo /* copy static data */ 2274fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 227566c3a757STejun Heo } 227666c3a757STejun Heo 227766c3a757STejun Heo /* we're ready, commit */ 2278870d4b12SJoe Perches pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n", 2279fd1e8a1fSTejun Heo unit_pages, psize_str, vm.addr, ai->static_size, 2280fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 228166c3a757STejun Heo 2282fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, vm.addr); 2283d4b95f80STejun Heo goto out_free_ar; 2284d4b95f80STejun Heo 2285d4b95f80STejun Heo enomem: 2286d4b95f80STejun Heo while (--j >= 0) 2287ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 2288fb435d52STejun Heo rc = -ENOMEM; 2289d4b95f80STejun Heo out_free_ar: 2290999c17e3SSantosh Shilimkar memblock_free_early(__pa(pages), pages_size); 2291fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2292fb435d52STejun Heo return rc; 229366c3a757STejun Heo } 22943c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */ 2295d4b95f80STejun Heo 2296bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 22978c4bfc6eSTejun Heo /* 2298bbddff05STejun Heo * Generic SMP percpu area setup. 2299e74e3962STejun Heo * 2300e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 2301e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 2302e74e3962STejun Heo * important because many archs have addressing restrictions and might 2303e74e3962STejun Heo * fail if the percpu area is located far away from the previous 2304e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 2305e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 2306e74e3962STejun Heo * on the physical linear memory mapping which uses large page 2307e74e3962STejun Heo * mappings on applicable archs. 2308e74e3962STejun Heo */ 2309e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 2310e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 2311e74e3962STejun Heo 2312c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 2313c8826dd5STejun Heo size_t align) 2314c8826dd5STejun Heo { 2315999c17e3SSantosh Shilimkar return memblock_virt_alloc_from_nopanic( 2316999c17e3SSantosh Shilimkar size, align, __pa(MAX_DMA_ADDRESS)); 2317c8826dd5STejun Heo } 2318c8826dd5STejun Heo 2319c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 2320c8826dd5STejun Heo { 2321999c17e3SSantosh Shilimkar memblock_free_early(__pa(ptr), size); 2322c8826dd5STejun Heo } 2323c8826dd5STejun Heo 2324e74e3962STejun Heo void __init setup_per_cpu_areas(void) 2325e74e3962STejun Heo { 2326e74e3962STejun Heo unsigned long delta; 2327e74e3962STejun Heo unsigned int cpu; 2328fb435d52STejun Heo int rc; 2329e74e3962STejun Heo 2330e74e3962STejun Heo /* 2331e74e3962STejun Heo * Always reserve area for module percpu variables. 
2324e74e3962STejun Heo void __init setup_per_cpu_areas(void)
2325e74e3962STejun Heo {
2326e74e3962STejun Heo 	unsigned long delta;
2327e74e3962STejun Heo 	unsigned int cpu;
2328fb435d52STejun Heo 	int rc;
2329e74e3962STejun Heo 
2330e74e3962STejun Heo 	/*
2331e74e3962STejun Heo 	 * Always reserve area for module percpu variables.  That's
2332e74e3962STejun Heo 	 * what the legacy allocator did.
2333e74e3962STejun Heo 	 */
2334fb435d52STejun Heo 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2335c8826dd5STejun Heo 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2336c8826dd5STejun Heo 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2337fb435d52STejun Heo 	if (rc < 0)
2338bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
2339e74e3962STejun Heo 
2340e74e3962STejun Heo 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2341e74e3962STejun Heo 	for_each_possible_cpu(cpu)
2342fb435d52STejun Heo 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2343e74e3962STejun Heo }
2344e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2345099a19d9STejun Heo 
2346bbddff05STejun Heo #else	/* CONFIG_SMP */
2347bbddff05STejun Heo 
2348bbddff05STejun Heo /*
2349bbddff05STejun Heo  * UP percpu area setup.
2350bbddff05STejun Heo  *
2351bbddff05STejun Heo  * UP always uses the km-based percpu allocator with identity mapping.
2352bbddff05STejun Heo  * Static percpu variables are indistinguishable from the usual static
2353bbddff05STejun Heo  * variables and don't require any special preparation.
2354bbddff05STejun Heo  */
2355bbddff05STejun Heo void __init setup_per_cpu_areas(void)
2356bbddff05STejun Heo {
2357bbddff05STejun Heo 	const size_t unit_size =
2358bbddff05STejun Heo 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2359bbddff05STejun Heo 					 PERCPU_DYNAMIC_RESERVE));
2360bbddff05STejun Heo 	struct pcpu_alloc_info *ai;
2361bbddff05STejun Heo 	void *fc;
2362bbddff05STejun Heo 
2363bbddff05STejun Heo 	ai = pcpu_alloc_alloc_info(1, 1);
2364999c17e3SSantosh Shilimkar 	fc = memblock_virt_alloc_from_nopanic(unit_size,
2365999c17e3SSantosh Shilimkar 					      PAGE_SIZE,
2366999c17e3SSantosh Shilimkar 					      __pa(MAX_DMA_ADDRESS));
2367bbddff05STejun Heo 	if (!ai || !fc)
2368bbddff05STejun Heo 		panic("Failed to allocate memory for percpu areas.");
2369100d13c3SCatalin Marinas 	/* kmemleak tracks the percpu allocations separately */
2370100d13c3SCatalin Marinas 	kmemleak_free(fc);
2371bbddff05STejun Heo 
2372bbddff05STejun Heo 	ai->dyn_size = unit_size;
2373bbddff05STejun Heo 	ai->unit_size = unit_size;
2374bbddff05STejun Heo 	ai->atom_size = unit_size;
2375bbddff05STejun Heo 	ai->alloc_size = unit_size;
2376bbddff05STejun Heo 	ai->groups[0].nr_units = 1;
2377bbddff05STejun Heo 	ai->groups[0].cpu_map[0] = 0;
2378bbddff05STejun Heo 
2379bbddff05STejun Heo 	if (pcpu_setup_first_chunk(ai, fc) < 0)
2380bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
2381bbddff05STejun Heo }
2382bbddff05STejun Heo 
2383bbddff05STejun Heo #endif /* CONFIG_SMP */
2384bbddff05STejun Heo 
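/*
 * Exposition note, not part of the original file: after either setup
 * path above, static and dynamic percpu memory behave the same way for
 * users.  Hypothetical usage (example_stats and its events field are
 * made-up names):
 *
 *	struct example_stats { unsigned long events; };
 *	struct example_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct example_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->events);
 *	...
 *	free_percpu(stats);
 *
 * this_cpu_inc() reaches this cpu's copy through the per-cpu offsets
 * established during boot by the code above.
 */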
2390099a19d9STejun Heo  */
2391099a19d9STejun Heo void __init percpu_init_late(void)
2392099a19d9STejun Heo {
2393099a19d9STejun Heo 	struct pcpu_chunk *target_chunks[] =
2394099a19d9STejun Heo 		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
2395099a19d9STejun Heo 	struct pcpu_chunk *chunk;
2396099a19d9STejun Heo 	unsigned long flags;
2397099a19d9STejun Heo 	int i;
2398099a19d9STejun Heo 
2399099a19d9STejun Heo 	for (i = 0; (chunk = target_chunks[i]); i++) {
2400099a19d9STejun Heo 		int *map;
2401099a19d9STejun Heo 		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
2402099a19d9STejun Heo 
2403099a19d9STejun Heo 		BUILD_BUG_ON(size > PAGE_SIZE);
2404099a19d9STejun Heo 
240590459ce0SBob Liu 		map = pcpu_mem_zalloc(size);
2406099a19d9STejun Heo 		BUG_ON(!map);
2407099a19d9STejun Heo 
2408099a19d9STejun Heo 		spin_lock_irqsave(&pcpu_lock, flags);
2409099a19d9STejun Heo 		memcpy(map, chunk->map, size);
2410099a19d9STejun Heo 		chunk->map = map;
2411099a19d9STejun Heo 		spin_unlock_irqrestore(&pcpu_lock, flags);
2412099a19d9STejun Heo 	}
2413099a19d9STejun Heo }
24141a4d7607STejun Heo 
24151a4d7607STejun Heo /*
24161a4d7607STejun Heo  * The percpu allocator is initialized early during boot when neither slab
24171a4d7607STejun Heo  * nor workqueue is available.  Plug async management until everything is
24181a4d7607STejun Heo  * up and running.
24191a4d7607STejun Heo  */
24201a4d7607STejun Heo static int __init percpu_enable_async(void)
24211a4d7607STejun Heo {
24221a4d7607STejun Heo 	pcpu_async_enabled = true;
24231a4d7607STejun Heo 	return 0;
24241a4d7607STejun Heo }
24251a4d7607STejun Heo subsys_initcall(percpu_enable_async);
2426
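/*
 * Exposition note, not part of the original file: pcpu_async_enabled
 * gates the background balance work; the helper earlier in this file
 * checks it before queueing anything, essentially:
 *
 *	static void pcpu_schedule_balance_work(void)
 *	{
 *		if (pcpu_async_enabled)
 *			schedule_work(&pcpu_balance_work);
 *	}
 *
 * so allocations made before this initcall simply skip async
 * replenishment instead of touching a not-yet-initialized workqueue.
 */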