// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while all unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use the
 * second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.
 * The allocation map is updated on every allocation and free to reflect
 * the current state while the boundary map is only updated on allocation.
 * Each metadata block contains information to help mitigate the need to
 * iterate over large portions of the bitmap.  The reverse mapping from
 * page to chunk is stored in the page's index.  Lastly, units are lazily
 * backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"
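
/*
 * Illustrative sketch of the unit addressing described in the header
 * comment; kept under #if 0 as it is not part of the allocator.  The
 * function name and values are hypothetical.
 */
#if 0
static void *example_unit_addr(void *chunk_base, unsigned long unit_off,
			       unsigned long alloc_off)
{
	/*
	 * An allocation is a byte offset valid in every unit of a chunk;
	 * e.g. the 512 bytes at 6k exist at 6k in c1:u0, c1:u1, etc.
	 * Only the per-cpu unit offset differs.
	 */
	return chunk_base + unit_off + alloc_off;
}
#endif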
/*
 * The slots are sorted by the size of the biggest continuous free area.
 * 1-31 bytes share the same slot.
 */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
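
/*
 * Illustrative sketch of the translation macros above; kept under #if 0.
 * On SMP the two default macros are exact inverses, so converting an
 * address to a percpu pointer and back yields the original address.
 */
#if 0
static void example_translation_round_trip(void *addr)
{
	void __percpu *ptr = __addr_to_pcpu_ptr(addr);

	/*
	 * ptr == addr - pcpu_base_addr + __per_cpu_start, and converting
	 * back removes exactly that bias again.
	 */
	WARN_ON(__pcpu_ptr_to_addr(ptr) != addr);
}
#endif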
/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages by chunk type, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}
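
/*
 * Illustrative sketch of how the thresholds above are meant to be used;
 * kept under #if 0.  The real check lives on the allocation path; this is
 * a simplified, hypothetical version and assumes the chunk-type enum from
 * percpu-internal.h.
 */
#if 0
static void example_check_pop_pages(enum pcpu_chunk_type type)
{
	/* too few empty populated pages left for atomic allocs: refill */
	if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();
}
#endif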
/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}
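
/*
 * Worked example of the slot mapping above; kept under #if 0.  The slot
 * index grows with the log2 of the size, so each slot covers one power
 * of two worth of sizes.
 */
#if 0
static void example_size_to_slot(void)
{
	/* fls(24) == 5, so max(5 - PCPU_SLOT_BASE_SHIFT + 2, 1) == 2 */
	BUG_ON(__pcpu_size_to_slot(24) != 2);
	/* a completely free unit always lands in the last slot */
	BUG_ON(pcpu_size_to_slot(pcpu_unit_size) != pcpu_nr_slots - 1);
}
#endif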
/*
 * The following are helper functions to access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
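
/*
 * Worked example of the skip decision above; kept under #if 0 with
 * hypothetical hint values.
 */
#if 0
static void example_next_hint(void)
{
	struct pcpu_block_md block = {
		.first_free		= 0,
		.scan_hint		= 4,
		.scan_hint_start	= 10,
		.contig_hint		= 32,
		.contig_hint_start	= 64,
	};

	/* an 8-bit request cannot fit in the scan_hint: skip past it */
	BUG_ON(pcpu_next_hint(&block, 8) != 14);	/* 10 + 4 */
	/* a 2-bit request may fit earlier: start at first_free */
	BUG_ON(pcpu_next_hint(&block, 2) != 0);
}
#endif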
/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First, is there a contig_hint
		 * to check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}
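
/*
 * Worked example of the cross-block aggregation above; kept under #if 0.
 * The block states described in the comment are hypothetical.
 */
#if 0
static void example_first_free_region(struct pcpu_chunk *chunk)
{
	int bit_off = 0, bits;

	/*
	 * Suppose block 0's only free area is its trailing 100 bits,
	 * block 1 is completely free, and block 2 begins with 50 free
	 * bits.  A single call starting at bit 0 aggregates all three
	 * pieces into one region:
	 *   bit_off == PCPU_BITMAP_BLOCK_BITS - 100
	 *   bits    == 100 + PCPU_BITMAP_BLOCK_BITS + 50
	 */
	pcpu_next_md_free_region(chunk, &bit_off, &bits);
}
#endif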
/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  If the allocation request fits within the block,
 * block->first_free is returned to see if the request can be fulfilled prior
 * to the contig hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
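
/*
 * Illustrative sketch of the iterator usage; kept under #if 0.  Walks all
 * aggregated free regions of a chunk as the allocator's refresh path does.
 */
#if 0
static void example_walk_free_regions(struct pcpu_chunk *chunk)
{
	int bit_off = 0, bits = 0;

	/* each iteration reports one aggregated free region */
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pr_info("free region: off=%d bits=%d\n", bit_off, bits);
}
#endif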
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}
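
/*
 * Illustrative sketch of the intended pairing; kept under #if 0.  Small
 * requests come from the slab, large ones from vmalloc, and kvfree() in
 * pcpu_mem_free() below handles either case.
 */
#if 0
static int example_alloc_map(size_t bits)
{
	unsigned long *map = pcpu_mem_zalloc(BITS_TO_LONGS(bits) *
					     sizeof(map[0]), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	/* ... use the zeroed bitmap ... */
	pcpu_mem_free(map);
	return 0;
}
#endif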
5031880d93bSTejun Heo */ 5041d5cfdb0STetsuo Handa static void pcpu_mem_free(void *ptr) 5051880d93bSTejun Heo { 5061d5cfdb0STetsuo Handa kvfree(ptr); 507fbf59bc9STejun Heo } 508fbf59bc9STejun Heo 5098744d859SDennis Zhou static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot, 5108744d859SDennis Zhou bool move_front) 5118744d859SDennis Zhou { 5128744d859SDennis Zhou if (chunk != pcpu_reserved_chunk) { 5133c7be18aSRoman Gushchin struct list_head *pcpu_slot; 5143c7be18aSRoman Gushchin 5153c7be18aSRoman Gushchin pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk)); 5168744d859SDennis Zhou if (move_front) 5178744d859SDennis Zhou list_move(&chunk->list, &pcpu_slot[slot]); 5188744d859SDennis Zhou else 5198744d859SDennis Zhou list_move_tail(&chunk->list, &pcpu_slot[slot]); 5208744d859SDennis Zhou } 5218744d859SDennis Zhou } 5228744d859SDennis Zhou 5238744d859SDennis Zhou static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot) 5248744d859SDennis Zhou { 5258744d859SDennis Zhou __pcpu_chunk_move(chunk, slot, true); 5268744d859SDennis Zhou } 5278744d859SDennis Zhou 528fbf59bc9STejun Heo /** 529fbf59bc9STejun Heo * pcpu_chunk_relocate - put chunk in the appropriate chunk slot 530fbf59bc9STejun Heo * @chunk: chunk of interest 531fbf59bc9STejun Heo * @oslot: the previous slot it was on 532fbf59bc9STejun Heo * 533fbf59bc9STejun Heo * This function is called after an allocation or free changed @chunk. 534fbf59bc9STejun Heo * New slot according to the changed state is determined and @chunk is 535edcb4639STejun Heo * moved to the slot. Note that the reserved chunk is never put on 536edcb4639STejun Heo * chunk slots. 537ccea34b5STejun Heo * 538ccea34b5STejun Heo * CONTEXT: 539ccea34b5STejun Heo * pcpu_lock. 540fbf59bc9STejun Heo */ 541fbf59bc9STejun Heo static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) 542fbf59bc9STejun Heo { 543fbf59bc9STejun Heo int nslot = pcpu_chunk_slot(chunk); 544fbf59bc9STejun Heo 5458744d859SDennis Zhou if (oslot != nslot) 5468744d859SDennis Zhou __pcpu_chunk_move(chunk, nslot, oslot < nslot); 54740064aecSDennis Zhou (Facebook) } 54840064aecSDennis Zhou (Facebook) 54940064aecSDennis Zhou (Facebook) /* 550b239f7daSDennis Zhou * pcpu_update_empty_pages - update empty page counters 551b239f7daSDennis Zhou * @chunk: chunk of interest 552b239f7daSDennis Zhou * @nr: nr of empty pages 55340064aecSDennis Zhou (Facebook) * 554b239f7daSDennis Zhou * This is used to keep track of the empty pages now based on the premise 555b239f7daSDennis Zhou * a md_block covers a page. The hint update functions recognize if a block 556b239f7daSDennis Zhou * is made full or broken to calculate deltas for keeping track of free pages. 
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than, or larger than, the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal to, and farther
		 * than, the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}
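
/*
 * Worked example of the hint bookkeeping above; kept under #if 0 with
 * hypothetical values.  A new, larger free area displaces the contig_hint,
 * and the old contig_hint is remembered as the scan_hint.
 */
#if 0
static void example_block_update(void)
{
	struct pcpu_block_md block = {
		.contig_hint		= 20,
		.contig_hint_start	= 200,
		.first_free		= 200,
		.nr_bits		= 1024,
	};

	/* report a larger free area [400, 450) */
	pcpu_block_update(&block, 400, 450);

	BUG_ON(block.contig_hint != 50 || block.contig_hint_start != 400);
	/* the old contig_hint was promoted to the scan_hint */
	BUG_ON(block.scan_hint != 20 || block.scan_hint_start != 200);
}
#endif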
/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}
/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}
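
/*
 * Worked example of the per block offset calculation used above; kept
 * under #if 0 and assuming PCPU_BITMAP_BLOCK_BITS == 1024 for the
 * arithmetic in the comments.
 */
#if 0
static void example_alloc_offsets(void)
{
	int bit_off = 1000, bits = 50;	/* allocation spans two blocks */
	int s_index = pcpu_off_to_block_index(bit_off);		   /* 0 */
	int e_index = pcpu_off_to_block_index(bit_off + bits - 1); /* 1 */
	int s_off = pcpu_off_to_block_off(bit_off);		   /* 1000 */
	int e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; /* 26 */

	/* bits [1000, 1024) live in block 0, bits [0, 26) in block 1 */
	BUG_ON(s_index != 0 || e_index != 1 || s_off != 1000 || e_off != 26);
}
#endif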
/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to the beginning and end of the free area
	 * within their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* the freed area spans blocks */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}
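
/*
 * Illustrative sketch of the start extension used above; kept under #if 0.
 * The helper name is hypothetical and mirrors only the cheap path.
 */
#if 0
static int example_extend_start(struct pcpu_block_md *s_block, int s_off)
{
	/*
	 * A freed area starting at contig_hint_start + contig_hint butts
	 * against the recorded free area, so the merged area begins at
	 * contig_hint_start; e.g. with a hint of [100, 120), freeing at
	 * bit 120 yields a merged start of 100 without a backward scan.
	 */
	if (s_off == s_block->contig_hint_start + s_block->contig_hint)
		return s_block->contig_hint_start;
	return s_off;	/* a backward bitmap scan refines this in practice */
}
#endif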
1004b185cd0dSDennis Zhou (Facebook) */ 1005b239f7daSDennis Zhou if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index) 1006d33d9f3dSDennis Zhou pcpu_chunk_refresh_hint(chunk, true); 1007b185cd0dSDennis Zhou (Facebook) else 100892c14cabSDennis Zhou pcpu_block_update(&chunk->chunk_md, 100992c14cabSDennis Zhou pcpu_block_off_to_off(s_index, start), 101092c14cabSDennis Zhou end); 1011ca460b3cSDennis Zhou (Facebook) } 1012ca460b3cSDennis Zhou (Facebook) 1013ca460b3cSDennis Zhou (Facebook) /** 101440064aecSDennis Zhou (Facebook) * pcpu_is_populated - determines if the region is populated 101540064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 101640064aecSDennis Zhou (Facebook) * @bit_off: chunk offset 101740064aecSDennis Zhou (Facebook) * @bits: size of area 101840064aecSDennis Zhou (Facebook) * @next_off: return value for the next offset to start searching 101940064aecSDennis Zhou (Facebook) * 102040064aecSDennis Zhou (Facebook) * For atomic allocations, check if the backing pages are populated. 102140064aecSDennis Zhou (Facebook) * 102240064aecSDennis Zhou (Facebook) * RETURNS: 102340064aecSDennis Zhou (Facebook) * true if the backing pages are populated, false otherwise. 102440064aecSDennis Zhou (Facebook) * @next_off is set so pcpu_find_block_fit() can skip over unpopulated blocks. 102540064aecSDennis Zhou (Facebook) */ 102640064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, 102740064aecSDennis Zhou (Facebook) int *next_off) 102840064aecSDennis Zhou (Facebook) { 1029e837dfdeSDennis Zhou unsigned int page_start, page_end, rs, re; 103040064aecSDennis Zhou (Facebook) 103140064aecSDennis Zhou (Facebook) page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); 103240064aecSDennis Zhou (Facebook) page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); 103340064aecSDennis Zhou (Facebook) 103440064aecSDennis Zhou (Facebook) rs = page_start; 1035e837dfdeSDennis Zhou bitmap_next_clear_region(chunk->populated, &rs, &re, page_end); 103640064aecSDennis Zhou (Facebook) if (rs >= page_end) 103740064aecSDennis Zhou (Facebook) return true; 103840064aecSDennis Zhou (Facebook) 103940064aecSDennis Zhou (Facebook) *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE; 104040064aecSDennis Zhou (Facebook) return false; 104140064aecSDennis Zhou (Facebook) } 104240064aecSDennis Zhou (Facebook) 104340064aecSDennis Zhou (Facebook) /** 104440064aecSDennis Zhou (Facebook) * pcpu_find_block_fit - finds the block index to start searching 104540064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 104640064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 104740064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE bytes) 104840064aecSDennis Zhou (Facebook) * @pop_only: use populated regions only 104940064aecSDennis Zhou (Facebook) * 1050b4c2116cSDennis Zhou (Facebook) * Given a chunk and an allocation spec, find the offset to begin searching 1051b4c2116cSDennis Zhou (Facebook) * for a free region. This iterates over the bitmap metadata blocks to 1052b4c2116cSDennis Zhou (Facebook) * find an offset that will be guaranteed to fit the requirements. It is 1053b4c2116cSDennis Zhou (Facebook) * not quite first fit: if the allocation does not fit in the contig hint 1054b4c2116cSDennis Zhou (Facebook) * of a block or chunk, that block or chunk is skipped. This errs on the 1055b4c2116cSDennis Zhou (Facebook) * side of caution to prevent excess iteration.
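 *
 * For example (made-up sizes): a request for 16 bits skips a block
 * whose contig_hint is 12 without scanning its alloc_map, even if
 * that block's free bits adjoin free space in a neighboring block.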
Poor alignment can cause the allocator to 1056b4c2116cSDennis Zhou (Facebook) * skip over blocks and chunks that have valid free areas. 1057b4c2116cSDennis Zhou (Facebook) * 105840064aecSDennis Zhou (Facebook) * RETURNS: 105940064aecSDennis Zhou (Facebook) * The offset in the bitmap to begin searching. 106040064aecSDennis Zhou (Facebook) * -1 if no offset is found. 106140064aecSDennis Zhou (Facebook) */ 106240064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, 106340064aecSDennis Zhou (Facebook) size_t align, bool pop_only) 106440064aecSDennis Zhou (Facebook) { 106592c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1066b4c2116cSDennis Zhou (Facebook) int bit_off, bits, next_off; 106740064aecSDennis Zhou (Facebook) 106813f96637SDennis Zhou (Facebook) /* 106913f96637SDennis Zhou (Facebook) * Check to see if the allocation can fit in the chunk's contig hint. 107013f96637SDennis Zhou (Facebook) * This is an optimization to prevent scanning by assuming if it 107113f96637SDennis Zhou (Facebook) * cannot fit in the global hint, there is memory pressure and creating 107213f96637SDennis Zhou (Facebook) * a new chunk would happen soon. 107313f96637SDennis Zhou (Facebook) */ 107492c14cabSDennis Zhou bit_off = ALIGN(chunk_md->contig_hint_start, align) - 107592c14cabSDennis Zhou chunk_md->contig_hint_start; 107692c14cabSDennis Zhou if (bit_off + alloc_bits > chunk_md->contig_hint) 107713f96637SDennis Zhou (Facebook) return -1; 107813f96637SDennis Zhou (Facebook) 1079d33d9f3dSDennis Zhou bit_off = pcpu_next_hint(chunk_md, alloc_bits); 1080b4c2116cSDennis Zhou (Facebook) bits = 0; 1081b4c2116cSDennis Zhou (Facebook) pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { 108240064aecSDennis Zhou (Facebook) if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, 1083b4c2116cSDennis Zhou (Facebook) &next_off)) 108440064aecSDennis Zhou (Facebook) break; 108540064aecSDennis Zhou (Facebook) 1086b4c2116cSDennis Zhou (Facebook) bit_off = next_off; 108740064aecSDennis Zhou (Facebook) bits = 0; 108840064aecSDennis Zhou (Facebook) } 108940064aecSDennis Zhou (Facebook) 109040064aecSDennis Zhou (Facebook) if (bit_off == pcpu_chunk_map_bits(chunk)) 109140064aecSDennis Zhou (Facebook) return -1; 109240064aecSDennis Zhou (Facebook) 109340064aecSDennis Zhou (Facebook) return bit_off; 109440064aecSDennis Zhou (Facebook) } 109540064aecSDennis Zhou (Facebook) 1096b89462a9SDennis Zhou /* 1097b89462a9SDennis Zhou * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() 1098b89462a9SDennis Zhou * @map: the address to base the search on 1099b89462a9SDennis Zhou * @size: the bitmap size in bits 1100b89462a9SDennis Zhou * @start: the bitnumber to start searching at 1101b89462a9SDennis Zhou * @nr: the number of zeroed bits we're looking for 1102b89462a9SDennis Zhou * @align_mask: alignment mask for zero area 1103b89462a9SDennis Zhou * @largest_off: offset of the largest area skipped 1104b89462a9SDennis Zhou * @largest_bits: size of the largest area skipped 1105b89462a9SDennis Zhou * 1106b89462a9SDennis Zhou * The @align_mask should be one less than a power of 2. 1107b89462a9SDennis Zhou * 1108b89462a9SDennis Zhou * This is a modified version of bitmap_find_next_zero_area_off() to remember 1109b89462a9SDennis Zhou * the largest area that was skipped. This is imperfect, but in general is 1110b89462a9SDennis Zhou * good enough. The largest remembered region is the largest failed region 1111b89462a9SDennis Zhou * seen. 
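 *
 * (Ties between equally sized skipped areas are broken in favor of
 * the offset with more trailing zero bits, i.e. better natural
 * alignment, via the __ffs() comparison below.)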
This does not include anything we possibly skipped due to alignment. 1112b89462a9SDennis Zhou * pcpu_block_update_scan() does scan backwards to try and recover what was 1113b89462a9SDennis Zhou * lost to alignment. While this can cause scanning to miss earlier possible 1114b89462a9SDennis Zhou * free areas, smaller allocations will eventually fill those holes. 1115b89462a9SDennis Zhou */ 1116b89462a9SDennis Zhou static unsigned long pcpu_find_zero_area(unsigned long *map, 1117b89462a9SDennis Zhou unsigned long size, 1118b89462a9SDennis Zhou unsigned long start, 1119b89462a9SDennis Zhou unsigned long nr, 1120b89462a9SDennis Zhou unsigned long align_mask, 1121b89462a9SDennis Zhou unsigned long *largest_off, 1122b89462a9SDennis Zhou unsigned long *largest_bits) 1123b89462a9SDennis Zhou { 1124b89462a9SDennis Zhou unsigned long index, end, i, area_off, area_bits; 1125b89462a9SDennis Zhou again: 1126b89462a9SDennis Zhou index = find_next_zero_bit(map, size, start); 1127b89462a9SDennis Zhou 1128b89462a9SDennis Zhou /* Align allocation */ 1129b89462a9SDennis Zhou index = __ALIGN_MASK(index, align_mask); 1130b89462a9SDennis Zhou area_off = index; 1131b89462a9SDennis Zhou 1132b89462a9SDennis Zhou end = index + nr; 1133b89462a9SDennis Zhou if (end > size) 1134b89462a9SDennis Zhou return end; 1135b89462a9SDennis Zhou i = find_next_bit(map, end, index); 1136b89462a9SDennis Zhou if (i < end) { 1137b89462a9SDennis Zhou area_bits = i - area_off; 1138b89462a9SDennis Zhou /* remember largest unused area with best alignment */ 1139b89462a9SDennis Zhou if (area_bits > *largest_bits || 1140b89462a9SDennis Zhou (area_bits == *largest_bits && *largest_off && 1141b89462a9SDennis Zhou (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { 1142b89462a9SDennis Zhou *largest_off = area_off; 1143b89462a9SDennis Zhou *largest_bits = area_bits; 1144b89462a9SDennis Zhou } 1145b89462a9SDennis Zhou 1146b89462a9SDennis Zhou start = i + 1; 1147b89462a9SDennis Zhou goto again; 1148b89462a9SDennis Zhou } 1149b89462a9SDennis Zhou return index; 1150b89462a9SDennis Zhou } 1151b89462a9SDennis Zhou 115240064aecSDennis Zhou (Facebook) /** 115340064aecSDennis Zhou (Facebook) * pcpu_alloc_area - allocates an area from a pcpu_chunk 115440064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 115540064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 115640064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE) 115740064aecSDennis Zhou (Facebook) * @start: bit_off to start searching 115840064aecSDennis Zhou (Facebook) * 115940064aecSDennis Zhou (Facebook) * This function takes in a @start offset to begin searching to fit an 1160b4c2116cSDennis Zhou (Facebook) * allocation of @alloc_bits with alignment @align. It needs to scan 1161b4c2116cSDennis Zhou (Facebook) * the allocation map because if it fits within the block's contig hint, 1162b4c2116cSDennis Zhou (Facebook) * @start will be block->first_free. This is an attempt to fill the 1163b4c2116cSDennis Zhou (Facebook) * allocation prior to breaking the contig hint. The allocation and 1164b4c2116cSDennis Zhou (Facebook) * boundary maps are updated accordingly if it confirms a valid 1165b4c2116cSDennis Zhou (Facebook) * free area. 116640064aecSDennis Zhou (Facebook) * 116740064aecSDennis Zhou (Facebook) * RETURNS: 116840064aecSDennis Zhou (Facebook) * Allocated addr offset in @chunk on success. 116940064aecSDennis Zhou (Facebook) * -1 if no matching area is found. 
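 *
 * Example (illustrative, with PCPU_MIN_ALLOC_SIZE = 4): a successful
 * fit at bit_off 300 returns byte offset 1200 into each unit of
 * @chunk.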
117040064aecSDennis Zhou (Facebook) */ 117140064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, 117240064aecSDennis Zhou (Facebook) size_t align, int start) 117340064aecSDennis Zhou (Facebook) { 117492c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 117540064aecSDennis Zhou (Facebook) size_t align_mask = (align) ? (align - 1) : 0; 1176b89462a9SDennis Zhou unsigned long area_off = 0, area_bits = 0; 117740064aecSDennis Zhou (Facebook) int bit_off, end, oslot; 11789f7dcf22STejun Heo 11794f996e23STejun Heo lockdep_assert_held(&pcpu_lock); 11804f996e23STejun Heo 118140064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1182833af842STejun Heo 1183833af842STejun Heo /* 118440064aecSDennis Zhou (Facebook) * Search to find a fit. 1185833af842STejun Heo */ 11868c43004aSDennis Zhou end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, 11878c43004aSDennis Zhou pcpu_chunk_map_bits(chunk)); 1188b89462a9SDennis Zhou bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, 1189b89462a9SDennis Zhou align_mask, &area_off, &area_bits); 119040064aecSDennis Zhou (Facebook) if (bit_off >= end) 1191a16037c8STejun Heo return -1; 1192a16037c8STejun Heo 1193b89462a9SDennis Zhou if (area_bits) 1194b89462a9SDennis Zhou pcpu_block_update_scan(chunk, area_off, area_bits); 1195b89462a9SDennis Zhou 119640064aecSDennis Zhou (Facebook) /* update alloc map */ 119740064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, bit_off, alloc_bits); 1198a16037c8STejun Heo 119940064aecSDennis Zhou (Facebook) /* update boundary map */ 120040064aecSDennis Zhou (Facebook) set_bit(bit_off, chunk->bound_map); 120140064aecSDennis Zhou (Facebook) bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); 120240064aecSDennis Zhou (Facebook) set_bit(bit_off + alloc_bits, chunk->bound_map); 1203a16037c8STejun Heo 120440064aecSDennis Zhou (Facebook) chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; 120540064aecSDennis Zhou (Facebook) 120686b442fbSDennis Zhou (Facebook) /* update first free bit */ 120792c14cabSDennis Zhou if (bit_off == chunk_md->first_free) 120892c14cabSDennis Zhou chunk_md->first_free = find_next_zero_bit( 120986b442fbSDennis Zhou (Facebook) chunk->alloc_map, 121086b442fbSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk), 121186b442fbSDennis Zhou (Facebook) bit_off + alloc_bits); 121286b442fbSDennis Zhou (Facebook) 1213ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); 121440064aecSDennis Zhou (Facebook) 121540064aecSDennis Zhou (Facebook) pcpu_chunk_relocate(chunk, oslot); 121640064aecSDennis Zhou (Facebook) 121740064aecSDennis Zhou (Facebook) return bit_off * PCPU_MIN_ALLOC_SIZE; 1218a16037c8STejun Heo } 1219a16037c8STejun Heo 1220a16037c8STejun Heo /** 122140064aecSDennis Zhou (Facebook) * pcpu_free_area - frees the corresponding offset 1222fbf59bc9STejun Heo * @chunk: chunk of interest 122340064aecSDennis Zhou (Facebook) * @off: addr offset into chunk 1224fbf59bc9STejun Heo * 122540064aecSDennis Zhou (Facebook) * This function determines the size of an allocation to free using 122640064aecSDennis Zhou (Facebook) * the boundary bitmap and clears the allocation map. 12275b32af91SRoman Gushchin * 12285b32af91SRoman Gushchin * RETURNS: 12295b32af91SRoman Gushchin * Number of freed bytes. 
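 *
 * Example (made-up offsets): with boundary bits set at 24 and 30,
 * freeing @off == 24 * PCPU_MIN_ALLOC_SIZE clears alloc_map bits
 * [24, 30) and returns 6 * PCPU_MIN_ALLOC_SIZE bytes.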
1230fbf59bc9STejun Heo */ 12315b32af91SRoman Gushchin static int pcpu_free_area(struct pcpu_chunk *chunk, int off) 1232fbf59bc9STejun Heo { 123392c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 12345b32af91SRoman Gushchin int bit_off, bits, end, oslot, freed; 1235fbf59bc9STejun Heo 12365ccd30e4SDennis Zhou lockdep_assert_held(&pcpu_lock); 123730a5b536SDennis Zhou pcpu_stats_area_dealloc(chunk); 12385ccd30e4SDennis Zhou 123940064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1240723ad1d9SAl Viro 124140064aecSDennis Zhou (Facebook) bit_off = off / PCPU_MIN_ALLOC_SIZE; 1242fbf59bc9STejun Heo 124340064aecSDennis Zhou (Facebook) /* find end index */ 124440064aecSDennis Zhou (Facebook) end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), 124540064aecSDennis Zhou (Facebook) bit_off + 1); 124640064aecSDennis Zhou (Facebook) bits = end - bit_off; 124740064aecSDennis Zhou (Facebook) bitmap_clear(chunk->alloc_map, bit_off, bits); 12483d331ad7SAl Viro 12495b32af91SRoman Gushchin freed = bits * PCPU_MIN_ALLOC_SIZE; 12505b32af91SRoman Gushchin 125140064aecSDennis Zhou (Facebook) /* update metadata */ 12525b32af91SRoman Gushchin chunk->free_bytes += freed; 1253fbf59bc9STejun Heo 125486b442fbSDennis Zhou (Facebook) /* update first free bit */ 125592c14cabSDennis Zhou chunk_md->first_free = min(chunk_md->first_free, bit_off); 125686b442fbSDennis Zhou (Facebook) 1257ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_free(chunk, bit_off, bits); 1258b539b87fSTejun Heo 1259fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 12605b32af91SRoman Gushchin 12615b32af91SRoman Gushchin return freed; 1262fbf59bc9STejun Heo } 1263fbf59bc9STejun Heo 1264047924c9SDennis Zhou static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) 1265047924c9SDennis Zhou { 1266047924c9SDennis Zhou block->scan_hint = 0; 1267047924c9SDennis Zhou block->contig_hint = nr_bits; 1268047924c9SDennis Zhou block->left_free = nr_bits; 1269047924c9SDennis Zhou block->right_free = nr_bits; 1270047924c9SDennis Zhou block->first_free = 0; 1271047924c9SDennis Zhou block->nr_bits = nr_bits; 1272047924c9SDennis Zhou } 1273047924c9SDennis Zhou 1274ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) 1275ca460b3cSDennis Zhou (Facebook) { 1276ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *md_block; 1277ca460b3cSDennis Zhou (Facebook) 127892c14cabSDennis Zhou /* init the chunk's block */ 127992c14cabSDennis Zhou pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); 128092c14cabSDennis Zhou 1281ca460b3cSDennis Zhou (Facebook) for (md_block = chunk->md_blocks; 1282ca460b3cSDennis Zhou (Facebook) md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); 1283047924c9SDennis Zhou md_block++) 1284047924c9SDennis Zhou pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); 1285ca460b3cSDennis Zhou (Facebook) } 1286ca460b3cSDennis Zhou (Facebook) 128740064aecSDennis Zhou (Facebook) /** 128840064aecSDennis Zhou (Facebook) * pcpu_alloc_first_chunk - creates chunks that serve the first chunk 128940064aecSDennis Zhou (Facebook) * @tmp_addr: the start of the region served 129040064aecSDennis Zhou (Facebook) * @map_size: size of the region served 129140064aecSDennis Zhou (Facebook) * 129240064aecSDennis Zhou (Facebook) * This is responsible for creating the chunks that serve the first chunk. 
The 129340064aecSDennis Zhou (Facebook) * base_addr is page aligned down of @tmp_addr while the region end is page 129440064aecSDennis Zhou (Facebook) * aligned up. Offsets are kept track of to determine the region served. All 129540064aecSDennis Zhou (Facebook) * this is done to appease the bitmap allocator in avoiding partial blocks. 129640064aecSDennis Zhou (Facebook) * 129740064aecSDennis Zhou (Facebook) * RETURNS: 129840064aecSDennis Zhou (Facebook) * Chunk serving the region at @tmp_addr of @map_size. 129940064aecSDennis Zhou (Facebook) */ 1300c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, 130140064aecSDennis Zhou (Facebook) int map_size) 130210edf5b0SDennis Zhou (Facebook) { 130310edf5b0SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 1304ca460b3cSDennis Zhou (Facebook) unsigned long aligned_addr, lcm_align; 130540064aecSDennis Zhou (Facebook) int start_offset, offset_bits, region_size, region_bits; 1306f655f405SMike Rapoport size_t alloc_size; 1307c0ebfdc3SDennis Zhou (Facebook) 1308c0ebfdc3SDennis Zhou (Facebook) /* region calculations */ 1309c0ebfdc3SDennis Zhou (Facebook) aligned_addr = tmp_addr & PAGE_MASK; 1310c0ebfdc3SDennis Zhou (Facebook) 1311c0ebfdc3SDennis Zhou (Facebook) start_offset = tmp_addr - aligned_addr; 13126b9d7c8eSDennis Zhou (Facebook) 1313ca460b3cSDennis Zhou (Facebook) /* 1314ca460b3cSDennis Zhou (Facebook) * Align the end of the region with the LCM of PAGE_SIZE and 1315ca460b3cSDennis Zhou (Facebook) * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of 1316ca460b3cSDennis Zhou (Facebook) * the other. 1317ca460b3cSDennis Zhou (Facebook) */ 1318ca460b3cSDennis Zhou (Facebook) lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE); 1319ca460b3cSDennis Zhou (Facebook) region_size = ALIGN(start_offset + map_size, lcm_align); 132010edf5b0SDennis Zhou (Facebook) 1321c0ebfdc3SDennis Zhou (Facebook) /* allocate chunk */ 132261cf93d3SDennis Zhou alloc_size = struct_size(chunk, populated, 132361cf93d3SDennis Zhou BITS_TO_LONGS(region_size >> PAGE_SHIFT)); 1324f655f405SMike Rapoport chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1325f655f405SMike Rapoport if (!chunk) 1326f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1327f655f405SMike Rapoport alloc_size); 1328c0ebfdc3SDennis Zhou (Facebook) 132910edf5b0SDennis Zhou (Facebook) INIT_LIST_HEAD(&chunk->list); 1330c0ebfdc3SDennis Zhou (Facebook) 1331c0ebfdc3SDennis Zhou (Facebook) chunk->base_addr = (void *)aligned_addr; 133210edf5b0SDennis Zhou (Facebook) chunk->start_offset = start_offset; 13336b9d7c8eSDennis Zhou (Facebook) chunk->end_offset = region_size - chunk->start_offset - map_size; 1334c0ebfdc3SDennis Zhou (Facebook) 13358ab16c43SDennis Zhou (Facebook) chunk->nr_pages = region_size >> PAGE_SHIFT; 133640064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 1337c0ebfdc3SDennis Zhou (Facebook) 1338f655f405SMike Rapoport alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); 1339f655f405SMike Rapoport chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1340f655f405SMike Rapoport if (!chunk->alloc_map) 1341f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1342f655f405SMike Rapoport alloc_size); 1343f655f405SMike Rapoport 1344f655f405SMike Rapoport alloc_size = 1345f655f405SMike Rapoport BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); 1346f655f405SMike Rapoport chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 
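	/*
	 * (Note: bound_map was sized with region_bits + 1 bits above so
	 * the closing boundary of the last allocation can be marked;
	 * pcpu_free_area() sizes a free by searching for the next set
	 * boundary bit.)
	 */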
1347f655f405SMike Rapoport if (!chunk->bound_map) 1348f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1349f655f405SMike Rapoport alloc_size); 1350f655f405SMike Rapoport 1351f655f405SMike Rapoport alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); 1352f655f405SMike Rapoport chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1353f655f405SMike Rapoport if (!chunk->md_blocks) 1354f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1355f655f405SMike Rapoport alloc_size); 1356f655f405SMike Rapoport 13573c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 13583c7be18aSRoman Gushchin /* first chunk isn't memcg-aware */ 13593c7be18aSRoman Gushchin chunk->obj_cgroups = NULL; 13603c7be18aSRoman Gushchin #endif 1361ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 136210edf5b0SDennis Zhou (Facebook) 136310edf5b0SDennis Zhou (Facebook) /* manage populated page bitmap */ 136410edf5b0SDennis Zhou (Facebook) chunk->immutable = true; 13658ab16c43SDennis Zhou (Facebook) bitmap_fill(chunk->populated, chunk->nr_pages); 13668ab16c43SDennis Zhou (Facebook) chunk->nr_populated = chunk->nr_pages; 1367b239f7daSDennis Zhou chunk->nr_empty_pop_pages = chunk->nr_pages; 136810edf5b0SDennis Zhou (Facebook) 136940064aecSDennis Zhou (Facebook) chunk->free_bytes = map_size; 1370c0ebfdc3SDennis Zhou (Facebook) 1371c0ebfdc3SDennis Zhou (Facebook) if (chunk->start_offset) { 1372c0ebfdc3SDennis Zhou (Facebook) /* hide the beginning of the bitmap */ 137340064aecSDennis Zhou (Facebook) offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; 137440064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 0, offset_bits); 137540064aecSDennis Zhou (Facebook) set_bit(0, chunk->bound_map); 137640064aecSDennis Zhou (Facebook) set_bit(offset_bits, chunk->bound_map); 1377ca460b3cSDennis Zhou (Facebook) 137892c14cabSDennis Zhou chunk->chunk_md.first_free = offset_bits; 137986b442fbSDennis Zhou (Facebook) 1380ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, 0, offset_bits); 1381c0ebfdc3SDennis Zhou (Facebook) } 1382c0ebfdc3SDennis Zhou (Facebook) 13836b9d7c8eSDennis Zhou (Facebook) if (chunk->end_offset) { 13846b9d7c8eSDennis Zhou (Facebook) /* hide the end of the bitmap */ 138540064aecSDennis Zhou (Facebook) offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; 138640064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 138740064aecSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk) - offset_bits, 138840064aecSDennis Zhou (Facebook) offset_bits); 138940064aecSDennis Zhou (Facebook) set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, 139040064aecSDennis Zhou (Facebook) chunk->bound_map); 139140064aecSDennis Zhou (Facebook) set_bit(region_bits, chunk->bound_map); 13926b9d7c8eSDennis Zhou (Facebook) 1393ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) 1394ca460b3cSDennis Zhou (Facebook) - offset_bits, offset_bits); 1395ca460b3cSDennis Zhou (Facebook) } 139640064aecSDennis Zhou (Facebook) 139710edf5b0SDennis Zhou (Facebook) return chunk; 139810edf5b0SDennis Zhou (Facebook) } 139910edf5b0SDennis Zhou (Facebook) 14003c7be18aSRoman Gushchin static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp) 14016081089fSTejun Heo { 14026081089fSTejun Heo struct pcpu_chunk *chunk; 140340064aecSDennis Zhou (Facebook) int region_bits; 14046081089fSTejun Heo 140547504ee0SDennis Zhou chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); 14066081089fSTejun Heo if 
(!chunk) 14076081089fSTejun Heo return NULL; 14086081089fSTejun Heo 14096081089fSTejun Heo INIT_LIST_HEAD(&chunk->list); 1410c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages; 141140064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 141240064aecSDennis Zhou (Facebook) 141340064aecSDennis Zhou (Facebook) chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * 141447504ee0SDennis Zhou sizeof(chunk->alloc_map[0]), gfp); 141540064aecSDennis Zhou (Facebook) if (!chunk->alloc_map) 141640064aecSDennis Zhou (Facebook) goto alloc_map_fail; 141740064aecSDennis Zhou (Facebook) 141840064aecSDennis Zhou (Facebook) chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * 141947504ee0SDennis Zhou sizeof(chunk->bound_map[0]), gfp); 142040064aecSDennis Zhou (Facebook) if (!chunk->bound_map) 142140064aecSDennis Zhou (Facebook) goto bound_map_fail; 142240064aecSDennis Zhou (Facebook) 1423ca460b3cSDennis Zhou (Facebook) chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * 142447504ee0SDennis Zhou sizeof(chunk->md_blocks[0]), gfp); 1425ca460b3cSDennis Zhou (Facebook) if (!chunk->md_blocks) 1426ca460b3cSDennis Zhou (Facebook) goto md_blocks_fail; 1427ca460b3cSDennis Zhou (Facebook) 14283c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 14293c7be18aSRoman Gushchin if (pcpu_is_memcg_chunk(type)) { 14303c7be18aSRoman Gushchin chunk->obj_cgroups = 14313c7be18aSRoman Gushchin pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * 14323c7be18aSRoman Gushchin sizeof(struct obj_cgroup *), gfp); 14333c7be18aSRoman Gushchin if (!chunk->obj_cgroups) 14343c7be18aSRoman Gushchin goto objcg_fail; 14353c7be18aSRoman Gushchin } 14363c7be18aSRoman Gushchin #endif 14373c7be18aSRoman Gushchin 1438ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 1439ca460b3cSDennis Zhou (Facebook) 144040064aecSDennis Zhou (Facebook) /* init metadata */ 144140064aecSDennis Zhou (Facebook) chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; 1442c0ebfdc3SDennis Zhou (Facebook) 14436081089fSTejun Heo return chunk; 144440064aecSDennis Zhou (Facebook) 14453c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 14463c7be18aSRoman Gushchin objcg_fail: 14473c7be18aSRoman Gushchin pcpu_mem_free(chunk->md_blocks); 14483c7be18aSRoman Gushchin #endif 1449ca460b3cSDennis Zhou (Facebook) md_blocks_fail: 1450ca460b3cSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 145140064aecSDennis Zhou (Facebook) bound_map_fail: 145240064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 145340064aecSDennis Zhou (Facebook) alloc_map_fail: 145440064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk); 145540064aecSDennis Zhou (Facebook) 145640064aecSDennis Zhou (Facebook) return NULL; 14576081089fSTejun Heo } 14586081089fSTejun Heo 14596081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk) 14606081089fSTejun Heo { 14616081089fSTejun Heo if (!chunk) 14626081089fSTejun Heo return; 14633c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 14643c7be18aSRoman Gushchin pcpu_mem_free(chunk->obj_cgroups); 14653c7be18aSRoman Gushchin #endif 14666685b357SMike Rapoport pcpu_mem_free(chunk->md_blocks); 146740064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 146840064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 14691d5cfdb0STetsuo Handa pcpu_mem_free(chunk); 14706081089fSTejun Heo } 14716081089fSTejun Heo 1472b539b87fSTejun Heo /** 1473b539b87fSTejun Heo * pcpu_chunk_populated - post-population bookkeeping 1474b539b87fSTejun Heo * @chunk: pcpu_chunk which got populated 
1475b539b87fSTejun Heo * @page_start: the start page 1476b539b87fSTejun Heo * @page_end: the end page 1477b539b87fSTejun Heo * 1478b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been populated to @chunk. Update 1479b539b87fSTejun Heo * the bookkeeping information accordingly. Must be called after each 1480b539b87fSTejun Heo * successful population. 1484b539b87fSTejun Heo */ 148540064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, 1486b239f7daSDennis Zhou int page_end) 1487b539b87fSTejun Heo { 1488b539b87fSTejun Heo int nr = page_end - page_start; 1489b539b87fSTejun Heo 1490b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 1491b539b87fSTejun Heo 1492b539b87fSTejun Heo bitmap_set(chunk->populated, page_start, nr); 1493b539b87fSTejun Heo chunk->nr_populated += nr; 14947e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += nr; 149540064aecSDennis Zhou (Facebook) 1496b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, nr); 149740064aecSDennis Zhou (Facebook) } 1498b539b87fSTejun Heo 1499b539b87fSTejun Heo /** 1500b539b87fSTejun Heo * pcpu_chunk_depopulated - post-depopulation bookkeeping 1501b539b87fSTejun Heo * @chunk: pcpu_chunk which got depopulated 1502b539b87fSTejun Heo * @page_start: the start page 1503b539b87fSTejun Heo * @page_end: the end page 1504b539b87fSTejun Heo * 1505b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been depopulated from @chunk. 1506b539b87fSTejun Heo * Update the bookkeeping information accordingly. Must be called after 1507b539b87fSTejun Heo * each successful depopulation. 1508b539b87fSTejun Heo */ 1509b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, 1510b539b87fSTejun Heo int page_start, int page_end) 1511b539b87fSTejun Heo { 1512b539b87fSTejun Heo int nr = page_end - page_start; 1513b539b87fSTejun Heo 1514b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 1515b539b87fSTejun Heo 1516b539b87fSTejun Heo bitmap_clear(chunk->populated, page_start, nr); 1517b539b87fSTejun Heo chunk->nr_populated -= nr; 15187e8a6304SDennis Zhou (Facebook) pcpu_nr_populated -= nr; 1519b239f7daSDennis Zhou 1520b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, -nr); 1521b539b87fSTejun Heo } 1522b539b87fSTejun Heo 1523fbf59bc9STejun Heo /* 15249f645532STejun Heo * Chunk management implementation. 1525fbf59bc9STejun Heo * 15269f645532STejun Heo * To allow different implementations, chunk alloc/free and 15279f645532STejun Heo * [de]population are implemented in a separate file which is pulled 15289f645532STejun Heo * into this file and compiled together. The following functions 15299f645532STejun Heo * should be implemented.
1530ccea34b5STejun Heo * 15319f645532STejun Heo * pcpu_populate_chunk - populate the specified range of a chunk 15329f645532STejun Heo * pcpu_depopulate_chunk - depopulate the specified range of a chunk 15339f645532STejun Heo * pcpu_create_chunk - create a new chunk 15349f645532STejun Heo * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop 15359f645532STejun Heo * pcpu_addr_to_page - translate address to physical address 15369f645532STejun Heo * pcpu_verify_alloc_info - check alloc_info is acceptable during init 1537fbf59bc9STejun Heo */ 153815d9f3d1SDennis Zhou static int pcpu_populate_chunk(struct pcpu_chunk *chunk, 153947504ee0SDennis Zhou int page_start, int page_end, gfp_t gfp); 154015d9f3d1SDennis Zhou static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, 154115d9f3d1SDennis Zhou int page_start, int page_end); 15423c7be18aSRoman Gushchin static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type, 15433c7be18aSRoman Gushchin gfp_t gfp); 15449f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); 15459f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr); 15469f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); 1547fbf59bc9STejun Heo 1548b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM 1549b0c9778bSTejun Heo #include "percpu-km.c" 1550b0c9778bSTejun Heo #else 15519f645532STejun Heo #include "percpu-vm.c" 1552b0c9778bSTejun Heo #endif 1553fbf59bc9STejun Heo 1554fbf59bc9STejun Heo /** 155588999a89STejun Heo * pcpu_chunk_addr_search - determine chunk containing specified address 155688999a89STejun Heo * @addr: address for which the chunk needs to be determined. 155788999a89STejun Heo * 1558c0ebfdc3SDennis Zhou (Facebook) * This is an internal function that handles all but static allocations. 1559c0ebfdc3SDennis Zhou (Facebook) * Static percpu address values should never be passed into the allocator. 1560c0ebfdc3SDennis Zhou (Facebook) * 156188999a89STejun Heo * RETURNS: 156288999a89STejun Heo * The address of the found chunk. 156388999a89STejun Heo */ 156488999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 156588999a89STejun Heo { 1566c0ebfdc3SDennis Zhou (Facebook) /* is it in the dynamic region (first chunk)? */ 1567560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) 1568c0ebfdc3SDennis Zhou (Facebook) return pcpu_first_chunk; 1569c0ebfdc3SDennis Zhou (Facebook) 1570c0ebfdc3SDennis Zhou (Facebook) /* is it in the reserved region? */ 1571560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) 157288999a89STejun Heo return pcpu_reserved_chunk; 157388999a89STejun Heo 157488999a89STejun Heo /* 157588999a89STejun Heo * The address is relative to unit0 which might be unused and 157688999a89STejun Heo * thus unmapped. Offset the address to the unit space of the 157788999a89STejun Heo * current processor before looking it up in the vmalloc 157888999a89STejun Heo * space. Note that any possible cpu id can be used here, so 157988999a89STejun Heo * there's no need to worry about preemption or cpu hotplug. 
158088999a89STejun Heo */ 158188999a89STejun Heo addr += pcpu_unit_offsets[raw_smp_processor_id()]; 15829f645532STejun Heo return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 158388999a89STejun Heo } 158488999a89STejun Heo 15853c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 15863c7be18aSRoman Gushchin static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, 15873c7be18aSRoman Gushchin struct obj_cgroup **objcgp) 15883c7be18aSRoman Gushchin { 15893c7be18aSRoman Gushchin struct obj_cgroup *objcg; 15903c7be18aSRoman Gushchin 1591279c3393SRoman Gushchin if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT)) 15923c7be18aSRoman Gushchin return PCPU_CHUNK_ROOT; 15933c7be18aSRoman Gushchin 15943c7be18aSRoman Gushchin objcg = get_obj_cgroup_from_current(); 15953c7be18aSRoman Gushchin if (!objcg) 15963c7be18aSRoman Gushchin return PCPU_CHUNK_ROOT; 15973c7be18aSRoman Gushchin 15983c7be18aSRoman Gushchin if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) { 15993c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16003c7be18aSRoman Gushchin return PCPU_FAIL_ALLOC; 16013c7be18aSRoman Gushchin } 16023c7be18aSRoman Gushchin 16033c7be18aSRoman Gushchin *objcgp = objcg; 16043c7be18aSRoman Gushchin return PCPU_CHUNK_MEMCG; 16053c7be18aSRoman Gushchin } 16063c7be18aSRoman Gushchin 16073c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 16083c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off, 16093c7be18aSRoman Gushchin size_t size) 16103c7be18aSRoman Gushchin { 16113c7be18aSRoman Gushchin if (!objcg) 16123c7be18aSRoman Gushchin return; 16133c7be18aSRoman Gushchin 16143c7be18aSRoman Gushchin if (chunk) { 16153c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; 1616772616b0SRoman Gushchin 1617772616b0SRoman Gushchin rcu_read_lock(); 1618772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 1619772616b0SRoman Gushchin size * num_possible_cpus()); 1620772616b0SRoman Gushchin rcu_read_unlock(); 16213c7be18aSRoman Gushchin } else { 16223c7be18aSRoman Gushchin obj_cgroup_uncharge(objcg, size * num_possible_cpus()); 16233c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16243c7be18aSRoman Gushchin } 16253c7be18aSRoman Gushchin } 16263c7be18aSRoman Gushchin 16273c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 16283c7be18aSRoman Gushchin { 16293c7be18aSRoman Gushchin struct obj_cgroup *objcg; 16303c7be18aSRoman Gushchin 16313c7be18aSRoman Gushchin if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk))) 16323c7be18aSRoman Gushchin return; 16333c7be18aSRoman Gushchin 16343c7be18aSRoman Gushchin objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; 16353c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; 16363c7be18aSRoman Gushchin 16373c7be18aSRoman Gushchin obj_cgroup_uncharge(objcg, size * num_possible_cpus()); 16383c7be18aSRoman Gushchin 1639772616b0SRoman Gushchin rcu_read_lock(); 1640772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 1641772616b0SRoman Gushchin -(size * num_possible_cpus())); 1642772616b0SRoman Gushchin rcu_read_unlock(); 1643772616b0SRoman Gushchin 16443c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16453c7be18aSRoman Gushchin } 16463c7be18aSRoman Gushchin 16473c7be18aSRoman Gushchin #else /* CONFIG_MEMCG_KMEM */ 16483c7be18aSRoman Gushchin static enum pcpu_chunk_type 16493c7be18aSRoman Gushchin pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) 
16503c7be18aSRoman Gushchin { 16513c7be18aSRoman Gushchin return PCPU_CHUNK_ROOT; 16523c7be18aSRoman Gushchin } 16533c7be18aSRoman Gushchin 16543c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 16553c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off, 16563c7be18aSRoman Gushchin size_t size) 16573c7be18aSRoman Gushchin { 16583c7be18aSRoman Gushchin } 16593c7be18aSRoman Gushchin 16603c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 16613c7be18aSRoman Gushchin { 16623c7be18aSRoman Gushchin } 16633c7be18aSRoman Gushchin #endif /* CONFIG_MEMCG_KMEM */ 16643c7be18aSRoman Gushchin 166588999a89STejun Heo /** 1666edcb4639STejun Heo * pcpu_alloc - the percpu allocator 1667cae3aeb8STejun Heo * @size: size of area to allocate in bytes 1668fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE) 1669edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available 16705835d96eSTejun Heo * @gfp: allocation flags 1671fbf59bc9STejun Heo * 16725835d96eSTejun Heo * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 16730ea7eeecSDaniel Borkmann * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN 16740ea7eeecSDaniel Borkmann * then no warning will be triggered on invalid or failed allocation 16750ea7eeecSDaniel Borkmann * requests. 1676fbf59bc9STejun Heo * 1677fbf59bc9STejun Heo * RETURNS: 1678fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1679fbf59bc9STejun Heo */ 16805835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 16815835d96eSTejun Heo gfp_t gfp) 1682fbf59bc9STejun Heo { 168328307d93SFilipe Manana gfp_t pcpu_gfp; 168428307d93SFilipe Manana bool is_atomic; 168528307d93SFilipe Manana bool do_warn; 16863c7be18aSRoman Gushchin enum pcpu_chunk_type type; 16873c7be18aSRoman Gushchin struct list_head *pcpu_slot; 16883c7be18aSRoman Gushchin struct obj_cgroup *objcg = NULL; 1689f2badb0cSTejun Heo static int warn_limit = 10; 16908744d859SDennis Zhou struct pcpu_chunk *chunk, *next; 1691f2badb0cSTejun Heo const char *err; 169240064aecSDennis Zhou (Facebook) int slot, off, cpu, ret; 1693403a91b1SJiri Kosina unsigned long flags; 1694f528f0b8SCatalin Marinas void __percpu *ptr; 169540064aecSDennis Zhou (Facebook) size_t bits, bit_align; 1696fbf59bc9STejun Heo 169728307d93SFilipe Manana gfp = current_gfp_context(gfp); 169828307d93SFilipe Manana /* whitelisted flags that can be passed to the backing allocators */ 169928307d93SFilipe Manana pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 170028307d93SFilipe Manana is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 170128307d93SFilipe Manana do_warn = !(gfp & __GFP_NOWARN); 170228307d93SFilipe Manana 1703723ad1d9SAl Viro /* 170440064aecSDennis Zhou (Facebook) * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, 170540064aecSDennis Zhou (Facebook) * therefore alignment must be a minimum of that many bytes. 170640064aecSDennis Zhou (Facebook) * An allocation may have internal fragmentation from rounding up 170740064aecSDennis Zhou (Facebook) * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. 
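 *
 * E.g. (illustrative numbers, with PCPU_MIN_ALLOC_SIZE = 4): a 13-byte
 * request rounds up to 16 bytes, so bits = 4; an @align of 8 gives
 * bit_align = 2.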
1708723ad1d9SAl Viro */ 1709d2f3c384SDennis Zhou (Facebook) if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) 1710d2f3c384SDennis Zhou (Facebook) align = PCPU_MIN_ALLOC_SIZE; 1711723ad1d9SAl Viro 1712d2f3c384SDennis Zhou (Facebook) size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); 171340064aecSDennis Zhou (Facebook) bits = size >> PCPU_MIN_ALLOC_SHIFT; 171440064aecSDennis Zhou (Facebook) bit_align = align >> PCPU_MIN_ALLOC_SHIFT; 17152f69fa82SViro 17163ca45a46Szijun_hu if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 17173ca45a46Szijun_hu !is_power_of_2(align))) { 17180ea7eeecSDaniel Borkmann WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1719756a025fSJoe Perches size, align); 1720fbf59bc9STejun Heo return NULL; 1721fbf59bc9STejun Heo } 1722fbf59bc9STejun Heo 17233c7be18aSRoman Gushchin type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg); 17243c7be18aSRoman Gushchin if (unlikely(type == PCPU_FAIL_ALLOC)) 17253c7be18aSRoman Gushchin return NULL; 17263c7be18aSRoman Gushchin pcpu_slot = pcpu_chunk_list(type); 17273c7be18aSRoman Gushchin 1728f52ba1feSKirill Tkhai if (!is_atomic) { 1729f52ba1feSKirill Tkhai /* 1730f52ba1feSKirill Tkhai * pcpu_balance_workfn() allocates memory under this mutex, 1731f52ba1feSKirill Tkhai * and it may wait for memory reclaim. Allow current task 1732f52ba1feSKirill Tkhai * to become OOM victim, in case of memory pressure. 1733f52ba1feSKirill Tkhai */ 17343c7be18aSRoman Gushchin if (gfp & __GFP_NOFAIL) { 17356710e594STejun Heo mutex_lock(&pcpu_alloc_mutex); 17363c7be18aSRoman Gushchin } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { 17373c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 1738f52ba1feSKirill Tkhai return NULL; 1739f52ba1feSKirill Tkhai } 17403c7be18aSRoman Gushchin } 17416710e594STejun Heo 1742403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1743fbf59bc9STejun Heo 1744edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */ 1745edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) { 1746edcb4639STejun Heo chunk = pcpu_reserved_chunk; 1747833af842STejun Heo 174840064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); 174940064aecSDennis Zhou (Facebook) if (off < 0) { 1750833af842STejun Heo err = "alloc from reserved chunk failed"; 1751ccea34b5STejun Heo goto fail_unlock; 1752f2badb0cSTejun Heo } 1753833af842STejun Heo 175440064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1755edcb4639STejun Heo if (off >= 0) 1756edcb4639STejun Heo goto area_found; 1757833af842STejun Heo 1758f2badb0cSTejun Heo err = "alloc from reserved chunk failed"; 1759ccea34b5STejun Heo goto fail_unlock; 1760edcb4639STejun Heo } 1761edcb4639STejun Heo 1762ccea34b5STejun Heo restart: 1763edcb4639STejun Heo /* search through normal chunks */ 1764fbf59bc9STejun Heo for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 17658744d859SDennis Zhou list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) { 176640064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, 176740064aecSDennis Zhou (Facebook) is_atomic); 17688744d859SDennis Zhou if (off < 0) { 17698744d859SDennis Zhou if (slot < PCPU_SLOT_FAIL_THRESHOLD) 17708744d859SDennis Zhou pcpu_chunk_move(chunk, 0); 1771fbf59bc9STejun Heo continue; 17728744d859SDennis Zhou } 1773ccea34b5STejun Heo 177440064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1775fbf59bc9STejun Heo if (off >= 0) 
1776fbf59bc9STejun Heo goto area_found; 177740064aecSDennis Zhou (Facebook) 1778fbf59bc9STejun Heo } 1779fbf59bc9STejun Heo } 1780fbf59bc9STejun Heo 1781403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1782ccea34b5STejun Heo 1783b38d08f3STejun Heo /* 1784b38d08f3STejun Heo * No space left. Create a new chunk. We don't want multiple 1785b38d08f3STejun Heo * tasks to create chunks simultaneously. Serialize and create iff 1786b38d08f3STejun Heo * there's still no empty chunk after grabbing the mutex. 1787b38d08f3STejun Heo */ 178811df02bfSDennis Zhou if (is_atomic) { 178911df02bfSDennis Zhou err = "atomic alloc failed, no space left"; 17905835d96eSTejun Heo goto fail; 179111df02bfSDennis Zhou } 17925835d96eSTejun Heo 1793b38d08f3STejun Heo if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { 17943c7be18aSRoman Gushchin chunk = pcpu_create_chunk(type, pcpu_gfp); 1795f2badb0cSTejun Heo if (!chunk) { 1796f2badb0cSTejun Heo err = "failed to allocate new chunk"; 1797b38d08f3STejun Heo goto fail; 1798f2badb0cSTejun Heo } 1799ccea34b5STejun Heo 1800403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1801fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1802b38d08f3STejun Heo } else { 1803b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1804b38d08f3STejun Heo } 1805b38d08f3STejun Heo 1806ccea34b5STejun Heo goto restart; 1807fbf59bc9STejun Heo 1808fbf59bc9STejun Heo area_found: 180930a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size); 1810403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1811ccea34b5STejun Heo 1812dca49645STejun Heo /* populate if not all pages are already there */ 18135835d96eSTejun Heo if (!is_atomic) { 1814e837dfdeSDennis Zhou unsigned int page_start, page_end, rs, re; 1815e04d3208STejun Heo 1816dca49645STejun Heo page_start = PFN_DOWN(off); 1817dca49645STejun Heo page_end = PFN_UP(off + size); 1818dca49645STejun Heo 1819e837dfdeSDennis Zhou bitmap_for_each_clear_region(chunk->populated, rs, re, 182091e914c5SDennis Zhou (Facebook) page_start, page_end) { 1821dca49645STejun Heo WARN_ON(chunk->immutable); 1822dca49645STejun Heo 1823554fef1cSDennis Zhou ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); 1824b38d08f3STejun Heo 1825403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1826b38d08f3STejun Heo if (ret) { 182740064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off); 1828f2badb0cSTejun Heo err = "failed to populate"; 1829ccea34b5STejun Heo goto fail_unlock; 1830fbf59bc9STejun Heo } 1831b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, re); 1832b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1833dca49645STejun Heo } 1834dca49645STejun Heo 1835ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1836e04d3208STejun Heo } 1837ccea34b5STejun Heo 18380760fa3dSRoman Gushchin if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW) 18391a4d7607STejun Heo pcpu_schedule_balance_work(); 18401a4d7607STejun Heo 1841dca49645STejun Heo /* clear the areas and return address relative to base address */ 1842dca49645STejun Heo for_each_possible_cpu(cpu) 1843dca49645STejun Heo memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1844dca49645STejun Heo 1845f528f0b8SCatalin Marinas ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 18468a8c35faSLarry Finger kmemleak_alloc_percpu(ptr, size, gfp); 1847df95e795SDennis Zhou 1848df95e795SDennis Zhou trace_percpu_alloc_percpu(reserved, is_atomic, size, align, 1849df95e795SDennis Zhou chunk->base_addr, off, ptr); 1850df95e795SDennis Zhou 18513c7be18aSRoman Gushchin 
pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); 18523c7be18aSRoman Gushchin 1853f528f0b8SCatalin Marinas return ptr; 1854ccea34b5STejun Heo 1855ccea34b5STejun Heo fail_unlock: 1856403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1857b38d08f3STejun Heo fail: 1858df95e795SDennis Zhou trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1859df95e795SDennis Zhou 18600ea7eeecSDaniel Borkmann if (!is_atomic && do_warn && warn_limit) { 1861870d4b12SJoe Perches pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 18625835d96eSTejun Heo size, align, is_atomic, err); 1863f2badb0cSTejun Heo dump_stack(); 1864f2badb0cSTejun Heo if (!--warn_limit) 1865870d4b12SJoe Perches pr_info("limit reached, disable warning\n"); 1866f2badb0cSTejun Heo } 18671a4d7607STejun Heo if (is_atomic) { 18681a4d7607STejun Heo /* see the flag handling in pcpu_balance_workfn() */ 18691a4d7607STejun Heo pcpu_atomic_alloc_failed = true; 18701a4d7607STejun Heo pcpu_schedule_balance_work(); 18716710e594STejun Heo } else { 18726710e594STejun Heo mutex_unlock(&pcpu_alloc_mutex); 18731a4d7607STejun Heo } 18743c7be18aSRoman Gushchin 18753c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 18763c7be18aSRoman Gushchin 1877ccea34b5STejun Heo return NULL; 1878fbf59bc9STejun Heo } 1879edcb4639STejun Heo 1880edcb4639STejun Heo /** 18815835d96eSTejun Heo * __alloc_percpu_gfp - allocate dynamic percpu area 1882edcb4639STejun Heo * @size: size of area to allocate in bytes 1883edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 18845835d96eSTejun Heo * @gfp: allocation flags 1885edcb4639STejun Heo * 18865835d96eSTejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align. If 18875835d96eSTejun Heo * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 18880ea7eeecSDaniel Borkmann * be called from any context but is a lot more likely to fail. If @gfp 18890ea7eeecSDaniel Borkmann * has __GFP_NOWARN then no warning will be triggered on invalid or failed 18900ea7eeecSDaniel Borkmann * allocation requests. 1891ccea34b5STejun Heo * 1892edcb4639STejun Heo * RETURNS: 1893edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1894edcb4639STejun Heo */ 18955835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) 18965835d96eSTejun Heo { 18975835d96eSTejun Heo return pcpu_alloc(size, align, false, gfp); 18985835d96eSTejun Heo } 18995835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); 19005835d96eSTejun Heo 19015835d96eSTejun Heo /** 19025835d96eSTejun Heo * __alloc_percpu - allocate dynamic percpu area 19035835d96eSTejun Heo * @size: size of area to allocate in bytes 19045835d96eSTejun Heo * @align: alignment of area (max PAGE_SIZE) 19055835d96eSTejun Heo * 19065835d96eSTejun Heo * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
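 *
 * Example usage (an illustrative sketch; my_counter is hypothetical):
 *
 *	int __percpu *my_counter = alloc_percpu(int);
 *
 *	if (my_counter) {
 *		this_cpu_inc(*my_counter);
 *		...
 *		free_percpu(my_counter);
 *	}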
19075835d96eSTejun Heo */ 190843cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align) 1909edcb4639STejun Heo { 19105835d96eSTejun Heo return pcpu_alloc(size, align, false, GFP_KERNEL); 1911edcb4639STejun Heo } 1912fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 1913fbf59bc9STejun Heo 1914edcb4639STejun Heo /** 1915edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 1916edcb4639STejun Heo * @size: size of area to allocate in bytes 1917edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1918edcb4639STejun Heo * 19199329ba97STejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align 19209329ba97STejun Heo * from reserved percpu area if arch has set it up; otherwise, 19219329ba97STejun Heo * allocation is served from the same dynamic area. Might sleep. 19229329ba97STejun Heo * Might trigger writeouts. 1923edcb4639STejun Heo * 1924ccea34b5STejun Heo * CONTEXT: 1925ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1926ccea34b5STejun Heo * 1927edcb4639STejun Heo * RETURNS: 1928edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1929edcb4639STejun Heo */ 193043cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1931edcb4639STejun Heo { 19325835d96eSTejun Heo return pcpu_alloc(size, align, true, GFP_KERNEL); 1933edcb4639STejun Heo } 1934edcb4639STejun Heo 1935a56dbddfSTejun Heo /** 19363c7be18aSRoman Gushchin * __pcpu_balance_workfn - manage the amount of free chunks and populated pages 19373c7be18aSRoman Gushchin * @type: chunk type 1938a56dbddfSTejun Heo * 193947504ee0SDennis Zhou * Reclaim all fully free chunks except for the first one. This is also 194047504ee0SDennis Zhou * responsible for maintaining the pool of empty populated pages. However, 194147504ee0SDennis Zhou * it is possible that this is called when physical memory is scarce causing 194247504ee0SDennis Zhou * OOM killer to be triggered. We should avoid doing so until an actual 194347504ee0SDennis Zhou * allocation causes the failure as it is possible that requests can be 194447504ee0SDennis Zhou * serviced from already backed regions. 1945a56dbddfSTejun Heo */ 19463c7be18aSRoman Gushchin static void __pcpu_balance_workfn(enum pcpu_chunk_type type) 1947fbf59bc9STejun Heo { 194847504ee0SDennis Zhou /* gfp flags passed to underlying allocators */ 1949554fef1cSDennis Zhou const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 1950fe6bd8c3STejun Heo LIST_HEAD(to_free); 19513c7be18aSRoman Gushchin struct list_head *pcpu_slot = pcpu_chunk_list(type); 1952fe6bd8c3STejun Heo struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; 1953a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 19541a4d7607STejun Heo int slot, nr_to_pop, ret; 1955a56dbddfSTejun Heo 19561a4d7607STejun Heo /* 19571a4d7607STejun Heo * There's no reason to keep around multiple unused chunks and VM 19581a4d7607STejun Heo * areas can be scarce. Destroy all free chunks except for one. 
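 *
 * (Sparing one fully free chunk means that a free followed shortly by
 * another large allocation does not immediately pay for a full chunk
 * destroy/create cycle.)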
19591a4d7607STejun Heo */ 1960ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 1961ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1962a56dbddfSTejun Heo 1963fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, free_head, list) { 19648d408b4bSTejun Heo WARN_ON(chunk->immutable); 1965a56dbddfSTejun Heo 1966a56dbddfSTejun Heo /* spare the first one */ 1967fe6bd8c3STejun Heo if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 1968a56dbddfSTejun Heo continue; 1969a56dbddfSTejun Heo 1970fe6bd8c3STejun Heo list_move(&chunk->list, &to_free); 1971a56dbddfSTejun Heo } 1972a56dbddfSTejun Heo 1973ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1974a56dbddfSTejun Heo 1975fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, &to_free, list) { 1976e837dfdeSDennis Zhou unsigned int rs, re; 1977dca49645STejun Heo 1978e837dfdeSDennis Zhou bitmap_for_each_set_region(chunk->populated, rs, re, 0, 197991e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 1980a93ace48STejun Heo pcpu_depopulate_chunk(chunk, rs, re); 1981b539b87fSTejun Heo spin_lock_irq(&pcpu_lock); 1982b539b87fSTejun Heo pcpu_chunk_depopulated(chunk, rs, re); 1983b539b87fSTejun Heo spin_unlock_irq(&pcpu_lock); 1984a93ace48STejun Heo } 19856081089fSTejun Heo pcpu_destroy_chunk(chunk); 1986accd4f36SEric Dumazet cond_resched(); 1987fbf59bc9STejun Heo } 1988971f3918STejun Heo 19891a4d7607STejun Heo /* 19901a4d7607STejun Heo * Ensure there is a certain number of free populated pages for 19911a4d7607STejun Heo * atomic allocs. Fill up from the most packed so that atomic 19921a4d7607STejun Heo * allocs don't increase fragmentation. If atomic allocation 19931a4d7607STejun Heo * failed previously, always populate the maximum amount. This 19941a4d7607STejun Heo * should prevent atomic allocs larger than PAGE_SIZE from failing 19951a4d7607STejun Heo * indefinitely; however, large atomic allocs are not something we 19961a4d7607STejun Heo * support properly and can be highly unreliable and 19971a4d7607STejun Heo * inefficient.
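 *
 * E.g. (illustrative, assuming PCPU_EMPTY_POP_PAGES_HIGH = 4): with
 * one empty populated page on hand, the clamp below asks for 3 more
 * pages; after a failed atomic allocation, the full 4 are requested
 * outright.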
19981a4d7607STejun Heo */ 19991a4d7607STejun Heo retry_pop: 20001a4d7607STejun Heo if (pcpu_atomic_alloc_failed) { 20011a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 20021a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */ 20031a4d7607STejun Heo pcpu_atomic_alloc_failed = false; 20041a4d7607STejun Heo } else { 20051a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 20060760fa3dSRoman Gushchin pcpu_nr_empty_pop_pages[type], 20071a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH); 20081a4d7607STejun Heo } 20091a4d7607STejun Heo 20101a4d7607STejun Heo for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { 2011e837dfdeSDennis Zhou unsigned int nr_unpop = 0, rs, re; 20121a4d7607STejun Heo 20131a4d7607STejun Heo if (!nr_to_pop) 20141a4d7607STejun Heo break; 20151a4d7607STejun Heo 20161a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 20171a4d7607STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 20188ab16c43SDennis Zhou (Facebook) nr_unpop = chunk->nr_pages - chunk->nr_populated; 20191a4d7607STejun Heo if (nr_unpop) 20201a4d7607STejun Heo break; 20211a4d7607STejun Heo } 20221a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 20231a4d7607STejun Heo 20241a4d7607STejun Heo if (!nr_unpop) 20251a4d7607STejun Heo continue; 20261a4d7607STejun Heo 20271a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */ 2028e837dfdeSDennis Zhou bitmap_for_each_clear_region(chunk->populated, rs, re, 0, 202991e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 2030e837dfdeSDennis Zhou int nr = min_t(int, re - rs, nr_to_pop); 20311a4d7607STejun Heo 203247504ee0SDennis Zhou ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); 20331a4d7607STejun Heo if (!ret) { 20341a4d7607STejun Heo nr_to_pop -= nr; 20351a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 2036b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, rs + nr); 20371a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 20381a4d7607STejun Heo } else { 20391a4d7607STejun Heo nr_to_pop = 0; 20401a4d7607STejun Heo } 20411a4d7607STejun Heo 20421a4d7607STejun Heo if (!nr_to_pop) 20431a4d7607STejun Heo break; 20441a4d7607STejun Heo } 20451a4d7607STejun Heo } 20461a4d7607STejun Heo 20471a4d7607STejun Heo if (nr_to_pop) { 20481a4d7607STejun Heo /* ran out of chunks to populate, create a new one and retry */ 20493c7be18aSRoman Gushchin chunk = pcpu_create_chunk(type, gfp); 20501a4d7607STejun Heo if (chunk) { 20511a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 20521a4d7607STejun Heo pcpu_chunk_relocate(chunk, -1); 20531a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 20541a4d7607STejun Heo goto retry_pop; 20551a4d7607STejun Heo } 20561a4d7607STejun Heo } 20571a4d7607STejun Heo 2058971f3918STejun Heo mutex_unlock(&pcpu_alloc_mutex); 2059a56dbddfSTejun Heo } 2060fbf59bc9STejun Heo 2061fbf59bc9STejun Heo /** 20623c7be18aSRoman Gushchin * pcpu_balance_workfn - manage the amount of free chunks and populated pages 20633c7be18aSRoman Gushchin * @work: unused 20643c7be18aSRoman Gushchin * 20653c7be18aSRoman Gushchin * Call __pcpu_balance_workfn() for each chunk type. 
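 *
 * (With CONFIG_MEMCG_KMEM there are two chunk types, root and
 * memcg-aware, so both chunk lists get balanced; otherwise
 * PCPU_NR_CHUNK_TYPES is 1 and the loop body runs once.)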
20663c7be18aSRoman Gushchin */ 20673c7be18aSRoman Gushchin static void pcpu_balance_workfn(struct work_struct *work) 20683c7be18aSRoman Gushchin { 20693c7be18aSRoman Gushchin enum pcpu_chunk_type type; 20703c7be18aSRoman Gushchin 20713c7be18aSRoman Gushchin for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) 20723c7be18aSRoman Gushchin __pcpu_balance_workfn(type); 20733c7be18aSRoman Gushchin } 20743c7be18aSRoman Gushchin 20753c7be18aSRoman Gushchin /** 2076fbf59bc9STejun Heo * free_percpu - free percpu area 2077fbf59bc9STejun Heo * @ptr: pointer to area to free 2078fbf59bc9STejun Heo * 2079ccea34b5STejun Heo * Free percpu area @ptr. 2080ccea34b5STejun Heo * 2081ccea34b5STejun Heo * CONTEXT: 2082ccea34b5STejun Heo * Can be called from atomic context. 2083fbf59bc9STejun Heo */ 208443cf38ebSTejun Heo void free_percpu(void __percpu *ptr) 2085fbf59bc9STejun Heo { 2086129182e5SAndrew Morton void *addr; 2087fbf59bc9STejun Heo struct pcpu_chunk *chunk; 2088ccea34b5STejun Heo unsigned long flags; 20893c7be18aSRoman Gushchin int size, off; 2090198790d9SJohn Sperbeck bool need_balance = false; 20913c7be18aSRoman Gushchin struct list_head *pcpu_slot; 2092fbf59bc9STejun Heo 2093fbf59bc9STejun Heo if (!ptr) 2094fbf59bc9STejun Heo return; 2095fbf59bc9STejun Heo 2096f528f0b8SCatalin Marinas kmemleak_free_percpu(ptr); 2097f528f0b8SCatalin Marinas 2098129182e5SAndrew Morton addr = __pcpu_ptr_to_addr(ptr); 2099129182e5SAndrew Morton 2100ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 2101fbf59bc9STejun Heo 2102fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr); 2103bba174f5STejun Heo off = addr - chunk->base_addr; 2104fbf59bc9STejun Heo 21053c7be18aSRoman Gushchin size = pcpu_free_area(chunk, off); 21063c7be18aSRoman Gushchin 21073c7be18aSRoman Gushchin pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk)); 21083c7be18aSRoman Gushchin 21093c7be18aSRoman Gushchin pcpu_memcg_free_hook(chunk, off, size); 2110fbf59bc9STejun Heo 2111a56dbddfSTejun Heo /* if there is more than one fully free chunk, wake up the grim reaper */ 211240064aecSDennis Zhou (Facebook) if (chunk->free_bytes == pcpu_unit_size) { 2113fbf59bc9STejun Heo struct pcpu_chunk *pos; 2114fbf59bc9STejun Heo 2115a56dbddfSTejun Heo list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 2116fbf59bc9STejun Heo if (pos != chunk) { 2117198790d9SJohn Sperbeck need_balance = true; 2118fbf59bc9STejun Heo break; 2119fbf59bc9STejun Heo } 2120fbf59bc9STejun Heo } 2121fbf59bc9STejun Heo 2122df95e795SDennis Zhou trace_percpu_free_percpu(chunk->base_addr, off, ptr); 2123df95e795SDennis Zhou 2124ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 2125198790d9SJohn Sperbeck 2126198790d9SJohn Sperbeck if (need_balance) 2127198790d9SJohn Sperbeck pcpu_schedule_balance_work(); 2128fbf59bc9STejun Heo } 2129fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 2130fbf59bc9STejun Heo 2131383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) 2132383776faSThomas Gleixner { 2133383776faSThomas Gleixner #ifdef CONFIG_SMP 2134383776faSThomas Gleixner const size_t static_size = __per_cpu_end - __per_cpu_start; 2135383776faSThomas Gleixner void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 2136383776faSThomas Gleixner unsigned int cpu; 2137383776faSThomas Gleixner 2138383776faSThomas Gleixner for_each_possible_cpu(cpu) { 2139383776faSThomas Gleixner void *start = per_cpu_ptr(base, cpu); 2140383776faSThomas Gleixner void *va = (void *)addr; 2141383776faSThomas Gleixner 2142383776faSThomas Gleixner if (va
>= start && va < start + static_size) { 21438ce371f9SPeter Zijlstra if (can_addr) { 2144383776faSThomas Gleixner *can_addr = (unsigned long) (va - start); 21458ce371f9SPeter Zijlstra *can_addr += (unsigned long) 21468ce371f9SPeter Zijlstra per_cpu_ptr(base, get_boot_cpu_id()); 21478ce371f9SPeter Zijlstra } 2148383776faSThomas Gleixner return true; 2149383776faSThomas Gleixner } 2150383776faSThomas Gleixner } 2151383776faSThomas Gleixner #endif 2152383776faSThomas Gleixner /* on UP, can't distinguish from other static vars, always false */ 2153383776faSThomas Gleixner return false; 2154383776faSThomas Gleixner } 2155383776faSThomas Gleixner 21563b034b0dSVivek Goyal /** 215710fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area 215810fad5e4STejun Heo * @addr: address to test 215910fad5e4STejun Heo * 216010fad5e4STejun Heo * Test whether @addr belongs to the in-kernel static percpu area. Module 216110fad5e4STejun Heo * static percpu areas are not considered. For those, use 216210fad5e4STejun Heo * is_module_percpu_address(). 216310fad5e4STejun Heo * 216410fad5e4STejun Heo * RETURNS: 216510fad5e4STejun Heo * %true if @addr is from the in-kernel static percpu area, %false otherwise. 216610fad5e4STejun Heo */ 216710fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr) 216810fad5e4STejun Heo { 2169383776faSThomas Gleixner return __is_kernel_percpu_address(addr, NULL); 217010fad5e4STejun Heo } 217110fad5e4STejun Heo 217210fad5e4STejun Heo /** 21733b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address 21743b034b0dSVivek Goyal * @addr: the address to be converted to physical address 21753b034b0dSVivek Goyal * 21763b034b0dSVivek Goyal * Given @addr, which is a dereferenceable address obtained via one of 21773b034b0dSVivek Goyal * the percpu access macros, this function translates it into its physical 21783b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid 21793b034b0dSVivek Goyal * until this function finishes. 21803b034b0dSVivek Goyal * 218167589c71SDave Young * The percpu allocator has a special setup for the first chunk, which 218267589c71SDave Young * currently supports either embedding in the linear address space or vmalloc 218367589c71SDave Young * mapping, and, from the second chunk on, the backing allocator (currently 218467589c71SDave Young * either vm or km) provides the translation. 218567589c71SDave Young * 2186bffc4375SYannick Guerrini * @addr could be translated simply without checking whether it falls into the 218767589c71SDave Young * first chunk, but the current code better reflects how the percpu allocator 218867589c71SDave Young * actually works, and the verification can discover bugs both in the percpu 218967589c71SDave Young * allocator itself and in per_cpu_ptr_to_phys() callers. So we keep the 219067589c71SDave Young * current code. 219167589c71SDave Young * 21923b034b0dSVivek Goyal * RETURNS: 21933b034b0dSVivek Goyal * The physical address for @addr.
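 *
 * As a usage sketch (variable names hypothetical), a caller wanting the
 * physical address of cpu 0's instance of a dynamic percpu object would
 * do:
 *
 *	u32 __percpu *p = alloc_percpu(u32);
 *	phys_addr_t phys = per_cpu_ptr_to_phys(per_cpu_ptr(p, 0));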
21943b034b0dSVivek Goyal */ 21953b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr) 21963b034b0dSVivek Goyal { 21979983b6f0STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 21989983b6f0STejun Heo bool in_first_chunk = false; 2199a855b84cSTejun Heo unsigned long first_low, first_high; 22009983b6f0STejun Heo unsigned int cpu; 22019983b6f0STejun Heo 22029983b6f0STejun Heo /* 2203a855b84cSTejun Heo * The following test on unit_low/high isn't strictly 22049983b6f0STejun Heo * necessary but will speed up lookups of addresses which 22059983b6f0STejun Heo * aren't in the first chunk. 2206c0ebfdc3SDennis Zhou (Facebook) * 2207c0ebfdc3SDennis Zhou (Facebook) * The address check is against full chunk sizes. pcpu_base_addr 2208c0ebfdc3SDennis Zhou (Facebook) * points to the beginning of the first chunk including the 2209c0ebfdc3SDennis Zhou (Facebook) * static region. Assumes good intent as the first chunk may 2210c0ebfdc3SDennis Zhou (Facebook) * not be full (ie. < pcpu_unit_pages in size). 22119983b6f0STejun Heo */ 2212c0ebfdc3SDennis Zhou (Facebook) first_low = (unsigned long)pcpu_base_addr + 2213c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 2214c0ebfdc3SDennis Zhou (Facebook) first_high = (unsigned long)pcpu_base_addr + 2215c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 2216a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 2217a855b84cSTejun Heo (unsigned long)addr < first_high) { 22189983b6f0STejun Heo for_each_possible_cpu(cpu) { 22199983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 22209983b6f0STejun Heo 22219983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 22229983b6f0STejun Heo in_first_chunk = true; 22239983b6f0STejun Heo break; 22249983b6f0STejun Heo } 22259983b6f0STejun Heo } 22269983b6f0STejun Heo } 22279983b6f0STejun Heo 22289983b6f0STejun Heo if (in_first_chunk) { 2229eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 22303b034b0dSVivek Goyal return __pa(addr); 22313b034b0dSVivek Goyal else 22329f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 22339f57bd4dSEugene Surovegin offset_in_page(addr); 2234020ec653STejun Heo } else 22359f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 22369f57bd4dSEugene Surovegin offset_in_page(addr); 22373b034b0dSVivek Goyal } 22383b034b0dSVivek Goyal 2239fbf59bc9STejun Heo /** 2240fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 2241fd1e8a1fSTejun Heo * @nr_groups: the number of groups 2242fd1e8a1fSTejun Heo * @nr_units: the number of units 2243033e48fbSTejun Heo * 2244fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 2245fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 2246fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 2247fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 2248fd1e8a1fSTejun Heo * pointer of other groups. 2249033e48fbSTejun Heo * 2250033e48fbSTejun Heo * RETURNS: 2251fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 2252fd1e8a1fSTejun Heo * failure. 
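 *
 * For example, the UP setup_per_cpu_areas() at the bottom of this file
 * builds the simplest possible descriptor, one group with one unit:
 *
 *	ai = pcpu_alloc_alloc_info(1, 1);
 *	ai->groups[0].nr_units = 1;
 *	ai->groups[0].cpu_map[0] = 0;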
2253033e48fbSTejun Heo */ 2254fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 2255fd1e8a1fSTejun Heo int nr_units) 2256fd1e8a1fSTejun Heo { 2257fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 2258fd1e8a1fSTejun Heo size_t base_size, ai_size; 2259fd1e8a1fSTejun Heo void *ptr; 2260fd1e8a1fSTejun Heo int unit; 2261fd1e8a1fSTejun Heo 226214d37612SGustavo A. R. Silva base_size = ALIGN(struct_size(ai, groups, nr_groups), 2263fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 2264fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 2265fd1e8a1fSTejun Heo 226626fb3daeSMike Rapoport ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); 2267fd1e8a1fSTejun Heo if (!ptr) 2268fd1e8a1fSTejun Heo return NULL; 2269fd1e8a1fSTejun Heo ai = ptr; 2270fd1e8a1fSTejun Heo ptr += base_size; 2271fd1e8a1fSTejun Heo 2272fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 2273fd1e8a1fSTejun Heo 2274fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 2275fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 2276fd1e8a1fSTejun Heo 2277fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 2278fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 2279fd1e8a1fSTejun Heo 2280fd1e8a1fSTejun Heo return ai; 2281fd1e8a1fSTejun Heo } 2282fd1e8a1fSTejun Heo 2283fd1e8a1fSTejun Heo /** 2284fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 2285fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 2286fd1e8a1fSTejun Heo * 2287fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 2288fd1e8a1fSTejun Heo */ 2289fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 2290fd1e8a1fSTejun Heo { 2291999c17e3SSantosh Shilimkar memblock_free_early(__pa(ai), ai->__ai_size); 2292fd1e8a1fSTejun Heo } 2293fd1e8a1fSTejun Heo 2294fd1e8a1fSTejun Heo /** 2295fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 2296fd1e8a1fSTejun Heo * @lvl: loglevel 2297fd1e8a1fSTejun Heo * @ai: allocation info to dump 2298fd1e8a1fSTejun Heo * 2299fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 
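 *
 * The output looks like the following, where the summary line shows the
 * static, reserved, dynamic and unit sizes and each following line maps
 * the units of an allocation to cpus (all values illustrative):
 *
 *	pcpu-alloc: s145624 r8192 d28456 u262144 alloc=1*1048576
 *	pcpu-alloc: [0] 0 1 2 3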
2300fd1e8a1fSTejun Heo */ 2301fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl, 2302fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai) 2303033e48fbSTejun Heo { 2304fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width; 2305033e48fbSTejun Heo char empty_str[] = "--------"; 2306fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0; 2307fd1e8a1fSTejun Heo int group, v; 2308fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */ 2309033e48fbSTejun Heo 2310fd1e8a1fSTejun Heo v = ai->nr_groups; 2311033e48fbSTejun Heo while (v /= 10) 2312fd1e8a1fSTejun Heo group_width++; 2313033e48fbSTejun Heo 2314fd1e8a1fSTejun Heo v = num_possible_cpus(); 2315fd1e8a1fSTejun Heo while (v /= 10) 2316fd1e8a1fSTejun Heo cpu_width++; 2317fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 2318033e48fbSTejun Heo 2319fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size; 2320fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3; 2321fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1)); 2322033e48fbSTejun Heo 2323fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 2324fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 2325fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 2326fd1e8a1fSTejun Heo 2327fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) { 2328fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 2329fd1e8a1fSTejun Heo int unit = 0, unit_end = 0; 2330fd1e8a1fSTejun Heo 2331fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa); 2332fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa; 2333fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) { 2334fd1e8a1fSTejun Heo if (!(alloc % apl)) { 23351170532bSJoe Perches pr_cont("\n"); 2336fd1e8a1fSTejun Heo printk("%spcpu-alloc: ", lvl); 2337033e48fbSTejun Heo } 23381170532bSJoe Perches pr_cont("[%0*d] ", group_width, group); 2339fd1e8a1fSTejun Heo 2340fd1e8a1fSTejun Heo for (unit_end += upa; unit < unit_end; unit++) 2341fd1e8a1fSTejun Heo if (gi->cpu_map[unit] != NR_CPUS) 23421170532bSJoe Perches pr_cont("%0*d ", 23431170532bSJoe Perches cpu_width, gi->cpu_map[unit]); 2344033e48fbSTejun Heo else 23451170532bSJoe Perches pr_cont("%s ", empty_str); 2346033e48fbSTejun Heo } 2347fd1e8a1fSTejun Heo } 23481170532bSJoe Perches pr_cont("\n"); 2349033e48fbSTejun Heo } 2350033e48fbSTejun Heo 2351fbf59bc9STejun Heo /** 23528d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 2353fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how the percpu area is shaped 235438a6be52STejun Heo * @base_addr: mapped address 2355fbf59bc9STejun Heo * 23568d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 235769ab285bSChristophe JAILLET * percpu area. This function is to be called from the arch percpu area 235838a6be52STejun Heo * setup path. 23598d408b4bSTejun Heo * 2360fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first 2361fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator. 23628d408b4bSTejun Heo * 2363fd1e8a1fSTejun Heo * @ai->static_size is the size of the static percpu area. 2364fd1e8a1fSTejun Heo * 2365fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the number of bytes to 2366edcb4639STejun Heo * reserve after the static area in the first chunk.
This reserves 2367edcb4639STejun Heo * the first chunk such that it's available only through reserved 2368edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 2369edcb4639STejun Heo * static areas on architectures where the addressing model has 2370edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 2371edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 2372edcb4639STejun Heo * 2373fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 2374fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 2375fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 23766074d5b0STejun Heo * 2377fd1e8a1fSTejun Heo * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 2378fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 2379fd1e8a1fSTejun Heo * @ai->dyn_size. 23808d408b4bSTejun Heo * 2381fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and used as alignment 2382fd1e8a1fSTejun Heo * for vm areas. 23838d408b4bSTejun Heo * 2384fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and always multiple of 2385fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 2386fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 2387fd1e8a1fSTejun Heo * 2388fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe virtual memory layout of 2389fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the 2390fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 2391fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 2392fd1e8a1fSTejun Heo * all units is assumed. 23938d408b4bSTejun Heo * 239438a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 239538a6be52STejun Heo * copied static data to each unit. 2396fbf59bc9STejun Heo * 2397c0ebfdc3SDennis Zhou (Facebook) * The first chunk will always contain a static and a dynamic region. 2398c0ebfdc3SDennis Zhou (Facebook) * However, the static region is not managed by any chunk. If the first 2399c0ebfdc3SDennis Zhou (Facebook) * chunk also contains a reserved region, it is served by two chunks - 2400c0ebfdc3SDennis Zhou (Facebook) * one for the reserved region and one for the dynamic region. They 2401c0ebfdc3SDennis Zhou (Facebook) * share the same vm, but use offset regions in the area allocation map. 2402c0ebfdc3SDennis Zhou (Facebook) * The chunk serving the dynamic region is circulated in the chunk slots 2403c0ebfdc3SDennis Zhou (Facebook) * and available for dynamic allocation like any other chunk. 
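 *
 * A sketch of the expected call sequence on the arch side (the middle
 * step stands in for whatever allocation and mapping scheme the arch
 * uses):
 *
 *	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, NULL);
 *	base_addr = <allocate and map the units, copy static data per @ai>;
 *	pcpu_setup_first_chunk(ai, base_addr);
 *	pcpu_free_alloc_info(ai);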
2404fbf59bc9STejun Heo */ 2405163fa234SKefeng Wang void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 2406fd1e8a1fSTejun Heo void *base_addr) 2407fbf59bc9STejun Heo { 2408b9c39442SDennis Zhou (Facebook) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2409d2f3c384SDennis Zhou (Facebook) size_t static_size, dyn_size; 24100c4169c3SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 24116563297cSTejun Heo unsigned long *group_offsets; 24126563297cSTejun Heo size_t *group_sizes; 2413fb435d52STejun Heo unsigned long *unit_off; 2414fbf59bc9STejun Heo unsigned int cpu; 2415fd1e8a1fSTejun Heo int *unit_map; 2416fd1e8a1fSTejun Heo int group, unit, i; 2417c0ebfdc3SDennis Zhou (Facebook) int map_size; 2418c0ebfdc3SDennis Zhou (Facebook) unsigned long tmp_addr; 2419f655f405SMike Rapoport size_t alloc_size; 24203c7be18aSRoman Gushchin enum pcpu_chunk_type type; 2421fbf59bc9STejun Heo 2422635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 2423635b75fcSTejun Heo if (unlikely(cond)) { \ 2424870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \ 2425870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \ 2426807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \ 2427635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 2428635b75fcSTejun Heo BUG(); \ 2429635b75fcSTejun Heo } \ 2430635b75fcSTejun Heo } while (0) 2431635b75fcSTejun Heo 24322f39e637STejun Heo /* sanity checks */ 2433635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 2434bbddff05STejun Heo #ifdef CONFIG_SMP 2435635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 2436f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 2437bbddff05STejun Heo #endif 2438635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 2439f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 2440635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 2441f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 2442635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 2443ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); 2444099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 2445fb29a2ccSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!ai->dyn_size); 2446d2f3c384SDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); 2447ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || 2448ca460b3cSDennis Zhou (Facebook) IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); 24499f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 24508d408b4bSTejun Heo 24516563297cSTejun Heo /* process group information and build config tables accordingly */ 2452f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_offsets[0]); 2453f655f405SMike Rapoport group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2454f655f405SMike Rapoport if (!group_offsets) 2455f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2456f655f405SMike Rapoport alloc_size); 2457f655f405SMike Rapoport 2458f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_sizes[0]); 2459f655f405SMike Rapoport group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2460f655f405SMike Rapoport if (!group_sizes) 2461f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2462f655f405SMike Rapoport alloc_size); 2463f655f405SMike 
Rapoport 2464f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_map[0]); 2465f655f405SMike Rapoport unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2466f655f405SMike Rapoport if (!unit_map) 2467f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2468f655f405SMike Rapoport alloc_size); 2469f655f405SMike Rapoport 2470f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_off[0]); 2471f655f405SMike Rapoport unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2472f655f405SMike Rapoport if (!unit_off) 2473f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2474f655f405SMike Rapoport alloc_size); 24752f39e637STejun Heo 2476fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2477ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 2478a855b84cSTejun Heo 2479a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 2480a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 24812f39e637STejun Heo 2482fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 2483fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 24842f39e637STejun Heo 24856563297cSTejun Heo group_offsets[group] = gi->base_offset; 24866563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 24876563297cSTejun Heo 2488fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 2489fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 2490fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 2491fd1e8a1fSTejun Heo continue; 2492fd1e8a1fSTejun Heo 24939f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 2494635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 2495635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 2496fd1e8a1fSTejun Heo 2497fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 2498fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 2499fb435d52STejun Heo 2500a855b84cSTejun Heo /* determine low/high unit_cpu */ 2501a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 2502a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 2503a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 2504a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 2505a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 2506a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 25070fc0531eSLinus Torvalds } 25080fc0531eSLinus Torvalds } 2509fd1e8a1fSTejun Heo pcpu_nr_units = unit; 25102f39e637STejun Heo 25112f39e637STejun Heo for_each_possible_cpu(cpu) 2512635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 2513635b75fcSTejun Heo 2514635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 2515635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 2516bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 25172f39e637STejun Heo 25186563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 25196563297cSTejun Heo pcpu_group_offsets = group_offsets; 25206563297cSTejun Heo pcpu_group_sizes = group_sizes; 2521fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 2522fb435d52STejun Heo pcpu_unit_offsets = unit_off; 25232f39e637STejun Heo 25242f39e637STejun Heo /* determine basic parameters */ 2525fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2526d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 25276563297cSTejun Heo pcpu_atom_size = ai->atom_size; 252861cf93d3SDennis Zhou pcpu_chunk_struct_size = struct_size(chunk, populated, 252961cf93d3SDennis Zhou BITS_TO_LONGS(pcpu_unit_pages)); 2530cafe8816STejun Heo 253130a5b536SDennis Zhou pcpu_stats_save_ai(ai); 
253230a5b536SDennis Zhou 2533d9b55eebSTejun Heo /* 2534d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 2535d9b55eebSTejun Heo * empty chunks. 2536d9b55eebSTejun Heo */ 2537d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 25383c7be18aSRoman Gushchin pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * 25393c7be18aSRoman Gushchin sizeof(pcpu_chunk_lists[0]) * 25403c7be18aSRoman Gushchin PCPU_NR_CHUNK_TYPES, 25417e1c4e27SMike Rapoport SMP_CACHE_BYTES); 25423c7be18aSRoman Gushchin if (!pcpu_chunk_lists) 2543f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 25443c7be18aSRoman Gushchin pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) * 25453c7be18aSRoman Gushchin PCPU_NR_CHUNK_TYPES); 25463c7be18aSRoman Gushchin 25473c7be18aSRoman Gushchin for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) 2548fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 25493c7be18aSRoman Gushchin INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]); 2550fbf59bc9STejun Heo 2551edcb4639STejun Heo /* 2552d2f3c384SDennis Zhou (Facebook) * The end of the static region needs to be aligned with the 2553d2f3c384SDennis Zhou (Facebook) * minimum allocation size as this offsets the reserved and 2554d2f3c384SDennis Zhou (Facebook) * dynamic region. The first chunk ends page aligned by 2555d2f3c384SDennis Zhou (Facebook) * expanding the dynamic region, therefore the dynamic region 2556d2f3c384SDennis Zhou (Facebook) * can be shrunk to compensate while still staying above the 2557d2f3c384SDennis Zhou (Facebook) * configured sizes. 2558d2f3c384SDennis Zhou (Facebook) */ 2559d2f3c384SDennis Zhou (Facebook) static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); 2560d2f3c384SDennis Zhou (Facebook) dyn_size = ai->dyn_size - (static_size - ai->static_size); 2561d2f3c384SDennis Zhou (Facebook) 2562d2f3c384SDennis Zhou (Facebook) /* 2563c0ebfdc3SDennis Zhou (Facebook) * Initialize first chunk. 2564c0ebfdc3SDennis Zhou (Facebook) * If the reserved_size is non-zero, this initializes the reserved 2565c0ebfdc3SDennis Zhou (Facebook) * chunk. If the reserved_size is zero, the reserved chunk is NULL 2566c0ebfdc3SDennis Zhou (Facebook) * and the dynamic region is initialized here. The first chunk, 2567c0ebfdc3SDennis Zhou (Facebook) * pcpu_first_chunk, will always point to the chunk that serves 2568c0ebfdc3SDennis Zhou (Facebook) * the dynamic region. 
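 *
 * For example (sizes illustrative): with an aligned static_size of
 * 0x5000 and a reserved_size of 0x2000, the reserved chunk is built at
 * base_addr + 0x5000 and pcpu_first_chunk at base_addr + 0x7000.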
2569edcb4639STejun Heo */ 2570d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size; 2571d2f3c384SDennis Zhou (Facebook) map_size = ai->reserved_size ?: dyn_size; 257240064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 257361ace7faSTejun Heo 2574edcb4639STejun Heo /* init dynamic chunk if necessary */ 2575b9c39442SDennis Zhou (Facebook) if (ai->reserved_size) { 25760c4169c3SDennis Zhou (Facebook) pcpu_reserved_chunk = chunk; 2577b9c39442SDennis Zhou (Facebook) 2578d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size + 2579c0ebfdc3SDennis Zhou (Facebook) ai->reserved_size; 2580d2f3c384SDennis Zhou (Facebook) map_size = dyn_size; 258140064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2582edcb4639STejun Heo } 2583edcb4639STejun Heo 25842441d15cSTejun Heo /* link the first chunk in */ 25850c4169c3SDennis Zhou (Facebook) pcpu_first_chunk = chunk; 25860760fa3dSRoman Gushchin pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages; 2587ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 2588fbf59bc9STejun Heo 25897e8a6304SDennis Zhou (Facebook) /* include all regions of the first chunk */ 25907e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += PFN_DOWN(size_sum); 25917e8a6304SDennis Zhou (Facebook) 259230a5b536SDennis Zhou pcpu_stats_chunk_alloc(); 2593df95e795SDennis Zhou trace_percpu_create_chunk(base_addr); 259430a5b536SDennis Zhou 2595fbf59bc9STejun Heo /* we're done */ 2596bba174f5STejun Heo pcpu_base_addr = base_addr; 2597fbf59bc9STejun Heo } 259866c3a757STejun Heo 2599bbddff05STejun Heo #ifdef CONFIG_SMP 2600bbddff05STejun Heo 260117f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 2602f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 2603f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 2604f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 2605f58dc01bSTejun Heo }; 260666c3a757STejun Heo 2607f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 2608f58dc01bSTejun Heo 2609f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 261066c3a757STejun Heo { 26115479c78aSCyrill Gorcunov if (!str) 26125479c78aSCyrill Gorcunov return -EINVAL; 26135479c78aSCyrill Gorcunov 2614f58dc01bSTejun Heo if (0) 2615f58dc01bSTejun Heo /* nada */; 2616f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 2617f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 2618f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 2619f58dc01bSTejun Heo #endif 2620f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2621f58dc01bSTejun Heo else if (!strcmp(str, "page")) 2622f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 2623f58dc01bSTejun Heo #endif 2624f58dc01bSTejun Heo else 2625870d4b12SJoe Perches pr_warn("unknown allocator %s specified\n", str); 262666c3a757STejun Heo 2627f58dc01bSTejun Heo return 0; 262866c3a757STejun Heo } 2629f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup); 263066c3a757STejun Heo 26313c9a024fSTejun Heo /* 26323c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup. 26333c9a024fSTejun Heo * Build it if needed by the arch config or the generic setup is going 26343c9a024fSTejun Heo * to be used. 
26353c9a024fSTejun Heo */ 263608fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 263708fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 26383c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 26393c9a024fSTejun Heo #endif 26403c9a024fSTejun Heo 26413c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 26423c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 26433c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 26443c9a024fSTejun Heo #endif 26453c9a024fSTejun Heo 26463c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 26473c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 26483c9a024fSTejun Heo /** 2649fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 2650fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2651fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2652fbf59bc9STejun Heo * @atom_size: allocation atom size 2653fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2654fbf59bc9STejun Heo * 2655fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 2656fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 2657fbf59bc9STejun Heo * atom size and distances between CPUs. 2658fbf59bc9STejun Heo * 2659bffc4375SYannick Guerrini * Groups are always multiples of atom size and CPUs which are of 2660fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 2661fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 2662fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage 2663fbf59bc9STejun Heo * of allocated virtual address space. 2664fbf59bc9STejun Heo * 2665fbf59bc9STejun Heo * RETURNS: 2666fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On 2667fbf59bc9STejun Heo * failure, ERR_PTR value is returned. 
2668fbf59bc9STejun Heo */ 2669258e0815SDennis Zhou static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info( 2670fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 2671fbf59bc9STejun Heo size_t atom_size, 2672fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 2673fbf59bc9STejun Heo { 2674fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 2675fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 2676d7d29ac7SWonhyuk Yang static struct cpumask mask __initdata; 2677fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 2678fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 2679fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 26803f649ab7SKees Cook int upa, max_upa, best_upa; /* units_per_alloc */ 2681fbf59bc9STejun Heo int last_allocs, group, unit; 2682fbf59bc9STejun Heo unsigned int cpu, tcpu; 2683fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 2684fbf59bc9STejun Heo unsigned int *cpu_map; 2685fbf59bc9STejun Heo 2686fbf59bc9STejun Heo /* this function may be called multiple times */ 2687fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 2688fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 2689d7d29ac7SWonhyuk Yang cpumask_clear(&mask); 2690fbf59bc9STejun Heo 2691fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 2692fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 2693fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 2694fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 2695fbf59bc9STejun Heo 2696fbf59bc9STejun Heo /* 2697fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 2698fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 269925985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 2700fbf59bc9STejun Heo * or larger than min_unit_size. 
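 *
 * For example (sizes illustrative): size_sum = 44k gives
 * min_unit_size = 44k; with atom_size = 2M, alloc_size becomes 2M and
 * the scan below settles on max_upa = 32, i.e. 64k units, the largest
 * count that both divides alloc_size evenly and keeps each unit page
 * aligned.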
2701fbf59bc9STejun Heo */ 2702fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 2703fbf59bc9STejun Heo 27049c015162SDennis Zhou (Facebook) /* determine the maximum # of units that can fit in an allocation */ 2705fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 2706fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 2707f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2708fbf59bc9STejun Heo upa--; 2709fbf59bc9STejun Heo max_upa = upa; 2710fbf59bc9STejun Heo 2711d7d29ac7SWonhyuk Yang cpumask_copy(&mask, cpu_possible_mask); 2712d7d29ac7SWonhyuk Yang 2713fbf59bc9STejun Heo /* group cpus according to their proximity */ 2714d7d29ac7SWonhyuk Yang for (group = 0; !cpumask_empty(&mask); group++) { 2715d7d29ac7SWonhyuk Yang /* pop the group's first cpu */ 2716d7d29ac7SWonhyuk Yang cpu = cpumask_first(&mask); 2717fbf59bc9STejun Heo group_map[cpu] = group; 2718fbf59bc9STejun Heo group_cnt[group]++; 2719d7d29ac7SWonhyuk Yang cpumask_clear_cpu(cpu, &mask); 2720d7d29ac7SWonhyuk Yang 2721d7d29ac7SWonhyuk Yang for_each_cpu(tcpu, &mask) { 2722d7d29ac7SWonhyuk Yang if (!cpu_distance_fn || 2723d7d29ac7SWonhyuk Yang (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE && 2724d7d29ac7SWonhyuk Yang cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) { 2725d7d29ac7SWonhyuk Yang group_map[tcpu] = group; 2726d7d29ac7SWonhyuk Yang group_cnt[group]++; 2727d7d29ac7SWonhyuk Yang cpumask_clear_cpu(tcpu, &mask); 2728fbf59bc9STejun Heo } 2729d7d29ac7SWonhyuk Yang } 2730d7d29ac7SWonhyuk Yang } 2731d7d29ac7SWonhyuk Yang nr_groups = group; 2732fbf59bc9STejun Heo 2733fbf59bc9STejun Heo /* 27349c015162SDennis Zhou (Facebook) * Wasted space is caused by a ratio imbalance of upa to group_cnt. 27359c015162SDennis Zhou (Facebook) * Expand the unit_size until we use >= 75% of the units allocated. 27369c015162SDennis Zhou (Facebook) * Related to atom_size, which could be much larger than the unit_size. 2737fbf59bc9STejun Heo */ 2738fbf59bc9STejun Heo last_allocs = INT_MAX; 2739fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 2740fbf59bc9STejun Heo int allocs = 0, wasted = 0; 2741fbf59bc9STejun Heo 2742f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2743fbf59bc9STejun Heo continue; 2744fbf59bc9STejun Heo 2745fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2746fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 2747fbf59bc9STejun Heo allocs += this_allocs; 2748fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 2749fbf59bc9STejun Heo } 2750fbf59bc9STejun Heo 2751fbf59bc9STejun Heo /* 2752fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 2753fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 2754fbf59bc9STejun Heo * passes the following check. 
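 *
 * For example (counts illustrative): with group_cnt = {6, 2} on an
 * 8-cpu machine, upa = 4 gives allocs = 2 + 1 and wasted =
 * 2 + 2 = 4 > 8 / 3, so upa = 4 is rejected; upa = 2 wastes no units
 * and is accepted.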
2755fbf59bc9STejun Heo */ 2756fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 2757fbf59bc9STejun Heo continue; 2758fbf59bc9STejun Heo 2759fbf59bc9STejun Heo /* and then don't consume more memory */ 2760fbf59bc9STejun Heo if (allocs > last_allocs) 2761fbf59bc9STejun Heo break; 2762fbf59bc9STejun Heo last_allocs = allocs; 2763fbf59bc9STejun Heo best_upa = upa; 2764fbf59bc9STejun Heo } 2765fbf59bc9STejun Heo upa = best_upa; 2766fbf59bc9STejun Heo 2767fbf59bc9STejun Heo /* allocate and fill alloc_info */ 2768fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 2769fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 2770fbf59bc9STejun Heo 2771fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 2772fbf59bc9STejun Heo if (!ai) 2773fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 2774fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 2775fbf59bc9STejun Heo 2776fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2777fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 2778fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 2779fbf59bc9STejun Heo } 2780fbf59bc9STejun Heo 2781fbf59bc9STejun Heo ai->static_size = static_size; 2782fbf59bc9STejun Heo ai->reserved_size = reserved_size; 2783fbf59bc9STejun Heo ai->dyn_size = dyn_size; 2784fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 2785fbf59bc9STejun Heo ai->atom_size = atom_size; 2786fbf59bc9STejun Heo ai->alloc_size = alloc_size; 2787fbf59bc9STejun Heo 27882de7852fSPeng Fan for (group = 0, unit = 0; group < nr_groups; group++) { 2789fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2790fbf59bc9STejun Heo 2791fbf59bc9STejun Heo /* 2792fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 2793fbf59bc9STejun Heo * back-to-back. The caller should update this to 2794fbf59bc9STejun Heo * reflect actual allocation. 2795fbf59bc9STejun Heo */ 2796fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 2797fbf59bc9STejun Heo 2798fbf59bc9STejun Heo for_each_possible_cpu(cpu) 2799fbf59bc9STejun Heo if (group_map[cpu] == group) 2800fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 2801fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 2802fbf59bc9STejun Heo unit += gi->nr_units; 2803fbf59bc9STejun Heo } 2804fbf59bc9STejun Heo BUG_ON(unit != nr_units); 2805fbf59bc9STejun Heo 2806fbf59bc9STejun Heo return ai; 2807fbf59bc9STejun Heo } 28083c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 2809fbf59bc9STejun Heo 28103c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 281166c3a757STejun Heo /** 281266c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 281366c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 28144ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2815c8826dd5STejun Heo * @atom_size: allocation atom size 2816c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2817c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 281825985edcSLucas De Marchi * @free_fn: function to free percpu page 281966c3a757STejun Heo * 282066c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 282166c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 
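 *
 * The generic setup_per_cpu_areas() near the end of this file is a
 * typical caller:
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
 *				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);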
282266c3a757STejun Heo * 282366c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 2824c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 2825c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 2826c8826dd5STejun Heo * aligned to @atom_size. 2827c8826dd5STejun Heo * 2828c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 2829c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 2830c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 2831c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 2832c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 2833c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 283466c3a757STejun Heo * 28354ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 283666c3a757STejun Heo * 283766c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 2838c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 283966c3a757STejun Heo * 284066c3a757STejun Heo * RETURNS: 2841fb435d52STejun Heo * 0 on success, -errno on failure. 284266c3a757STejun Heo */ 28434ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 2844c8826dd5STejun Heo size_t atom_size, 2845c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 2846c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2847c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 284866c3a757STejun Heo { 2849c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 2850c8826dd5STejun Heo void **areas = NULL; 2851fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 285293c76b6bSzijun_hu size_t size_sum, areas_size; 285393c76b6bSzijun_hu unsigned long max_distance; 2854163fa234SKefeng Wang int group, i, highest_group, rc = 0; 285566c3a757STejun Heo 2856c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 2857c8826dd5STejun Heo cpu_distance_fn); 2858fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2859fd1e8a1fSTejun Heo return PTR_ERR(ai); 286066c3a757STejun Heo 2861fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2862c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 286366c3a757STejun Heo 286426fb3daeSMike Rapoport areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); 2865c8826dd5STejun Heo if (!areas) { 2866fb435d52STejun Heo rc = -ENOMEM; 2867c8826dd5STejun Heo goto out_free; 2868fa8a7094STejun Heo } 286966c3a757STejun Heo 28709b739662Szijun_hu /* allocate, copy and determine base address & max_distance */ 28719b739662Szijun_hu highest_group = 0; 2872c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2873c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2874c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 2875c8826dd5STejun Heo void *ptr; 287666c3a757STejun Heo 2877c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 2878c8826dd5STejun Heo cpu = gi->cpu_map[i]; 2879c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 2880c8826dd5STejun Heo 2881c8826dd5STejun Heo /* allocate space for the whole group */ 2882c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 2883c8826dd5STejun Heo if (!ptr) { 2884c8826dd5STejun Heo rc = -ENOMEM; 2885c8826dd5STejun Heo goto out_free_areas; 2886c8826dd5STejun Heo } 2887f528f0b8SCatalin Marinas /* 
kmemleak tracks the percpu allocations separately */ 2888f528f0b8SCatalin Marinas kmemleak_free(ptr); 2889c8826dd5STejun Heo areas[group] = ptr; 2890c8826dd5STejun Heo 2891c8826dd5STejun Heo base = min(ptr, base); 28929b739662Szijun_hu if (ptr > areas[highest_group]) 28939b739662Szijun_hu highest_group = group; 28949b739662Szijun_hu } 28959b739662Szijun_hu max_distance = areas[highest_group] - base; 28969b739662Szijun_hu max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 28979b739662Szijun_hu 28989b739662Szijun_hu /* warn if maximum distance is further than 75% of vmalloc space */ 28999b739662Szijun_hu if (max_distance > VMALLOC_TOTAL * 3 / 4) { 29009b739662Szijun_hu pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 29019b739662Szijun_hu max_distance, VMALLOC_TOTAL); 29029b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 29039b739662Szijun_hu /* and fail if we have fallback */ 29049b739662Szijun_hu rc = -EINVAL; 29059b739662Szijun_hu goto out_free_areas; 29069b739662Szijun_hu #endif 290742b64281STejun Heo } 290842b64281STejun Heo 290942b64281STejun Heo /* 291042b64281STejun Heo * Copy data and free unused parts. This should happen after all 291142b64281STejun Heo * allocations are complete; otherwise, we may end up with 291242b64281STejun Heo * overlapping groups. 291342b64281STejun Heo */ 291442b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 291542b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 291642b64281STejun Heo void *ptr = areas[group]; 2917c8826dd5STejun Heo 2918c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 2919c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 2920c8826dd5STejun Heo /* unused unit, free whole */ 2921c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 2922c8826dd5STejun Heo continue; 2923c8826dd5STejun Heo } 2924c8826dd5STejun Heo /* copy and return the unused part */ 2925fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 2926c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 2927c8826dd5STejun Heo } 292866c3a757STejun Heo } 292966c3a757STejun Heo 2930c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 29316ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2932c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 29336ea529a2STejun Heo } 2934c8826dd5STejun Heo 293500206a69SMatteo Croce pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", 293600206a69SMatteo Croce PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, 2937fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 293866c3a757STejun Heo 2939163fa234SKefeng Wang pcpu_setup_first_chunk(ai, base); 2940c8826dd5STejun Heo goto out_free; 2941c8826dd5STejun Heo 2942c8826dd5STejun Heo out_free_areas: 2943c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 2944f851c8d8SMichael Holzheu if (areas[group]) 2945c8826dd5STejun Heo free_fn(areas[group], 2946c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 2947c8826dd5STejun Heo out_free: 2948fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2949c8826dd5STejun Heo if (areas) 2950999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 2951fb435d52STejun Heo return rc; 2952d4b95f80STejun Heo } 29533c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 2954d4b95f80STejun Heo 29553c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 2956d4b95f80STejun Heo /** 295700ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using 
PAGE_SIZE pages 2958d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2959d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 296025985edcSLucas De Marchi * @free_fn: function to free percpu page, always called with PAGE_SIZE 2961d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 2962d4b95f80STejun Heo * 296300ae4064STejun Heo * This is a helper to ease setting up page-remapped first percpu 296400ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 2965d4b95f80STejun Heo * 2966d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 2967d4b95f80STejun Heo * page-by-page into vmalloc area. 2968d4b95f80STejun Heo * 2969d4b95f80STejun Heo * RETURNS: 2970fb435d52STejun Heo * 0 on success, -errno on failure. 2971d4b95f80STejun Heo */ 2972fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 2973d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2974d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 2975d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 2976d4b95f80STejun Heo { 29778f05a6a6STejun Heo static struct vm_struct vm; 2978fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 297900ae4064STejun Heo char psize_str[16]; 2980ce3141a2STejun Heo int unit_pages; 2981d4b95f80STejun Heo size_t pages_size; 2982ce3141a2STejun Heo struct page **pages; 2983163fa234SKefeng Wang int unit, i, j, rc = 0; 29848f606604Szijun_hu int upa; 29858f606604Szijun_hu int nr_g0_units; 2986d4b95f80STejun Heo 298700ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 298800ae4064STejun Heo 29894ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 2990fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2991fd1e8a1fSTejun Heo return PTR_ERR(ai); 2992fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 29938f606604Szijun_hu upa = ai->alloc_size/ai->unit_size; 29948f606604Szijun_hu nr_g0_units = roundup(num_possible_cpus(), upa); 29950b59c25fSIgor Stoppa if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { 29968f606604Szijun_hu pcpu_free_alloc_info(ai); 29978f606604Szijun_hu return -EINVAL; 29988f606604Szijun_hu } 2999fd1e8a1fSTejun Heo 3000fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 3001d4b95f80STejun Heo 3002d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 3003fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 3004fd1e8a1fSTejun Heo sizeof(pages[0])); 30057e1c4e27SMike Rapoport pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); 3006f655f405SMike Rapoport if (!pages) 3007f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 3008f655f405SMike Rapoport pages_size); 3009d4b95f80STejun Heo 30108f05a6a6STejun Heo /* allocate pages */ 3011d4b95f80STejun Heo j = 0; 30128f606604Szijun_hu for (unit = 0; unit < num_possible_cpus(); unit++) { 3013fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 30148f606604Szijun_hu for (i = 0; i < unit_pages; i++) { 3015d4b95f80STejun Heo void *ptr; 3016d4b95f80STejun Heo 30173cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 3018d4b95f80STejun Heo if (!ptr) { 3019870d4b12SJoe Perches pr_warn("failed to allocate %s page for cpu%u\n", 3020598d8091SJoe Perches psize_str, cpu); 3021d4b95f80STejun Heo goto enomem; 3022d4b95f80STejun Heo } 3023f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 3024f528f0b8SCatalin Marinas kmemleak_free(ptr); 
3025ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 3026d4b95f80STejun Heo } 30278f606604Szijun_hu } 3028d4b95f80STejun Heo 30298f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 30308f05a6a6STejun Heo vm.flags = VM_ALLOC; 3031fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 30328f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 30338f05a6a6STejun Heo 3034fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 30351d9d3257STejun Heo unsigned long unit_addr = 3036fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 30378f05a6a6STejun Heo 3038ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 30398f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 30408f05a6a6STejun Heo 30418f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 3042fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 3043ce3141a2STejun Heo unit_pages); 3044fb435d52STejun Heo if (rc < 0) 3045fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 30468f05a6a6STejun Heo 30478f05a6a6STejun Heo /* 30488f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 30498f05a6a6STejun Heo * cache for the linear mapping here - something 30508f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 30518f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 30528f05a6a6STejun Heo * data structures are not set up yet. 30538f05a6a6STejun Heo */ 30548f05a6a6STejun Heo 30558f05a6a6STejun Heo /* copy static data */ 3056fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 305766c3a757STejun Heo } 305866c3a757STejun Heo 305966c3a757STejun Heo /* we're ready, commit */ 306000206a69SMatteo Croce pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", 306100206a69SMatteo Croce unit_pages, psize_str, ai->static_size, 3062fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 306366c3a757STejun Heo 3064163fa234SKefeng Wang pcpu_setup_first_chunk(ai, vm.addr); 3065d4b95f80STejun Heo goto out_free_ar; 3066d4b95f80STejun Heo 3067d4b95f80STejun Heo enomem: 3068d4b95f80STejun Heo while (--j >= 0) 3069ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 3070fb435d52STejun Heo rc = -ENOMEM; 3071d4b95f80STejun Heo out_free_ar: 3072999c17e3SSantosh Shilimkar memblock_free_early(__pa(pages), pages_size); 3073fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 3074fb435d52STejun Heo return rc; 307566c3a757STejun Heo } 30763c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */ 3077d4b95f80STejun Heo 3078bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 30798c4bfc6eSTejun Heo /* 3080bbddff05STejun Heo * Generic SMP percpu area setup. 3081e74e3962STejun Heo * 3082e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 3083e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 3084e74e3962STejun Heo * important because many archs have addressing restrictions and might 3085e74e3962STejun Heo * fail if the percpu area is located far away from the previous 3086e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 3087e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 3088e74e3962STejun Heo * on the physical linear memory mapping which uses large page 3089e74e3962STejun Heo * mappings on applicable archs. 
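 *
 * The __per_cpu_offset[] table initialized below is what the generic
 * per_cpu()/per_cpu_ptr() accessors add to a percpu pointer to reach a
 * particular cpu's copy.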
3090e74e3962STejun Heo */ 3091e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 3092e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 3093e74e3962STejun Heo 3094c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 3095c8826dd5STejun Heo size_t align) 3096c8826dd5STejun Heo { 309726fb3daeSMike Rapoport return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS)); 3098c8826dd5STejun Heo } 3099c8826dd5STejun Heo 3100c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 3101c8826dd5STejun Heo { 3102999c17e3SSantosh Shilimkar memblock_free_early(__pa(ptr), size); 3103c8826dd5STejun Heo } 3104c8826dd5STejun Heo 3105e74e3962STejun Heo void __init setup_per_cpu_areas(void) 3106e74e3962STejun Heo { 3107e74e3962STejun Heo unsigned long delta; 3108e74e3962STejun Heo unsigned int cpu; 3109fb435d52STejun Heo int rc; 3110e74e3962STejun Heo 3111e74e3962STejun Heo /* 3112e74e3962STejun Heo * Always reserve area for module percpu variables. That's 3113e74e3962STejun Heo * what the legacy allocator did. 3114e74e3962STejun Heo */ 3115fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 3116c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 3117c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 3118fb435d52STejun Heo if (rc < 0) 3119bbddff05STejun Heo panic("Failed to initialize percpu areas."); 3120e74e3962STejun Heo 3121e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 3122e74e3962STejun Heo for_each_possible_cpu(cpu) 3123fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 3124e74e3962STejun Heo } 3125e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 3126099a19d9STejun Heo 3127bbddff05STejun Heo #else /* CONFIG_SMP */ 3128bbddff05STejun Heo 3129bbddff05STejun Heo /* 3130bbddff05STejun Heo * UP percpu area setup. 3131bbddff05STejun Heo * 3132bbddff05STejun Heo * UP always uses km-based percpu allocator with identity mapping. 3133bbddff05STejun Heo * Static percpu variables are indistinguishable from the usual static 3134bbddff05STejun Heo * variables and don't require any special preparation. 
3135bbddff05STejun Heo */ 3136bbddff05STejun Heo void __init setup_per_cpu_areas(void) 3137bbddff05STejun Heo { 3138bbddff05STejun Heo const size_t unit_size = 3139bbddff05STejun Heo roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 3140bbddff05STejun Heo PERCPU_DYNAMIC_RESERVE)); 3141bbddff05STejun Heo struct pcpu_alloc_info *ai; 3142bbddff05STejun Heo void *fc; 3143bbddff05STejun Heo 3144bbddff05STejun Heo ai = pcpu_alloc_alloc_info(1, 1); 314526fb3daeSMike Rapoport fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 3146bbddff05STejun Heo if (!ai || !fc) 3147bbddff05STejun Heo panic("Failed to allocate memory for percpu areas."); 3148100d13c3SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 3149100d13c3SCatalin Marinas kmemleak_free(fc); 3150bbddff05STejun Heo 3151bbddff05STejun Heo ai->dyn_size = unit_size; 3152bbddff05STejun Heo ai->unit_size = unit_size; 3153bbddff05STejun Heo ai->atom_size = unit_size; 3154bbddff05STejun Heo ai->alloc_size = unit_size; 3155bbddff05STejun Heo ai->groups[0].nr_units = 1; 3156bbddff05STejun Heo ai->groups[0].cpu_map[0] = 0; 3157bbddff05STejun Heo 3158163fa234SKefeng Wang pcpu_setup_first_chunk(ai, fc); 3159438a5061SNicolas Pitre pcpu_free_alloc_info(ai); 3160bbddff05STejun Heo } 3161bbddff05STejun Heo 3162bbddff05STejun Heo #endif /* CONFIG_SMP */ 3163bbddff05STejun Heo 3164099a19d9STejun Heo /* 31657e8a6304SDennis Zhou (Facebook) * pcpu_nr_pages - calculate total number of populated backing pages 31667e8a6304SDennis Zhou (Facebook) * 31677e8a6304SDennis Zhou (Facebook) * This reflects the number of pages populated to back chunks. Metadata is 31687e8a6304SDennis Zhou (Facebook) * excluded in the number exposed in meminfo as the number of backing pages 31697e8a6304SDennis Zhou (Facebook) * scales with the number of cpus and can quickly outweigh the memory used for 31707e8a6304SDennis Zhou (Facebook) * metadata. It also keeps this calculation nice and simple. 31717e8a6304SDennis Zhou (Facebook) * 31727e8a6304SDennis Zhou (Facebook) * RETURNS: 31737e8a6304SDennis Zhou (Facebook) * Total number of populated backing pages in use by the allocator. 31747e8a6304SDennis Zhou (Facebook) */ 31757e8a6304SDennis Zhou (Facebook) unsigned long pcpu_nr_pages(void) 31767e8a6304SDennis Zhou (Facebook) { 31777e8a6304SDennis Zhou (Facebook) return pcpu_nr_populated * pcpu_nr_units; 31787e8a6304SDennis Zhou (Facebook) } 31797e8a6304SDennis Zhou (Facebook) 31807e8a6304SDennis Zhou (Facebook) /* 31811a4d7607STejun Heo * Percpu allocator is initialized early during boot when neither slab or 31821a4d7607STejun Heo * workqueue is available. Plug async management until everything is up 31831a4d7607STejun Heo * and running. 31841a4d7607STejun Heo */ 31851a4d7607STejun Heo static int __init percpu_enable_async(void) 31861a4d7607STejun Heo { 31871a4d7607STejun Heo pcpu_async_enabled = true; 31881a4d7607STejun Heo return 0; 31891a4d7607STejun Heo } 31901a4d7607STejun Heo subsys_initcall(percpu_enable_async); 3191
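/*
 * Usage sketch, not part of mm/percpu.c: how a typical consumer
 * exercises the allocator implemented above. alloc_percpu(),
 * per_cpu_ptr(), this_cpu_inc() and free_percpu() are the public API
 * from <linux/percpu.h>; the "hypothetical_*" names are made up for
 * illustration.
 */
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct hypothetical_stats {
	u64 events;
};

static struct hypothetical_stats __percpu *hypothetical_stats;

static int __init hypothetical_init(void)
{
	/* ends up in pcpu_alloc(), possibly populating backing pages */
	hypothetical_stats = alloc_percpu(struct hypothetical_stats);
	if (!hypothetical_stats)
		return -ENOMEM;
	return 0;
}

/* fast path: no locking, just a this-cpu increment */
static void hypothetical_count_event(void)
{
	this_cpu_inc(hypothetical_stats->events);
}

/* slow path: fold every cpu's counter together */
static u64 hypothetical_total_events(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(hypothetical_stats, cpu)->events;
	return sum;
}

static void hypothetical_exit(void)
{
	/* frees the area and may wake pcpu_balance_workfn() */
	free_percpu(hypothetical_stats);
}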