// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while all unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use the
 * second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
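
/*
 * A worked example of the addressing scheme above (illustrative numbers
 * only, not a real configuration): with pcpu_unit_size == 64k, the 512
 * byte area at offset 6k exists at
 * base_addr + pcpu_unit_offsets[cpu] + 6k for every possible cpu served
 * by the chunk, so a percpu pointer is essentially the offset and a
 * per-cpu access only needs to add that cpu's unit offset to it.
 */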

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/*
 * The slots are sorted by the size of the biggest contiguous free area.
 * 1-31 bytes share the same slot.
 */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4
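
/*
 * Illustrative arithmetic for the slot mapping (see __pcpu_size_to_slot()
 * below): slot = max(fls(size) - PCPU_SLOT_BASE_SHIFT + 2, 1).  A chunk
 * whose largest contiguous free area is 100 bytes lands in slot
 * max(7 - 5 + 2, 1) == 4 and one with 1024 bytes in slot
 * max(11 - 5 + 2, 1) == 8.  A completely free chunk is special-cased
 * into pcpu_free_slot instead.
 */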

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
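
/*
 * A sketch of how the two default macros compose: both only rebias an
 * address by (pcpu_base_addr - __per_cpu_start), in opposite directions,
 * so for any addr inside the percpu area:
 *
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *	void *back = __pcpu_ptr_to_addr(ptr);
 *
 * leaves back equal to addr again.
 */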

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static int pcpu_free_slot __ro_after_init;
int pcpu_sidelined_slot __ro_after_init;
int pcpu_to_depopulate_slot __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per a unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_free_slot;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
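
/*
 * Example of the conversions (assuming a 4k PAGE_SIZE and a 4 byte
 * PCPU_MIN_ALLOC_SIZE, which make PCPU_BITMAP_BLOCK_BITS == 1024):
 * bit offset 2500 maps to block index 2500 / 1024 == 2 and block offset
 * 2500 & 1023 == 452, while pcpu_block_off_to_off(2, 452) maps back to
 * 2 * 1024 + 452 == 2500.
 */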

/**
 * pcpu_check_block_hint - check against the contig hint
 * @block: block of interest
 * @bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Check to see if the allocation can fit in the block's contig hint.
 * Note, a chunk uses the same hints as a block so this can also check against
 * the chunk's contig hint.
 */
static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
				  size_t align)
{
	int bit_off = ALIGN(block->contig_hint_start, align) -
		block->contig_hint_start;

	return bit_off + bits <= block->contig_hint;
}
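
/*
 * For example (illustrative values): a block with contig_hint_start == 100
 * and contig_hint == 40, asked about a bits == 8, align == 64 request,
 * computes bit_off == ALIGN(100, 64) - 100 == 28; 28 + 8 <= 40, so the
 * hint may fit it, while bits == 16 would give 28 + 16 > 40 and fail.
 */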

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
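
/*
 * Example (illustrative values): with scan_hint == 3 at
 * scan_hint_start == 50, contig_hint_start == 200 and alloc_bits == 8,
 * all three conditions hold, so scanning begins at 50 + 3 == 53 rather
 * than at first_free, skipping the free area known to be too small.
 */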

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First, is there a contig_hint
		 * to check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}
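
/*
 * A sketch of the cross-block aggregation (illustrative values, assuming
 * PCPU_BITMAP_BLOCK_BITS == 1024): if block i's only free space is a 100
 * bit tail, its contig_hint ends at the block boundary, so the
 * interior-hint return above is skipped; the loop records right_free
 * starting at 1024 * (i + 1) - 100 and keeps adding left_free of the
 * following fully free blocks until a partially used block caps the
 * region.
 */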

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
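
/*
 * A minimal usage sketch (hypothetical caller) walking every free region
 * of a chunk with the iterator above:
 *
 *	int bit_off, bits;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		pr_debug("free: off=%d bits=%d\n", bit_off, bits);
 */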

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}
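
/*
 * Typical pairing (sketch only): allocations past PAGE_SIZE transparently
 * fall back to vmalloc'd memory and are freed the same way via kvfree():
 *
 *	void *p = pcpu_mem_zalloc(size, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	pcpu_mem_free(p);
 */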

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		if (move_front)
			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	/* leave isolated chunks in-place */
	if (chunk->isolated)
		return;

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}
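
/*
 * For instance, when a free enlarges a chunk's contig hint enough to move
 * it from slot 3 to slot 5, oslot < nslot holds and the chunk is placed
 * at the head of the slot 5 list, where subsequent allocations will find
 * it first.
 */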

static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (!chunk->isolated) {
		chunk->isolated = true;
		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
	}
	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
}

static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (chunk->isolated) {
		chunk->isolated = false;
		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
		pcpu_chunk_relocate(chunk, -1);
	}
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * a md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
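
/*
 * With half-open intervals, regions that merely touch do not overlap:
 * e.g. pcpu_region_overlap(4, 10, 10, 12) is false since 10 < 10 fails,
 * while pcpu_region_overlap(4, 10, 9, 12) is true.
 */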

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}
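
/*
 * Example of the promotion above (illustrative values): a block with
 * contig_hint == 10 at contig_hint_start == 20 that learns of a free
 * area [40, 60) sees contig == 20 > 10 with start 40 > 20, so the old
 * hint becomes the scan_hint (10 at 20) and [40, 60) becomes the new
 * contig_hint.
 */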

/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocated area */
	int s_off, e_off;	/* block offsets of the allocated area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
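
	/*
	 * Worked example (assuming PCPU_BITMAP_BLOCK_BITS == 1024, not a
	 * guaranteed value): bit_off == 1000, bits == 100 yields
	 * s_index == 0, e_index == 1099 / 1024 == 1, s_off == 1000 and
	 * e_off == (1099 & 1023) + 1 == 76, i.e. the area covers
	 * [1000, 1024) of block 0 plus [0, 76) of block 1.
	 */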

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}
This avoids a blind block 950b185cd0dSDennis Zhou (Facebook) * refresh by making use of the block contig hints. If this fails, it scans 951b185cd0dSDennis Zhou (Facebook) * forward and backward to determine the extent of the free area. This is 952b185cd0dSDennis Zhou (Facebook) * capped at the boundary of blocks. 953b185cd0dSDennis Zhou (Facebook) * 954b185cd0dSDennis Zhou (Facebook) * A chunk update is triggered if a page becomes free, a block becomes free, 955b185cd0dSDennis Zhou (Facebook) * or the free spans across blocks. This tradeoff is to minimize iterating 95692c14cabSDennis Zhou * over the block metadata to update chunk_md->contig_hint. 95792c14cabSDennis Zhou * chunk_md->contig_hint may be off by up to a page, but it will never be more 95892c14cabSDennis Zhou * than the available space. If the contig hint is contained in one block, it 95992c14cabSDennis Zhou * will be accurate. 960ca460b3cSDennis Zhou (Facebook) */ 961ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, 962ca460b3cSDennis Zhou (Facebook) int bits) 963ca460b3cSDennis Zhou (Facebook) { 964b239f7daSDennis Zhou int nr_empty_pages = 0; 965ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *s_block, *e_block, *block; 966ca460b3cSDennis Zhou (Facebook) int s_index, e_index; /* block indexes of the freed allocation */ 967ca460b3cSDennis Zhou (Facebook) int s_off, e_off; /* block offsets of the freed allocation */ 968b185cd0dSDennis Zhou (Facebook) int start, end; /* start and end of the whole free area */ 969ca460b3cSDennis Zhou (Facebook) 970ca460b3cSDennis Zhou (Facebook) /* 971ca460b3cSDennis Zhou (Facebook) * Calculate per block offsets. 972ca460b3cSDennis Zhou (Facebook) * The calculation uses an inclusive range, but the resulting offsets 973ca460b3cSDennis Zhou (Facebook) * are [start, end). e_index always points to the last block in the 974ca460b3cSDennis Zhou (Facebook) * range. 975ca460b3cSDennis Zhou (Facebook) */ 976ca460b3cSDennis Zhou (Facebook) s_index = pcpu_off_to_block_index(bit_off); 977ca460b3cSDennis Zhou (Facebook) e_index = pcpu_off_to_block_index(bit_off + bits - 1); 978ca460b3cSDennis Zhou (Facebook) s_off = pcpu_off_to_block_off(bit_off); 979ca460b3cSDennis Zhou (Facebook) e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; 980ca460b3cSDennis Zhou (Facebook) 981ca460b3cSDennis Zhou (Facebook) s_block = chunk->md_blocks + s_index; 982ca460b3cSDennis Zhou (Facebook) e_block = chunk->md_blocks + e_index; 983ca460b3cSDennis Zhou (Facebook) 984b185cd0dSDennis Zhou (Facebook) /* 985b185cd0dSDennis Zhou (Facebook) * Check if the freed area aligns with the block->contig_hint. 986b185cd0dSDennis Zhou (Facebook) * If it does, then the scan to find the beginning/end of the 987b185cd0dSDennis Zhou (Facebook) * larger free area can be avoided. 988b185cd0dSDennis Zhou (Facebook) * 989b185cd0dSDennis Zhou (Facebook) * start and end refer to beginning and end of the free area 990b185cd0dSDennis Zhou (Facebook) * within each their respective blocks. This is not necessarily 991b185cd0dSDennis Zhou (Facebook) * the entire free area as it may span blocks past the beginning 992b185cd0dSDennis Zhou (Facebook) * or end of the block. 
993b185cd0dSDennis Zhou (Facebook) 	 */
994b185cd0dSDennis Zhou (Facebook) 	start = s_off;
995b185cd0dSDennis Zhou (Facebook) 	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
996b185cd0dSDennis Zhou (Facebook) 		start = s_block->contig_hint_start;
997b185cd0dSDennis Zhou (Facebook) 	} else {
998b185cd0dSDennis Zhou (Facebook) 		/*
999b185cd0dSDennis Zhou (Facebook) 		 * Scan backwards to find the extent of the free area.
1000b185cd0dSDennis Zhou (Facebook) 		 * find_last_bit returns the starting bit, so if the start bit
1001b185cd0dSDennis Zhou (Facebook) 		 * is returned, that means there was no last bit and the
1002b185cd0dSDennis Zhou (Facebook) 		 * remainder of the chunk is free.
1003b185cd0dSDennis Zhou (Facebook) 		 */
1004b185cd0dSDennis Zhou (Facebook) 		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1005b185cd0dSDennis Zhou (Facebook) 					  start);
1006b185cd0dSDennis Zhou (Facebook) 		start = (start == l_bit) ? 0 : l_bit + 1;
1007b185cd0dSDennis Zhou (Facebook) 	}
1008b185cd0dSDennis Zhou (Facebook) 
1009b185cd0dSDennis Zhou (Facebook) 	end = e_off;
1010b185cd0dSDennis Zhou (Facebook) 	if (e_off == e_block->contig_hint_start)
1011b185cd0dSDennis Zhou (Facebook) 		end = e_block->contig_hint_start + e_block->contig_hint;
1012b185cd0dSDennis Zhou (Facebook) 	else
1013b185cd0dSDennis Zhou (Facebook) 		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1014b185cd0dSDennis Zhou (Facebook) 				    PCPU_BITMAP_BLOCK_BITS, end);
1015b185cd0dSDennis Zhou (Facebook) 
1016ca460b3cSDennis Zhou (Facebook) 	/* update s_block */
1017b185cd0dSDennis Zhou (Facebook) 	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
1018b239f7daSDennis Zhou 	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1019b239f7daSDennis Zhou 		nr_empty_pages++;
1020b185cd0dSDennis Zhou (Facebook) 	pcpu_block_update(s_block, start, e_off);
1021ca460b3cSDennis Zhou (Facebook) 
1022ca460b3cSDennis Zhou (Facebook) 	/* the free spans multiple blocks */
1023ca460b3cSDennis Zhou (Facebook) 	if (s_index != e_index) {
1024ca460b3cSDennis Zhou (Facebook) 		/* update e_block */
1025b239f7daSDennis Zhou 		if (end == PCPU_BITMAP_BLOCK_BITS)
1026b239f7daSDennis Zhou 			nr_empty_pages++;
1027b185cd0dSDennis Zhou (Facebook) 		pcpu_block_update(e_block, 0, end);
1028ca460b3cSDennis Zhou (Facebook) 
1029ca460b3cSDennis Zhou (Facebook) 		/* reset md_blocks in the middle */
1030b239f7daSDennis Zhou 		nr_empty_pages += (e_index - s_index - 1);
1031ca460b3cSDennis Zhou (Facebook) 		for (block = s_block + 1; block < e_block; block++) {
1032ca460b3cSDennis Zhou (Facebook) 			block->first_free = 0;
1033382b88e9SDennis Zhou 			block->scan_hint = 0;
1034ca460b3cSDennis Zhou (Facebook) 			block->contig_hint_start = 0;
1035ca460b3cSDennis Zhou (Facebook) 			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1036ca460b3cSDennis Zhou (Facebook) 			block->left_free = PCPU_BITMAP_BLOCK_BITS;
1037ca460b3cSDennis Zhou (Facebook) 			block->right_free = PCPU_BITMAP_BLOCK_BITS;
1038ca460b3cSDennis Zhou (Facebook) 		}
1039ca460b3cSDennis Zhou (Facebook) 	}
1040ca460b3cSDennis Zhou (Facebook) 
1041b239f7daSDennis Zhou 	if (nr_empty_pages)
1042b239f7daSDennis Zhou 		pcpu_update_empty_pages(chunk, nr_empty_pages);
1043b239f7daSDennis Zhou 
1044b185cd0dSDennis Zhou (Facebook) 	/*
1045b239f7daSDennis Zhou 	 * Refresh chunk metadata when the free makes a block free or spans
1046b239f7daSDennis Zhou 	 * across blocks.  The contig_hint may be off by up to a page, but if
1047b239f7daSDennis Zhou 	 * the contig_hint is contained in a block, it will be accurate with
1048b239f7daSDennis Zhou 	 * the else condition below.
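	 *
	 * For example (illustrative): a free whose resulting free region
	 * spans blocks (s_index != e_index) or covers at least a full block
	 * forces pcpu_chunk_refresh_hint(); one whose region stays inside a
	 * single block only needs the cheaper pcpu_block_update() on
	 * chunk_md.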
1049b185cd0dSDennis Zhou (Facebook) 	 */
1050b239f7daSDennis Zhou 	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1051d33d9f3dSDennis Zhou 		pcpu_chunk_refresh_hint(chunk, true);
1052b185cd0dSDennis Zhou (Facebook) 	else
105392c14cabSDennis Zhou 		pcpu_block_update(&chunk->chunk_md,
105492c14cabSDennis Zhou 				  pcpu_block_off_to_off(s_index, start),
105592c14cabSDennis Zhou 				  end);
1056ca460b3cSDennis Zhou (Facebook) }
1057ca460b3cSDennis Zhou (Facebook) 
1058ca460b3cSDennis Zhou (Facebook) /**
105940064aecSDennis Zhou (Facebook)  * pcpu_is_populated - determines if the region is populated
106040064aecSDennis Zhou (Facebook)  * @chunk: chunk of interest
106140064aecSDennis Zhou (Facebook)  * @bit_off: chunk offset
106240064aecSDennis Zhou (Facebook)  * @bits: size of area
106340064aecSDennis Zhou (Facebook)  * @next_off: return value for the next offset to start searching
106440064aecSDennis Zhou (Facebook)  *
106540064aecSDennis Zhou (Facebook)  * For atomic allocations, check if the backing pages are populated.
106640064aecSDennis Zhou (Facebook)  *
106740064aecSDennis Zhou (Facebook)  * RETURNS:
106840064aecSDennis Zhou (Facebook)  * true if the backing pages are populated, false otherwise.
106940064aecSDennis Zhou (Facebook)  * *next_off is set to skip over unpopulated blocks in pcpu_find_block_fit.
107040064aecSDennis Zhou (Facebook)  */
107140064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
107240064aecSDennis Zhou (Facebook) 			      int *next_off)
107340064aecSDennis Zhou (Facebook) {
1074e837dfdeSDennis Zhou 	unsigned int page_start, page_end, rs, re;
107540064aecSDennis Zhou (Facebook) 
107640064aecSDennis Zhou (Facebook) 	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
107740064aecSDennis Zhou (Facebook) 	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
107840064aecSDennis Zhou (Facebook) 
107940064aecSDennis Zhou (Facebook) 	rs = page_start;
1080e837dfdeSDennis Zhou 	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
108140064aecSDennis Zhou (Facebook) 	if (rs >= page_end)
108240064aecSDennis Zhou (Facebook) 		return true;
108340064aecSDennis Zhou (Facebook) 
108440064aecSDennis Zhou (Facebook) 	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
108540064aecSDennis Zhou (Facebook) 	return false;
108640064aecSDennis Zhou (Facebook) }
108740064aecSDennis Zhou (Facebook) 
108840064aecSDennis Zhou (Facebook) /**
108940064aecSDennis Zhou (Facebook)  * pcpu_find_block_fit - finds the block index to start searching
109040064aecSDennis Zhou (Facebook)  * @chunk: chunk of interest
109140064aecSDennis Zhou (Facebook)  * @alloc_bits: size of request in allocation units
109240064aecSDennis Zhou (Facebook)  * @align: alignment of area (max PAGE_SIZE bytes)
109340064aecSDennis Zhou (Facebook)  * @pop_only: use populated regions only
109440064aecSDennis Zhou (Facebook)  *
1095b4c2116cSDennis Zhou (Facebook)  * Given a chunk and an allocation spec, find the offset to begin searching
1096b4c2116cSDennis Zhou (Facebook)  * for a free region.  This iterates over the bitmap metadata blocks to
1097b4c2116cSDennis Zhou (Facebook)  * find an offset that will be guaranteed to fit the requirements.  It is
1098b4c2116cSDennis Zhou (Facebook)  * not quite first fit: if the allocation does not fit in the contig hint
1099b4c2116cSDennis Zhou (Facebook)  * of a block or chunk, it is skipped.  This errs on the side of caution
1100b4c2116cSDennis Zhou (Facebook)  * to prevent excess iteration.
Poor alignment can cause the allocator to 1101b4c2116cSDennis Zhou (Facebook) * skip over blocks and chunks that have valid free areas. 1102b4c2116cSDennis Zhou (Facebook) * 110340064aecSDennis Zhou (Facebook) * RETURNS: 110440064aecSDennis Zhou (Facebook) * The offset in the bitmap to begin searching. 110540064aecSDennis Zhou (Facebook) * -1 if no offset is found. 110640064aecSDennis Zhou (Facebook) */ 110740064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, 110840064aecSDennis Zhou (Facebook) size_t align, bool pop_only) 110940064aecSDennis Zhou (Facebook) { 111092c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1111b4c2116cSDennis Zhou (Facebook) int bit_off, bits, next_off; 111240064aecSDennis Zhou (Facebook) 111313f96637SDennis Zhou (Facebook) /* 11148ea2e1e3SRoman Gushchin * This is an optimization to prevent scanning by assuming if the 11158ea2e1e3SRoman Gushchin * allocation cannot fit in the global hint, there is memory pressure 11168ea2e1e3SRoman Gushchin * and creating a new chunk would happen soon. 111713f96637SDennis Zhou (Facebook) */ 11188ea2e1e3SRoman Gushchin if (!pcpu_check_block_hint(chunk_md, alloc_bits, align)) 111913f96637SDennis Zhou (Facebook) return -1; 112013f96637SDennis Zhou (Facebook) 1121d33d9f3dSDennis Zhou bit_off = pcpu_next_hint(chunk_md, alloc_bits); 1122b4c2116cSDennis Zhou (Facebook) bits = 0; 1123b4c2116cSDennis Zhou (Facebook) pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { 112440064aecSDennis Zhou (Facebook) if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, 1125b4c2116cSDennis Zhou (Facebook) &next_off)) 112640064aecSDennis Zhou (Facebook) break; 112740064aecSDennis Zhou (Facebook) 1128b4c2116cSDennis Zhou (Facebook) bit_off = next_off; 112940064aecSDennis Zhou (Facebook) bits = 0; 113040064aecSDennis Zhou (Facebook) } 113140064aecSDennis Zhou (Facebook) 113240064aecSDennis Zhou (Facebook) if (bit_off == pcpu_chunk_map_bits(chunk)) 113340064aecSDennis Zhou (Facebook) return -1; 113440064aecSDennis Zhou (Facebook) 113540064aecSDennis Zhou (Facebook) return bit_off; 113640064aecSDennis Zhou (Facebook) } 113740064aecSDennis Zhou (Facebook) 1138b89462a9SDennis Zhou /* 1139b89462a9SDennis Zhou * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() 1140b89462a9SDennis Zhou * @map: the address to base the search on 1141b89462a9SDennis Zhou * @size: the bitmap size in bits 1142b89462a9SDennis Zhou * @start: the bitnumber to start searching at 1143b89462a9SDennis Zhou * @nr: the number of zeroed bits we're looking for 1144b89462a9SDennis Zhou * @align_mask: alignment mask for zero area 1145b89462a9SDennis Zhou * @largest_off: offset of the largest area skipped 1146b89462a9SDennis Zhou * @largest_bits: size of the largest area skipped 1147b89462a9SDennis Zhou * 1148b89462a9SDennis Zhou * The @align_mask should be one less than a power of 2. 1149b89462a9SDennis Zhou * 1150b89462a9SDennis Zhou * This is a modified version of bitmap_find_next_zero_area_off() to remember 1151b89462a9SDennis Zhou * the largest area that was skipped. This is imperfect, but in general is 1152b89462a9SDennis Zhou * good enough. The largest remembered region is the largest failed region 1153b89462a9SDennis Zhou * seen. This does not include anything we possibly skipped due to alignment. 1154b89462a9SDennis Zhou * pcpu_block_update_scan() does scan backwards to try and recover what was 1155b89462a9SDennis Zhou * lost to alignment. 
While this can cause scanning to miss earlier possible 1156b89462a9SDennis Zhou * free areas, smaller allocations will eventually fill those holes. 1157b89462a9SDennis Zhou */ 1158b89462a9SDennis Zhou static unsigned long pcpu_find_zero_area(unsigned long *map, 1159b89462a9SDennis Zhou unsigned long size, 1160b89462a9SDennis Zhou unsigned long start, 1161b89462a9SDennis Zhou unsigned long nr, 1162b89462a9SDennis Zhou unsigned long align_mask, 1163b89462a9SDennis Zhou unsigned long *largest_off, 1164b89462a9SDennis Zhou unsigned long *largest_bits) 1165b89462a9SDennis Zhou { 1166b89462a9SDennis Zhou unsigned long index, end, i, area_off, area_bits; 1167b89462a9SDennis Zhou again: 1168b89462a9SDennis Zhou index = find_next_zero_bit(map, size, start); 1169b89462a9SDennis Zhou 1170b89462a9SDennis Zhou /* Align allocation */ 1171b89462a9SDennis Zhou index = __ALIGN_MASK(index, align_mask); 1172b89462a9SDennis Zhou area_off = index; 1173b89462a9SDennis Zhou 1174b89462a9SDennis Zhou end = index + nr; 1175b89462a9SDennis Zhou if (end > size) 1176b89462a9SDennis Zhou return end; 1177b89462a9SDennis Zhou i = find_next_bit(map, end, index); 1178b89462a9SDennis Zhou if (i < end) { 1179b89462a9SDennis Zhou area_bits = i - area_off; 1180b89462a9SDennis Zhou /* remember largest unused area with best alignment */ 1181b89462a9SDennis Zhou if (area_bits > *largest_bits || 1182b89462a9SDennis Zhou (area_bits == *largest_bits && *largest_off && 1183b89462a9SDennis Zhou (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { 1184b89462a9SDennis Zhou *largest_off = area_off; 1185b89462a9SDennis Zhou *largest_bits = area_bits; 1186b89462a9SDennis Zhou } 1187b89462a9SDennis Zhou 1188b89462a9SDennis Zhou start = i + 1; 1189b89462a9SDennis Zhou goto again; 1190b89462a9SDennis Zhou } 1191b89462a9SDennis Zhou return index; 1192b89462a9SDennis Zhou } 1193b89462a9SDennis Zhou 119440064aecSDennis Zhou (Facebook) /** 119540064aecSDennis Zhou (Facebook) * pcpu_alloc_area - allocates an area from a pcpu_chunk 119640064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 119740064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 119840064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE) 119940064aecSDennis Zhou (Facebook) * @start: bit_off to start searching 120040064aecSDennis Zhou (Facebook) * 120140064aecSDennis Zhou (Facebook) * This function takes in a @start offset to begin searching to fit an 1202b4c2116cSDennis Zhou (Facebook) * allocation of @alloc_bits with alignment @align. It needs to scan 1203b4c2116cSDennis Zhou (Facebook) * the allocation map because if it fits within the block's contig hint, 1204b4c2116cSDennis Zhou (Facebook) * @start will be block->first_free. This is an attempt to fill the 1205b4c2116cSDennis Zhou (Facebook) * allocation prior to breaking the contig hint. The allocation and 1206b4c2116cSDennis Zhou (Facebook) * boundary maps are updated accordingly if it confirms a valid 1207b4c2116cSDennis Zhou (Facebook) * free area. 120840064aecSDennis Zhou (Facebook) * 120940064aecSDennis Zhou (Facebook) * RETURNS: 121040064aecSDennis Zhou (Facebook) * Allocated addr offset in @chunk on success. 121140064aecSDennis Zhou (Facebook) * -1 if no matching area is found. 
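 *
 * Note (illustrative): the returned offset is in bytes, i.e. the winning
 * bit offset scaled by PCPU_MIN_ALLOC_SIZE; with the default 4-byte
 * minimum allocation unit, bit 16 maps to byte offset 64.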
121240064aecSDennis Zhou (Facebook) */ 121340064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, 121440064aecSDennis Zhou (Facebook) size_t align, int start) 121540064aecSDennis Zhou (Facebook) { 121692c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 121740064aecSDennis Zhou (Facebook) size_t align_mask = (align) ? (align - 1) : 0; 1218b89462a9SDennis Zhou unsigned long area_off = 0, area_bits = 0; 121940064aecSDennis Zhou (Facebook) int bit_off, end, oslot; 12209f7dcf22STejun Heo 12214f996e23STejun Heo lockdep_assert_held(&pcpu_lock); 12224f996e23STejun Heo 122340064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1224833af842STejun Heo 1225833af842STejun Heo /* 122640064aecSDennis Zhou (Facebook) * Search to find a fit. 1227833af842STejun Heo */ 12288c43004aSDennis Zhou end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, 12298c43004aSDennis Zhou pcpu_chunk_map_bits(chunk)); 1230b89462a9SDennis Zhou bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, 1231b89462a9SDennis Zhou align_mask, &area_off, &area_bits); 123240064aecSDennis Zhou (Facebook) if (bit_off >= end) 1233a16037c8STejun Heo return -1; 1234a16037c8STejun Heo 1235b89462a9SDennis Zhou if (area_bits) 1236b89462a9SDennis Zhou pcpu_block_update_scan(chunk, area_off, area_bits); 1237b89462a9SDennis Zhou 123840064aecSDennis Zhou (Facebook) /* update alloc map */ 123940064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, bit_off, alloc_bits); 1240a16037c8STejun Heo 124140064aecSDennis Zhou (Facebook) /* update boundary map */ 124240064aecSDennis Zhou (Facebook) set_bit(bit_off, chunk->bound_map); 124340064aecSDennis Zhou (Facebook) bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); 124440064aecSDennis Zhou (Facebook) set_bit(bit_off + alloc_bits, chunk->bound_map); 1245a16037c8STejun Heo 124640064aecSDennis Zhou (Facebook) chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; 124740064aecSDennis Zhou (Facebook) 124886b442fbSDennis Zhou (Facebook) /* update first free bit */ 124992c14cabSDennis Zhou if (bit_off == chunk_md->first_free) 125092c14cabSDennis Zhou chunk_md->first_free = find_next_zero_bit( 125186b442fbSDennis Zhou (Facebook) chunk->alloc_map, 125286b442fbSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk), 125386b442fbSDennis Zhou (Facebook) bit_off + alloc_bits); 125486b442fbSDennis Zhou (Facebook) 1255ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); 125640064aecSDennis Zhou (Facebook) 125740064aecSDennis Zhou (Facebook) pcpu_chunk_relocate(chunk, oslot); 125840064aecSDennis Zhou (Facebook) 125940064aecSDennis Zhou (Facebook) return bit_off * PCPU_MIN_ALLOC_SIZE; 1260a16037c8STejun Heo } 1261a16037c8STejun Heo 1262a16037c8STejun Heo /** 126340064aecSDennis Zhou (Facebook) * pcpu_free_area - frees the corresponding offset 1264fbf59bc9STejun Heo * @chunk: chunk of interest 126540064aecSDennis Zhou (Facebook) * @off: addr offset into chunk 1266fbf59bc9STejun Heo * 126740064aecSDennis Zhou (Facebook) * This function determines the size of an allocation to free using 126840064aecSDennis Zhou (Facebook) * the boundary bitmap and clears the allocation map. 12695b32af91SRoman Gushchin * 12705b32af91SRoman Gushchin * RETURNS: 12715b32af91SRoman Gushchin * Number of freed bytes. 
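 *
 * Sketch of the boundary map (illustrative, assuming the default
 * PCPU_MIN_ALLOC_SIZE of 4): an allocation occupying bits [8, 12) leaves
 * bound_map bits 8 and 12 set, so freeing byte offset 32 (bit 8) finds
 * end == 12, clears alloc_map bits [8, 12), and returns
 * 4 * PCPU_MIN_ALLOC_SIZE == 16 bytes.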
1272fbf59bc9STejun Heo */ 12735b32af91SRoman Gushchin static int pcpu_free_area(struct pcpu_chunk *chunk, int off) 1274fbf59bc9STejun Heo { 127592c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 12765b32af91SRoman Gushchin int bit_off, bits, end, oslot, freed; 1277fbf59bc9STejun Heo 12785ccd30e4SDennis Zhou lockdep_assert_held(&pcpu_lock); 127930a5b536SDennis Zhou pcpu_stats_area_dealloc(chunk); 12805ccd30e4SDennis Zhou 128140064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1282723ad1d9SAl Viro 128340064aecSDennis Zhou (Facebook) bit_off = off / PCPU_MIN_ALLOC_SIZE; 1284fbf59bc9STejun Heo 128540064aecSDennis Zhou (Facebook) /* find end index */ 128640064aecSDennis Zhou (Facebook) end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), 128740064aecSDennis Zhou (Facebook) bit_off + 1); 128840064aecSDennis Zhou (Facebook) bits = end - bit_off; 128940064aecSDennis Zhou (Facebook) bitmap_clear(chunk->alloc_map, bit_off, bits); 12903d331ad7SAl Viro 12915b32af91SRoman Gushchin freed = bits * PCPU_MIN_ALLOC_SIZE; 12925b32af91SRoman Gushchin 129340064aecSDennis Zhou (Facebook) /* update metadata */ 12945b32af91SRoman Gushchin chunk->free_bytes += freed; 1295fbf59bc9STejun Heo 129686b442fbSDennis Zhou (Facebook) /* update first free bit */ 129792c14cabSDennis Zhou chunk_md->first_free = min(chunk_md->first_free, bit_off); 129886b442fbSDennis Zhou (Facebook) 1299ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_free(chunk, bit_off, bits); 1300b539b87fSTejun Heo 1301fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 13025b32af91SRoman Gushchin 13035b32af91SRoman Gushchin return freed; 1304fbf59bc9STejun Heo } 1305fbf59bc9STejun Heo 1306047924c9SDennis Zhou static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) 1307047924c9SDennis Zhou { 1308047924c9SDennis Zhou block->scan_hint = 0; 1309047924c9SDennis Zhou block->contig_hint = nr_bits; 1310047924c9SDennis Zhou block->left_free = nr_bits; 1311047924c9SDennis Zhou block->right_free = nr_bits; 1312047924c9SDennis Zhou block->first_free = 0; 1313047924c9SDennis Zhou block->nr_bits = nr_bits; 1314047924c9SDennis Zhou } 1315047924c9SDennis Zhou 1316ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) 1317ca460b3cSDennis Zhou (Facebook) { 1318ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *md_block; 1319ca460b3cSDennis Zhou (Facebook) 132092c14cabSDennis Zhou /* init the chunk's block */ 132192c14cabSDennis Zhou pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); 132292c14cabSDennis Zhou 1323ca460b3cSDennis Zhou (Facebook) for (md_block = chunk->md_blocks; 1324ca460b3cSDennis Zhou (Facebook) md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); 1325047924c9SDennis Zhou md_block++) 1326047924c9SDennis Zhou pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); 1327ca460b3cSDennis Zhou (Facebook) } 1328ca460b3cSDennis Zhou (Facebook) 132940064aecSDennis Zhou (Facebook) /** 133040064aecSDennis Zhou (Facebook) * pcpu_alloc_first_chunk - creates chunks that serve the first chunk 133140064aecSDennis Zhou (Facebook) * @tmp_addr: the start of the region served 133240064aecSDennis Zhou (Facebook) * @map_size: size of the region served 133340064aecSDennis Zhou (Facebook) * 133440064aecSDennis Zhou (Facebook) * This is responsible for creating the chunks that serve the first chunk. 
The
133540064aecSDennis Zhou (Facebook)  * base_addr is page aligned down from @tmp_addr while the region end is page
133640064aecSDennis Zhou (Facebook)  * aligned up.  Offsets are tracked to determine the region served.  All
133740064aecSDennis Zhou (Facebook)  * this is done to appease the bitmap allocator in avoiding partial blocks.
133840064aecSDennis Zhou (Facebook)  *
133940064aecSDennis Zhou (Facebook)  * RETURNS:
134040064aecSDennis Zhou (Facebook)  * Chunk serving the region at @tmp_addr of @map_size.
134140064aecSDennis Zhou (Facebook)  */
1342c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
134340064aecSDennis Zhou (Facebook) 							 int map_size)
134410edf5b0SDennis Zhou (Facebook) {
134510edf5b0SDennis Zhou (Facebook) 	struct pcpu_chunk *chunk;
1346ca460b3cSDennis Zhou (Facebook) 	unsigned long aligned_addr, lcm_align;
134740064aecSDennis Zhou (Facebook) 	int start_offset, offset_bits, region_size, region_bits;
1348f655f405SMike Rapoport 	size_t alloc_size;
1349c0ebfdc3SDennis Zhou (Facebook) 
1350c0ebfdc3SDennis Zhou (Facebook) 	/* region calculations */
1351c0ebfdc3SDennis Zhou (Facebook) 	aligned_addr = tmp_addr & PAGE_MASK;
1352c0ebfdc3SDennis Zhou (Facebook) 
1353c0ebfdc3SDennis Zhou (Facebook) 	start_offset = tmp_addr - aligned_addr;
13546b9d7c8eSDennis Zhou (Facebook) 
1355ca460b3cSDennis Zhou (Facebook) 	/*
1356ca460b3cSDennis Zhou (Facebook) 	 * Align the end of the region with the LCM of PAGE_SIZE and
1357ca460b3cSDennis Zhou (Facebook) 	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
1358ca460b3cSDennis Zhou (Facebook) 	 * the other.
1359ca460b3cSDennis Zhou (Facebook) 	 */
1360ca460b3cSDennis Zhou (Facebook) 	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1361ca460b3cSDennis Zhou (Facebook) 	region_size = ALIGN(start_offset + map_size, lcm_align);
136210edf5b0SDennis Zhou (Facebook) 
1363c0ebfdc3SDennis Zhou (Facebook) 	/* allocate chunk */
136461cf93d3SDennis Zhou 	alloc_size = struct_size(chunk, populated,
136561cf93d3SDennis Zhou 				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1366f655f405SMike Rapoport 	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1367f655f405SMike Rapoport 	if (!chunk)
1368f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1369f655f405SMike Rapoport 		      alloc_size);
1370c0ebfdc3SDennis Zhou (Facebook) 
137110edf5b0SDennis Zhou (Facebook) 	INIT_LIST_HEAD(&chunk->list);
1372c0ebfdc3SDennis Zhou (Facebook) 
1373c0ebfdc3SDennis Zhou (Facebook) 	chunk->base_addr = (void *)aligned_addr;
137410edf5b0SDennis Zhou (Facebook) 	chunk->start_offset = start_offset;
13756b9d7c8eSDennis Zhou (Facebook) 	chunk->end_offset = region_size - chunk->start_offset - map_size;
1376c0ebfdc3SDennis Zhou (Facebook) 
13778ab16c43SDennis Zhou (Facebook) 	chunk->nr_pages = region_size >> PAGE_SHIFT;
137840064aecSDennis Zhou (Facebook) 	region_bits = pcpu_chunk_map_bits(chunk);
1379c0ebfdc3SDennis Zhou (Facebook) 
1380f655f405SMike Rapoport 	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1381f655f405SMike Rapoport 	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1382f655f405SMike Rapoport 	if (!chunk->alloc_map)
1383f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1384f655f405SMike Rapoport 		      alloc_size);
1385f655f405SMike Rapoport 
1386f655f405SMike Rapoport 	alloc_size =
1387f655f405SMike Rapoport 		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1388f655f405SMike Rapoport 	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1389f655f405SMike Rapoport if (!chunk->bound_map) 1390f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1391f655f405SMike Rapoport alloc_size); 1392f655f405SMike Rapoport 1393f655f405SMike Rapoport alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); 1394f655f405SMike Rapoport chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1395f655f405SMike Rapoport if (!chunk->md_blocks) 1396f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1397f655f405SMike Rapoport alloc_size); 1398f655f405SMike Rapoport 13993c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 1400faf65ddeSRoman Gushchin /* first chunk is free to use */ 14013c7be18aSRoman Gushchin chunk->obj_cgroups = NULL; 14023c7be18aSRoman Gushchin #endif 1403ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 140410edf5b0SDennis Zhou (Facebook) 140510edf5b0SDennis Zhou (Facebook) /* manage populated page bitmap */ 140610edf5b0SDennis Zhou (Facebook) chunk->immutable = true; 14078ab16c43SDennis Zhou (Facebook) bitmap_fill(chunk->populated, chunk->nr_pages); 14088ab16c43SDennis Zhou (Facebook) chunk->nr_populated = chunk->nr_pages; 1409b239f7daSDennis Zhou chunk->nr_empty_pop_pages = chunk->nr_pages; 141010edf5b0SDennis Zhou (Facebook) 141140064aecSDennis Zhou (Facebook) chunk->free_bytes = map_size; 1412c0ebfdc3SDennis Zhou (Facebook) 1413c0ebfdc3SDennis Zhou (Facebook) if (chunk->start_offset) { 1414c0ebfdc3SDennis Zhou (Facebook) /* hide the beginning of the bitmap */ 141540064aecSDennis Zhou (Facebook) offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; 141640064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 0, offset_bits); 141740064aecSDennis Zhou (Facebook) set_bit(0, chunk->bound_map); 141840064aecSDennis Zhou (Facebook) set_bit(offset_bits, chunk->bound_map); 1419ca460b3cSDennis Zhou (Facebook) 142092c14cabSDennis Zhou chunk->chunk_md.first_free = offset_bits; 142186b442fbSDennis Zhou (Facebook) 1422ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, 0, offset_bits); 1423c0ebfdc3SDennis Zhou (Facebook) } 1424c0ebfdc3SDennis Zhou (Facebook) 14256b9d7c8eSDennis Zhou (Facebook) if (chunk->end_offset) { 14266b9d7c8eSDennis Zhou (Facebook) /* hide the end of the bitmap */ 142740064aecSDennis Zhou (Facebook) offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; 142840064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 142940064aecSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk) - offset_bits, 143040064aecSDennis Zhou (Facebook) offset_bits); 143140064aecSDennis Zhou (Facebook) set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, 143240064aecSDennis Zhou (Facebook) chunk->bound_map); 143340064aecSDennis Zhou (Facebook) set_bit(region_bits, chunk->bound_map); 14346b9d7c8eSDennis Zhou (Facebook) 1435ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) 1436ca460b3cSDennis Zhou (Facebook) - offset_bits, offset_bits); 1437ca460b3cSDennis Zhou (Facebook) } 143840064aecSDennis Zhou (Facebook) 143910edf5b0SDennis Zhou (Facebook) return chunk; 144010edf5b0SDennis Zhou (Facebook) } 144110edf5b0SDennis Zhou (Facebook) 1442faf65ddeSRoman Gushchin static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) 14436081089fSTejun Heo { 14446081089fSTejun Heo struct pcpu_chunk *chunk; 144540064aecSDennis Zhou (Facebook) int region_bits; 14466081089fSTejun Heo 144747504ee0SDennis Zhou chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); 14486081089fSTejun Heo if (!chunk) 14496081089fSTejun Heo 
return NULL; 14506081089fSTejun Heo 14516081089fSTejun Heo INIT_LIST_HEAD(&chunk->list); 1452c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages; 145340064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 145440064aecSDennis Zhou (Facebook) 145540064aecSDennis Zhou (Facebook) chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * 145647504ee0SDennis Zhou sizeof(chunk->alloc_map[0]), gfp); 145740064aecSDennis Zhou (Facebook) if (!chunk->alloc_map) 145840064aecSDennis Zhou (Facebook) goto alloc_map_fail; 145940064aecSDennis Zhou (Facebook) 146040064aecSDennis Zhou (Facebook) chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * 146147504ee0SDennis Zhou sizeof(chunk->bound_map[0]), gfp); 146240064aecSDennis Zhou (Facebook) if (!chunk->bound_map) 146340064aecSDennis Zhou (Facebook) goto bound_map_fail; 146440064aecSDennis Zhou (Facebook) 1465ca460b3cSDennis Zhou (Facebook) chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * 146647504ee0SDennis Zhou sizeof(chunk->md_blocks[0]), gfp); 1467ca460b3cSDennis Zhou (Facebook) if (!chunk->md_blocks) 1468ca460b3cSDennis Zhou (Facebook) goto md_blocks_fail; 1469ca460b3cSDennis Zhou (Facebook) 14703c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 1471faf65ddeSRoman Gushchin if (!mem_cgroup_kmem_disabled()) { 14723c7be18aSRoman Gushchin chunk->obj_cgroups = 14733c7be18aSRoman Gushchin pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * 14743c7be18aSRoman Gushchin sizeof(struct obj_cgroup *), gfp); 14753c7be18aSRoman Gushchin if (!chunk->obj_cgroups) 14763c7be18aSRoman Gushchin goto objcg_fail; 14773c7be18aSRoman Gushchin } 14783c7be18aSRoman Gushchin #endif 14793c7be18aSRoman Gushchin 1480ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 1481ca460b3cSDennis Zhou (Facebook) 148240064aecSDennis Zhou (Facebook) /* init metadata */ 148340064aecSDennis Zhou (Facebook) chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; 1484c0ebfdc3SDennis Zhou (Facebook) 14856081089fSTejun Heo return chunk; 148640064aecSDennis Zhou (Facebook) 14873c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 14883c7be18aSRoman Gushchin objcg_fail: 14893c7be18aSRoman Gushchin pcpu_mem_free(chunk->md_blocks); 14903c7be18aSRoman Gushchin #endif 1491ca460b3cSDennis Zhou (Facebook) md_blocks_fail: 1492ca460b3cSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 149340064aecSDennis Zhou (Facebook) bound_map_fail: 149440064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 149540064aecSDennis Zhou (Facebook) alloc_map_fail: 149640064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk); 149740064aecSDennis Zhou (Facebook) 149840064aecSDennis Zhou (Facebook) return NULL; 14996081089fSTejun Heo } 15006081089fSTejun Heo 15016081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk) 15026081089fSTejun Heo { 15036081089fSTejun Heo if (!chunk) 15046081089fSTejun Heo return; 15053c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 15063c7be18aSRoman Gushchin pcpu_mem_free(chunk->obj_cgroups); 15073c7be18aSRoman Gushchin #endif 15086685b357SMike Rapoport pcpu_mem_free(chunk->md_blocks); 150940064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 151040064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 15111d5cfdb0STetsuo Handa pcpu_mem_free(chunk); 15126081089fSTejun Heo } 15136081089fSTejun Heo 1514b539b87fSTejun Heo /** 1515b539b87fSTejun Heo * pcpu_chunk_populated - post-population bookkeeping 1516b539b87fSTejun Heo * @chunk: pcpu_chunk which got populated 1517b539b87fSTejun Heo * @page_start: the 
start page
1518b539b87fSTejun Heo  * @page_end: the end page
1519b539b87fSTejun Heo  *
1520b539b87fSTejun Heo  * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1521b539b87fSTejun Heo  * the bookkeeping information accordingly.  Must be called after each
1522b539b87fSTejun Heo  * successful population.
152340064aecSDennis Zhou (Facebook)  *
152440064aecSDennis Zhou (Facebook)  * Empty page accounting is updated here as well, via
152540064aecSDennis Zhou (Facebook)  * pcpu_update_empty_pages().
1526b539b87fSTejun Heo  */
152740064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1528b239f7daSDennis Zhou 				 int page_end)
1529b539b87fSTejun Heo {
1530b539b87fSTejun Heo 	int nr = page_end - page_start;
1531b539b87fSTejun Heo 
1532b539b87fSTejun Heo 	lockdep_assert_held(&pcpu_lock);
1533b539b87fSTejun Heo 
1534b539b87fSTejun Heo 	bitmap_set(chunk->populated, page_start, nr);
1535b539b87fSTejun Heo 	chunk->nr_populated += nr;
15367e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated += nr;
153740064aecSDennis Zhou (Facebook) 
1538b239f7daSDennis Zhou 	pcpu_update_empty_pages(chunk, nr);
153940064aecSDennis Zhou (Facebook) }
1540b539b87fSTejun Heo 
1541b539b87fSTejun Heo /**
1542b539b87fSTejun Heo  * pcpu_chunk_depopulated - post-depopulation bookkeeping
1543b539b87fSTejun Heo  * @chunk: pcpu_chunk which got depopulated
1544b539b87fSTejun Heo  * @page_start: the start page
1545b539b87fSTejun Heo  * @page_end: the end page
1546b539b87fSTejun Heo  *
1547b539b87fSTejun Heo  * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1548b539b87fSTejun Heo  * Update the bookkeeping information accordingly.  Must be called after
1549b539b87fSTejun Heo  * each successful depopulation.
1550b539b87fSTejun Heo  */
1551b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1552b539b87fSTejun Heo 				   int page_start, int page_end)
1553b539b87fSTejun Heo {
1554b539b87fSTejun Heo 	int nr = page_end - page_start;
1555b539b87fSTejun Heo 
1556b539b87fSTejun Heo 	lockdep_assert_held(&pcpu_lock);
1557b539b87fSTejun Heo 
1558b539b87fSTejun Heo 	bitmap_clear(chunk->populated, page_start, nr);
1559b539b87fSTejun Heo 	chunk->nr_populated -= nr;
15607e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated -= nr;
1561b239f7daSDennis Zhou 
1562b239f7daSDennis Zhou 	pcpu_update_empty_pages(chunk, -nr);
1563b539b87fSTejun Heo }
1564b539b87fSTejun Heo 
1565fbf59bc9STejun Heo /*
15669f645532STejun Heo  * Chunk management implementation.
1567fbf59bc9STejun Heo  *
15689f645532STejun Heo  * To allow different implementations, chunk alloc/free and
15699f645532STejun Heo  * [de]population are implemented in a separate file which is pulled
15709f645532STejun Heo  * into this file and compiled together.  The following functions
15719f645532STejun Heo  * should be implemented.
1572ccea34b5STejun Heo * 15739f645532STejun Heo * pcpu_populate_chunk - populate the specified range of a chunk 15749f645532STejun Heo * pcpu_depopulate_chunk - depopulate the specified range of a chunk 1575*93274f1dSDennis Zhou * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk 15769f645532STejun Heo * pcpu_create_chunk - create a new chunk 15779f645532STejun Heo * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop 15789f645532STejun Heo * pcpu_addr_to_page - translate address to physical address 15799f645532STejun Heo * pcpu_verify_alloc_info - check alloc_info is acceptable during init 1580fbf59bc9STejun Heo */ 158115d9f3d1SDennis Zhou static int pcpu_populate_chunk(struct pcpu_chunk *chunk, 158247504ee0SDennis Zhou int page_start, int page_end, gfp_t gfp); 158315d9f3d1SDennis Zhou static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, 158415d9f3d1SDennis Zhou int page_start, int page_end); 1585*93274f1dSDennis Zhou static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, 1586*93274f1dSDennis Zhou int page_start, int page_end); 1587faf65ddeSRoman Gushchin static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp); 15889f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); 15899f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr); 15909f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); 1591fbf59bc9STejun Heo 1592b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM 1593b0c9778bSTejun Heo #include "percpu-km.c" 1594b0c9778bSTejun Heo #else 15959f645532STejun Heo #include "percpu-vm.c" 1596b0c9778bSTejun Heo #endif 1597fbf59bc9STejun Heo 1598fbf59bc9STejun Heo /** 159988999a89STejun Heo * pcpu_chunk_addr_search - determine chunk containing specified address 160088999a89STejun Heo * @addr: address for which the chunk needs to be determined. 160188999a89STejun Heo * 1602c0ebfdc3SDennis Zhou (Facebook) * This is an internal function that handles all but static allocations. 1603c0ebfdc3SDennis Zhou (Facebook) * Static percpu address values should never be passed into the allocator. 1604c0ebfdc3SDennis Zhou (Facebook) * 160588999a89STejun Heo * RETURNS: 160688999a89STejun Heo * The address of the found chunk. 160788999a89STejun Heo */ 160888999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 160988999a89STejun Heo { 1610c0ebfdc3SDennis Zhou (Facebook) /* is it in the dynamic region (first chunk)? */ 1611560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) 1612c0ebfdc3SDennis Zhou (Facebook) return pcpu_first_chunk; 1613c0ebfdc3SDennis Zhou (Facebook) 1614c0ebfdc3SDennis Zhou (Facebook) /* is it in the reserved region? */ 1615560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) 161688999a89STejun Heo return pcpu_reserved_chunk; 161788999a89STejun Heo 161888999a89STejun Heo /* 161988999a89STejun Heo * The address is relative to unit0 which might be unused and 162088999a89STejun Heo * thus unmapped. Offset the address to the unit space of the 162188999a89STejun Heo * current processor before looking it up in the vmalloc 162288999a89STejun Heo * space. Note that any possible cpu id can be used here, so 162388999a89STejun Heo * there's no need to worry about preemption or cpu hotplug. 
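	 *
	 * (Illustrative: after the offset below, the address falls inside
	 * the running CPU's unit, which is certainly mapped, and the
	 * backing page's index field leads back to the owning pcpu_chunk.)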
162488999a89STejun Heo */ 162588999a89STejun Heo addr += pcpu_unit_offsets[raw_smp_processor_id()]; 16269f645532STejun Heo return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 162788999a89STejun Heo } 162888999a89STejun Heo 16293c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 1630faf65ddeSRoman Gushchin static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, 16313c7be18aSRoman Gushchin struct obj_cgroup **objcgp) 16323c7be18aSRoman Gushchin { 16333c7be18aSRoman Gushchin struct obj_cgroup *objcg; 16343c7be18aSRoman Gushchin 1635279c3393SRoman Gushchin if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT)) 1636faf65ddeSRoman Gushchin return true; 16373c7be18aSRoman Gushchin 16383c7be18aSRoman Gushchin objcg = get_obj_cgroup_from_current(); 16393c7be18aSRoman Gushchin if (!objcg) 1640faf65ddeSRoman Gushchin return true; 16413c7be18aSRoman Gushchin 16423c7be18aSRoman Gushchin if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) { 16433c7be18aSRoman Gushchin obj_cgroup_put(objcg); 1644faf65ddeSRoman Gushchin return false; 16453c7be18aSRoman Gushchin } 16463c7be18aSRoman Gushchin 16473c7be18aSRoman Gushchin *objcgp = objcg; 1648faf65ddeSRoman Gushchin return true; 16493c7be18aSRoman Gushchin } 16503c7be18aSRoman Gushchin 16513c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 16523c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off, 16533c7be18aSRoman Gushchin size_t size) 16543c7be18aSRoman Gushchin { 16553c7be18aSRoman Gushchin if (!objcg) 16563c7be18aSRoman Gushchin return; 16573c7be18aSRoman Gushchin 1658faf65ddeSRoman Gushchin if (likely(chunk && chunk->obj_cgroups)) { 16593c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; 1660772616b0SRoman Gushchin 1661772616b0SRoman Gushchin rcu_read_lock(); 1662772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 1663772616b0SRoman Gushchin size * num_possible_cpus()); 1664772616b0SRoman Gushchin rcu_read_unlock(); 16653c7be18aSRoman Gushchin } else { 16663c7be18aSRoman Gushchin obj_cgroup_uncharge(objcg, size * num_possible_cpus()); 16673c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16683c7be18aSRoman Gushchin } 16693c7be18aSRoman Gushchin } 16703c7be18aSRoman Gushchin 16713c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 16723c7be18aSRoman Gushchin { 16733c7be18aSRoman Gushchin struct obj_cgroup *objcg; 16743c7be18aSRoman Gushchin 1675faf65ddeSRoman Gushchin if (unlikely(!chunk->obj_cgroups)) 16763c7be18aSRoman Gushchin return; 16773c7be18aSRoman Gushchin 16783c7be18aSRoman Gushchin objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; 1679faf65ddeSRoman Gushchin if (!objcg) 1680faf65ddeSRoman Gushchin return; 16813c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; 16823c7be18aSRoman Gushchin 16833c7be18aSRoman Gushchin obj_cgroup_uncharge(objcg, size * num_possible_cpus()); 16843c7be18aSRoman Gushchin 1685772616b0SRoman Gushchin rcu_read_lock(); 1686772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 1687772616b0SRoman Gushchin -(size * num_possible_cpus())); 1688772616b0SRoman Gushchin rcu_read_unlock(); 1689772616b0SRoman Gushchin 16903c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16913c7be18aSRoman Gushchin } 16923c7be18aSRoman Gushchin 16933c7be18aSRoman Gushchin #else /* CONFIG_MEMCG_KMEM */ 1694faf65ddeSRoman Gushchin static bool 16953c7be18aSRoman Gushchin pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct 
obj_cgroup **objcgp) 16963c7be18aSRoman Gushchin { 1697faf65ddeSRoman Gushchin return true; 16983c7be18aSRoman Gushchin } 16993c7be18aSRoman Gushchin 17003c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 17013c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off, 17023c7be18aSRoman Gushchin size_t size) 17033c7be18aSRoman Gushchin { 17043c7be18aSRoman Gushchin } 17053c7be18aSRoman Gushchin 17063c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 17073c7be18aSRoman Gushchin { 17083c7be18aSRoman Gushchin } 17093c7be18aSRoman Gushchin #endif /* CONFIG_MEMCG_KMEM */ 17103c7be18aSRoman Gushchin 171188999a89STejun Heo /** 1712edcb4639STejun Heo * pcpu_alloc - the percpu allocator 1713cae3aeb8STejun Heo * @size: size of area to allocate in bytes 1714fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE) 1715edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available 17165835d96eSTejun Heo * @gfp: allocation flags 1717fbf59bc9STejun Heo * 17185835d96eSTejun Heo * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 17190ea7eeecSDaniel Borkmann * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN 17200ea7eeecSDaniel Borkmann * then no warning will be triggered on invalid or failed allocation 17210ea7eeecSDaniel Borkmann * requests. 1722fbf59bc9STejun Heo * 1723fbf59bc9STejun Heo * RETURNS: 1724fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1725fbf59bc9STejun Heo */ 17265835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 17275835d96eSTejun Heo gfp_t gfp) 1728fbf59bc9STejun Heo { 172928307d93SFilipe Manana gfp_t pcpu_gfp; 173028307d93SFilipe Manana bool is_atomic; 173128307d93SFilipe Manana bool do_warn; 17323c7be18aSRoman Gushchin struct obj_cgroup *objcg = NULL; 1733f2badb0cSTejun Heo static int warn_limit = 10; 17348744d859SDennis Zhou struct pcpu_chunk *chunk, *next; 1735f2badb0cSTejun Heo const char *err; 173640064aecSDennis Zhou (Facebook) int slot, off, cpu, ret; 1737403a91b1SJiri Kosina unsigned long flags; 1738f528f0b8SCatalin Marinas void __percpu *ptr; 173940064aecSDennis Zhou (Facebook) size_t bits, bit_align; 1740fbf59bc9STejun Heo 174128307d93SFilipe Manana gfp = current_gfp_context(gfp); 174228307d93SFilipe Manana /* whitelisted flags that can be passed to the backing allocators */ 174328307d93SFilipe Manana pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 174428307d93SFilipe Manana is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 174528307d93SFilipe Manana do_warn = !(gfp & __GFP_NOWARN); 174628307d93SFilipe Manana 1747723ad1d9SAl Viro /* 174840064aecSDennis Zhou (Facebook) * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, 174940064aecSDennis Zhou (Facebook) * therefore alignment must be a minimum of that many bytes. 175040064aecSDennis Zhou (Facebook) * An allocation may have internal fragmentation from rounding up 175140064aecSDennis Zhou (Facebook) * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. 
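	 *
	 * For example (illustrative, default PCPU_MIN_ALLOC_SIZE of 4
	 * bytes): a 5-byte request is rounded up to 8 bytes and a 1-byte
	 * alignment is raised to 4.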
1752723ad1d9SAl Viro */ 1753d2f3c384SDennis Zhou (Facebook) if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) 1754d2f3c384SDennis Zhou (Facebook) align = PCPU_MIN_ALLOC_SIZE; 1755723ad1d9SAl Viro 1756d2f3c384SDennis Zhou (Facebook) size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); 175740064aecSDennis Zhou (Facebook) bits = size >> PCPU_MIN_ALLOC_SHIFT; 175840064aecSDennis Zhou (Facebook) bit_align = align >> PCPU_MIN_ALLOC_SHIFT; 17592f69fa82SViro 17603ca45a46Szijun_hu if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 17613ca45a46Szijun_hu !is_power_of_2(align))) { 17620ea7eeecSDaniel Borkmann WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1763756a025fSJoe Perches size, align); 1764fbf59bc9STejun Heo return NULL; 1765fbf59bc9STejun Heo } 1766fbf59bc9STejun Heo 1767faf65ddeSRoman Gushchin if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg))) 17683c7be18aSRoman Gushchin return NULL; 17693c7be18aSRoman Gushchin 1770f52ba1feSKirill Tkhai if (!is_atomic) { 1771f52ba1feSKirill Tkhai /* 1772f52ba1feSKirill Tkhai * pcpu_balance_workfn() allocates memory under this mutex, 1773f52ba1feSKirill Tkhai * and it may wait for memory reclaim. Allow current task 1774f52ba1feSKirill Tkhai * to become OOM victim, in case of memory pressure. 1775f52ba1feSKirill Tkhai */ 17763c7be18aSRoman Gushchin if (gfp & __GFP_NOFAIL) { 17776710e594STejun Heo mutex_lock(&pcpu_alloc_mutex); 17783c7be18aSRoman Gushchin } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { 17793c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 1780f52ba1feSKirill Tkhai return NULL; 1781f52ba1feSKirill Tkhai } 17823c7be18aSRoman Gushchin } 17836710e594STejun Heo 1784403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1785fbf59bc9STejun Heo 1786edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */ 1787edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) { 1788edcb4639STejun Heo chunk = pcpu_reserved_chunk; 1789833af842STejun Heo 179040064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); 179140064aecSDennis Zhou (Facebook) if (off < 0) { 1792833af842STejun Heo err = "alloc from reserved chunk failed"; 1793ccea34b5STejun Heo goto fail_unlock; 1794f2badb0cSTejun Heo } 1795833af842STejun Heo 179640064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1797edcb4639STejun Heo if (off >= 0) 1798edcb4639STejun Heo goto area_found; 1799833af842STejun Heo 1800f2badb0cSTejun Heo err = "alloc from reserved chunk failed"; 1801ccea34b5STejun Heo goto fail_unlock; 1802edcb4639STejun Heo } 1803edcb4639STejun Heo 1804ccea34b5STejun Heo restart: 1805edcb4639STejun Heo /* search through normal chunks */ 1806f1833241SRoman Gushchin for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) { 1807faf65ddeSRoman Gushchin list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot], 1808faf65ddeSRoman Gushchin list) { 180940064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, 181040064aecSDennis Zhou (Facebook) is_atomic); 18118744d859SDennis Zhou if (off < 0) { 18128744d859SDennis Zhou if (slot < PCPU_SLOT_FAIL_THRESHOLD) 18138744d859SDennis Zhou pcpu_chunk_move(chunk, 0); 1814fbf59bc9STejun Heo continue; 18158744d859SDennis Zhou } 1816ccea34b5STejun Heo 181740064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1818f1833241SRoman Gushchin if (off >= 0) { 1819f1833241SRoman Gushchin pcpu_reintegrate_chunk(chunk); 
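				/*
				 * The chunk may have been sidelined for
				 * depopulation; serving an allocation makes
				 * it useful again, so return it to the
				 * normal chunk lists.
				 */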
1820fbf59bc9STejun Heo goto area_found; 1821f1833241SRoman Gushchin } 1822fbf59bc9STejun Heo } 1823fbf59bc9STejun Heo } 1824fbf59bc9STejun Heo 1825403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1826ccea34b5STejun Heo 1827b38d08f3STejun Heo /* 1828b38d08f3STejun Heo * No space left. Create a new chunk. We don't want multiple 1829b38d08f3STejun Heo * tasks to create chunks simultaneously. Serialize and create iff 1830b38d08f3STejun Heo * there's still no empty chunk after grabbing the mutex. 1831b38d08f3STejun Heo */ 183211df02bfSDennis Zhou if (is_atomic) { 183311df02bfSDennis Zhou err = "atomic alloc failed, no space left"; 18345835d96eSTejun Heo goto fail; 183511df02bfSDennis Zhou } 18365835d96eSTejun Heo 1837faf65ddeSRoman Gushchin if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) { 1838faf65ddeSRoman Gushchin chunk = pcpu_create_chunk(pcpu_gfp); 1839f2badb0cSTejun Heo if (!chunk) { 1840f2badb0cSTejun Heo err = "failed to allocate new chunk"; 1841b38d08f3STejun Heo goto fail; 1842f2badb0cSTejun Heo } 1843ccea34b5STejun Heo 1844403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1845fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1846b38d08f3STejun Heo } else { 1847b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1848b38d08f3STejun Heo } 1849b38d08f3STejun Heo 1850ccea34b5STejun Heo goto restart; 1851fbf59bc9STejun Heo 1852fbf59bc9STejun Heo area_found: 185330a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size); 1854403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1855ccea34b5STejun Heo 1856dca49645STejun Heo /* populate if not all pages are already there */ 18575835d96eSTejun Heo if (!is_atomic) { 1858e837dfdeSDennis Zhou unsigned int page_start, page_end, rs, re; 1859e04d3208STejun Heo 1860dca49645STejun Heo page_start = PFN_DOWN(off); 1861dca49645STejun Heo page_end = PFN_UP(off + size); 1862dca49645STejun Heo 1863e837dfdeSDennis Zhou bitmap_for_each_clear_region(chunk->populated, rs, re, 186491e914c5SDennis Zhou (Facebook) page_start, page_end) { 1865dca49645STejun Heo WARN_ON(chunk->immutable); 1866dca49645STejun Heo 1867554fef1cSDennis Zhou ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); 1868b38d08f3STejun Heo 1869403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1870b38d08f3STejun Heo if (ret) { 187140064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off); 1872f2badb0cSTejun Heo err = "failed to populate"; 1873ccea34b5STejun Heo goto fail_unlock; 1874fbf59bc9STejun Heo } 1875b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, re); 1876b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1877dca49645STejun Heo } 1878dca49645STejun Heo 1879ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1880e04d3208STejun Heo } 1881ccea34b5STejun Heo 1882faf65ddeSRoman Gushchin if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 18831a4d7607STejun Heo pcpu_schedule_balance_work(); 18841a4d7607STejun Heo 1885dca49645STejun Heo /* clear the areas and return address relative to base address */ 1886dca49645STejun Heo for_each_possible_cpu(cpu) 1887dca49645STejun Heo memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1888dca49645STejun Heo 1889f528f0b8SCatalin Marinas ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 18908a8c35faSLarry Finger kmemleak_alloc_percpu(ptr, size, gfp); 1891df95e795SDennis Zhou 1892df95e795SDennis Zhou trace_percpu_alloc_percpu(reserved, is_atomic, size, align, 1893df95e795SDennis Zhou chunk->base_addr, off, ptr); 1894df95e795SDennis Zhou 18953c7be18aSRoman Gushchin 
pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); 18963c7be18aSRoman Gushchin 1897f528f0b8SCatalin Marinas return ptr; 1898ccea34b5STejun Heo 1899ccea34b5STejun Heo fail_unlock: 1900403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1901b38d08f3STejun Heo fail: 1902df95e795SDennis Zhou trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1903df95e795SDennis Zhou 19040ea7eeecSDaniel Borkmann if (!is_atomic && do_warn && warn_limit) { 1905870d4b12SJoe Perches pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 19065835d96eSTejun Heo size, align, is_atomic, err); 1907f2badb0cSTejun Heo dump_stack(); 1908f2badb0cSTejun Heo if (!--warn_limit) 1909870d4b12SJoe Perches pr_info("limit reached, disable warning\n"); 1910f2badb0cSTejun Heo } 19111a4d7607STejun Heo if (is_atomic) { 1912f0953a1bSIngo Molnar /* see the flag handling in pcpu_balance_workfn() */ 19131a4d7607STejun Heo pcpu_atomic_alloc_failed = true; 19141a4d7607STejun Heo pcpu_schedule_balance_work(); 19156710e594STejun Heo } else { 19166710e594STejun Heo mutex_unlock(&pcpu_alloc_mutex); 19171a4d7607STejun Heo } 19183c7be18aSRoman Gushchin 19193c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 19203c7be18aSRoman Gushchin 1921ccea34b5STejun Heo return NULL; 1922fbf59bc9STejun Heo } 1923edcb4639STejun Heo 1924edcb4639STejun Heo /** 19255835d96eSTejun Heo * __alloc_percpu_gfp - allocate dynamic percpu area 1926edcb4639STejun Heo * @size: size of area to allocate in bytes 1927edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 19285835d96eSTejun Heo * @gfp: allocation flags 1929edcb4639STejun Heo * 19305835d96eSTejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align. If 19315835d96eSTejun Heo * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 19320ea7eeecSDaniel Borkmann * be called from any context but is a lot more likely to fail. If @gfp 19330ea7eeecSDaniel Borkmann * has __GFP_NOWARN then no warning will be triggered on invalid or failed 19340ea7eeecSDaniel Borkmann * allocation requests. 1935ccea34b5STejun Heo * 1936edcb4639STejun Heo * RETURNS: 1937edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1938edcb4639STejun Heo */ 19395835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) 19405835d96eSTejun Heo { 19415835d96eSTejun Heo return pcpu_alloc(size, align, false, gfp); 19425835d96eSTejun Heo } 19435835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); 19445835d96eSTejun Heo 19455835d96eSTejun Heo /** 19465835d96eSTejun Heo * __alloc_percpu - allocate dynamic percpu area 19475835d96eSTejun Heo * @size: size of area to allocate in bytes 19485835d96eSTejun Heo * @align: alignment of area (max PAGE_SIZE) 19495835d96eSTejun Heo * 19505835d96eSTejun Heo * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). 
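 *
 * Minimal usage sketch (illustrative, not part of the original kerneldoc):
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *	if (cnt)
 *		this_cpu_inc(*cnt);
 *	free_percpu(cnt);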
19515835d96eSTejun Heo */ 195243cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align) 1953edcb4639STejun Heo { 19545835d96eSTejun Heo return pcpu_alloc(size, align, false, GFP_KERNEL); 1955edcb4639STejun Heo } 1956fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 1957fbf59bc9STejun Heo 1958edcb4639STejun Heo /** 1959edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 1960edcb4639STejun Heo * @size: size of area to allocate in bytes 1961edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1962edcb4639STejun Heo * 19639329ba97STejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align 19649329ba97STejun Heo * from reserved percpu area if arch has set it up; otherwise, 19659329ba97STejun Heo * allocation is served from the same dynamic area. Might sleep. 19669329ba97STejun Heo * Might trigger writeouts. 1967edcb4639STejun Heo * 1968ccea34b5STejun Heo * CONTEXT: 1969ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1970ccea34b5STejun Heo * 1971edcb4639STejun Heo * RETURNS: 1972edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1973edcb4639STejun Heo */ 197443cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1975edcb4639STejun Heo { 19765835d96eSTejun Heo return pcpu_alloc(size, align, true, GFP_KERNEL); 1977edcb4639STejun Heo } 1978edcb4639STejun Heo 1979a56dbddfSTejun Heo /** 198067c2669dSRoman Gushchin * pcpu_balance_free - manage the amount of free chunks 1981f1833241SRoman Gushchin * @empty_only: free chunks only if there are no populated pages 1982a56dbddfSTejun Heo * 1983f1833241SRoman Gushchin * If empty_only is %false, reclaim all fully free chunks regardless of the 1984f1833241SRoman Gushchin * number of populated pages. Otherwise, only reclaim chunks that have no 1985f1833241SRoman Gushchin * populated pages. 1986e4d77700SRoman Gushchin * 1987e4d77700SRoman Gushchin * CONTEXT: 1988e4d77700SRoman Gushchin * pcpu_lock (can be dropped temporarily) 1989a56dbddfSTejun Heo */ 1990faf65ddeSRoman Gushchin static void pcpu_balance_free(bool empty_only) 1991fbf59bc9STejun Heo { 1992fe6bd8c3STejun Heo LIST_HEAD(to_free); 1993faf65ddeSRoman Gushchin struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot]; 1994a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 1995a56dbddfSTejun Heo 1996e4d77700SRoman Gushchin lockdep_assert_held(&pcpu_lock); 1997a56dbddfSTejun Heo 19981a4d7607STejun Heo /* 19991a4d7607STejun Heo * There's no reason to keep around multiple unused chunks and VM 20001a4d7607STejun Heo * areas can be scarce. Destroy all free chunks except for one. 
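	 *
	 * (With @empty_only, a free chunk that still has populated pages is
	 * kept so those pages can back future atomic allocations.)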
20011a4d7607STejun Heo */ 2002fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, free_head, list) { 20038d408b4bSTejun Heo WARN_ON(chunk->immutable); 2004a56dbddfSTejun Heo 2005a56dbddfSTejun Heo /* spare the first one */ 2006fe6bd8c3STejun Heo if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 2007a56dbddfSTejun Heo continue; 2008a56dbddfSTejun Heo 2009f1833241SRoman Gushchin if (!empty_only || chunk->nr_empty_pop_pages == 0) 2010fe6bd8c3STejun Heo list_move(&chunk->list, &to_free); 2011a56dbddfSTejun Heo } 2012a56dbddfSTejun Heo 2013e4d77700SRoman Gushchin if (list_empty(&to_free)) 2014e4d77700SRoman Gushchin return; 2015a56dbddfSTejun Heo 2016e4d77700SRoman Gushchin spin_unlock_irq(&pcpu_lock); 2017fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, &to_free, list) { 2018e837dfdeSDennis Zhou unsigned int rs, re; 2019dca49645STejun Heo 2020e837dfdeSDennis Zhou bitmap_for_each_set_region(chunk->populated, rs, re, 0, 202191e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 2022a93ace48STejun Heo pcpu_depopulate_chunk(chunk, rs, re); 2023b539b87fSTejun Heo spin_lock_irq(&pcpu_lock); 2024b539b87fSTejun Heo pcpu_chunk_depopulated(chunk, rs, re); 2025b539b87fSTejun Heo spin_unlock_irq(&pcpu_lock); 2026a93ace48STejun Heo } 20276081089fSTejun Heo pcpu_destroy_chunk(chunk); 2028accd4f36SEric Dumazet cond_resched(); 2029fbf59bc9STejun Heo } 2030e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock); 203167c2669dSRoman Gushchin } 203267c2669dSRoman Gushchin 203367c2669dSRoman Gushchin /** 203467c2669dSRoman Gushchin * pcpu_balance_populated - manage the amount of populated pages 203567c2669dSRoman Gushchin * 203667c2669dSRoman Gushchin * Maintain a certain amount of populated pages to satisfy atomic allocations. 203767c2669dSRoman Gushchin * It is possible that this is called when physical memory is scarce causing 203867c2669dSRoman Gushchin * OOM killer to be triggered. We should avoid doing so until an actual 203967c2669dSRoman Gushchin * allocation causes the failure as it is possible that requests can be 204067c2669dSRoman Gushchin * serviced from already backed regions. 2041e4d77700SRoman Gushchin * 2042e4d77700SRoman Gushchin * CONTEXT: 2043e4d77700SRoman Gushchin * pcpu_lock (can be dropped temporarily) 204467c2669dSRoman Gushchin */ 2045faf65ddeSRoman Gushchin static void pcpu_balance_populated(void) 204667c2669dSRoman Gushchin { 204767c2669dSRoman Gushchin /* gfp flags passed to underlying allocators */ 204867c2669dSRoman Gushchin const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 204967c2669dSRoman Gushchin struct pcpu_chunk *chunk; 205067c2669dSRoman Gushchin int slot, nr_to_pop, ret; 2051971f3918STejun Heo 2052e4d77700SRoman Gushchin lockdep_assert_held(&pcpu_lock); 2053971f3918STejun Heo 20541a4d7607STejun Heo /* 20551a4d7607STejun Heo * Ensure there are certain number of free populated pages for 20561a4d7607STejun Heo * atomic allocs. Fill up from the most packed so that atomic 20571a4d7607STejun Heo * allocs don't increase fragmentation. If atomic allocation 20581a4d7607STejun Heo * failed previously, always populate the maximum amount. This 20591a4d7607STejun Heo * should prevent atomic allocs larger than PAGE_SIZE from keeping 20601a4d7607STejun Heo * failing indefinitely; however, large atomic allocs are not 20611a4d7607STejun Heo * something we support properly and can be highly unreliable and 20621a4d7607STejun Heo * inefficient. 
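	 *
	 * For example (illustrative, assuming the usual 2/4 low/high empty
	 * page watermarks): with one empty populated page left, nr_to_pop
	 * below becomes 4 - 1 = 3; after an atomic allocation failure the
	 * full high watermark is repopulated.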
20631a4d7607STejun Heo */ 20641a4d7607STejun Heo retry_pop: 20651a4d7607STejun Heo if (pcpu_atomic_alloc_failed) { 20661a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 20671a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */ 20681a4d7607STejun Heo pcpu_atomic_alloc_failed = false; 20691a4d7607STejun Heo } else { 20701a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 2071faf65ddeSRoman Gushchin pcpu_nr_empty_pop_pages, 20721a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH); 20731a4d7607STejun Heo } 20741a4d7607STejun Heo 20751c29a3ceSDennis Zhou for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) { 2076e837dfdeSDennis Zhou unsigned int nr_unpop = 0, rs, re; 20771a4d7607STejun Heo 20781a4d7607STejun Heo if (!nr_to_pop) 20791a4d7607STejun Heo break; 20801a4d7607STejun Heo 2081faf65ddeSRoman Gushchin list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) { 20828ab16c43SDennis Zhou (Facebook) nr_unpop = chunk->nr_pages - chunk->nr_populated; 20831a4d7607STejun Heo if (nr_unpop) 20841a4d7607STejun Heo break; 20851a4d7607STejun Heo } 20861a4d7607STejun Heo 20871a4d7607STejun Heo if (!nr_unpop) 20881a4d7607STejun Heo continue; 20891a4d7607STejun Heo 20901a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */ 2091e837dfdeSDennis Zhou bitmap_for_each_clear_region(chunk->populated, rs, re, 0, 209291e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 2093e837dfdeSDennis Zhou int nr = min_t(int, re - rs, nr_to_pop); 20941a4d7607STejun Heo 2095e4d77700SRoman Gushchin spin_unlock_irq(&pcpu_lock); 209647504ee0SDennis Zhou ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); 2097e4d77700SRoman Gushchin cond_resched(); 2098e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock); 20991a4d7607STejun Heo if (!ret) { 21001a4d7607STejun Heo nr_to_pop -= nr; 2101b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, rs + nr); 21021a4d7607STejun Heo } else { 21031a4d7607STejun Heo nr_to_pop = 0; 21041a4d7607STejun Heo } 21051a4d7607STejun Heo 21061a4d7607STejun Heo if (!nr_to_pop) 21071a4d7607STejun Heo break; 21081a4d7607STejun Heo } 21091a4d7607STejun Heo } 21101a4d7607STejun Heo 21111a4d7607STejun Heo if (nr_to_pop) { 21121a4d7607STejun Heo /* ran out of chunks to populate, create a new one and retry */ 21131a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 2114e4d77700SRoman Gushchin chunk = pcpu_create_chunk(gfp); 2115e4d77700SRoman Gushchin cond_resched(); 2116e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock); 2117e4d77700SRoman Gushchin if (chunk) { 2118e4d77700SRoman Gushchin pcpu_chunk_relocate(chunk, -1); 21191a4d7607STejun Heo goto retry_pop; 21201a4d7607STejun Heo } 21211a4d7607STejun Heo } 2122a56dbddfSTejun Heo } 2123fbf59bc9STejun Heo 2124fbf59bc9STejun Heo /** 2125f1833241SRoman Gushchin * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages 2126f1833241SRoman Gushchin * 2127f1833241SRoman Gushchin * Scan over chunks in the depopulate list and try to release unused populated 2128f1833241SRoman Gushchin * pages back to the system. Depopulated chunks are sidelined to prevent 2129f1833241SRoman Gushchin * repopulating these pages unless required. Fully free chunks are reintegrated 2130f1833241SRoman Gushchin * and freed accordingly (1 is kept around). If we drop below the empty 2131f1833241SRoman Gushchin * populated pages threshold, reintegrate the chunk if it has empty free pages. 
2132f1833241SRoman Gushchin * Each chunk is scanned in the reverse order to keep populated pages close to 2133f1833241SRoman Gushchin * the beginning of the chunk. 2134e4d77700SRoman Gushchin * 2135e4d77700SRoman Gushchin * CONTEXT: 2136e4d77700SRoman Gushchin * pcpu_lock (can be dropped temporarily) 2137e4d77700SRoman Gushchin * 2138f1833241SRoman Gushchin */ 2139faf65ddeSRoman Gushchin static void pcpu_reclaim_populated(void) 2140f1833241SRoman Gushchin { 2141f1833241SRoman Gushchin struct pcpu_chunk *chunk; 2142f1833241SRoman Gushchin struct pcpu_block_md *block; 2143*93274f1dSDennis Zhou int freed_page_start, freed_page_end; 2144f1833241SRoman Gushchin int i, end; 2145*93274f1dSDennis Zhou bool reintegrate; 2146f1833241SRoman Gushchin 2147e4d77700SRoman Gushchin lockdep_assert_held(&pcpu_lock); 2148f1833241SRoman Gushchin 2149f1833241SRoman Gushchin /* 2150f1833241SRoman Gushchin * Once a chunk is isolated to the to_depopulate list, the chunk is no 2151f1833241SRoman Gushchin * longer discoverable to allocations that may populate pages. The only 2152f1833241SRoman Gushchin * other accessor is the free path, which only returns the area back to the 2153f1833241SRoman Gushchin * allocator without touching the populated bitmap. 2154f1833241SRoman Gushchin */ 2155faf65ddeSRoman Gushchin while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) { 2156faf65ddeSRoman Gushchin chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot], 2157f1833241SRoman Gushchin struct pcpu_chunk, list); 2158f1833241SRoman Gushchin WARN_ON(chunk->immutable); 2159f1833241SRoman Gushchin 2160f1833241SRoman Gushchin /* 2161f1833241SRoman Gushchin * Scan chunk's pages in the reverse order to keep populated 2162f1833241SRoman Gushchin * pages close to the beginning of the chunk. 2163f1833241SRoman Gushchin */ 2164*93274f1dSDennis Zhou freed_page_start = chunk->nr_pages; 2165*93274f1dSDennis Zhou freed_page_end = 0; 2166*93274f1dSDennis Zhou reintegrate = false; 2167f1833241SRoman Gushchin for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) { 2168f1833241SRoman Gushchin /* no more work to do */ 2169f1833241SRoman Gushchin if (chunk->nr_empty_pop_pages == 0) 2170f1833241SRoman Gushchin break; 2171f1833241SRoman Gushchin 2172f1833241SRoman Gushchin /* reintegrate chunk to prevent atomic alloc failures */ 2173faf65ddeSRoman Gushchin if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) { 2174*93274f1dSDennis Zhou reintegrate = true; 2175*93274f1dSDennis Zhou goto end_chunk; 2176f1833241SRoman Gushchin } 2177f1833241SRoman Gushchin 2178f1833241SRoman Gushchin /* 2179f1833241SRoman Gushchin * If the page is empty and populated, start or 2180f1833241SRoman Gushchin * extend the (i, end) range. If i == 0, decrease 2181f1833241SRoman Gushchin * i and perform the depopulation to cover the last 2182f1833241SRoman Gushchin * (first) page in the chunk.
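 *
 * As a sketch with hypothetical numbers: in a 4-page chunk whose
 * pages 2 and 3 are empty and populated, the scan sets end = 3 at
 * i = 3, continues through i = 2, and on reaching the busy page at
 * i = 1 depopulates [i + 1, end + 1) == [2, 4) in a single call.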
2183f1833241SRoman Gushchin */ 2184f1833241SRoman Gushchin block = chunk->md_blocks + i; 2185f1833241SRoman Gushchin if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS && 2186f1833241SRoman Gushchin test_bit(i, chunk->populated)) { 2187f1833241SRoman Gushchin if (end == -1) 2188f1833241SRoman Gushchin end = i; 2189f1833241SRoman Gushchin if (i > 0) 2190f1833241SRoman Gushchin continue; 2191f1833241SRoman Gushchin i--; 2192f1833241SRoman Gushchin } 2193f1833241SRoman Gushchin 2194f1833241SRoman Gushchin /* depopulate if there is an active range */ 2195f1833241SRoman Gushchin if (end == -1) 2196f1833241SRoman Gushchin continue; 2197f1833241SRoman Gushchin 2198f1833241SRoman Gushchin spin_unlock_irq(&pcpu_lock); 2199f1833241SRoman Gushchin pcpu_depopulate_chunk(chunk, i + 1, end + 1); 2200f1833241SRoman Gushchin cond_resched(); 2201f1833241SRoman Gushchin spin_lock_irq(&pcpu_lock); 2202f1833241SRoman Gushchin 2203f1833241SRoman Gushchin pcpu_chunk_depopulated(chunk, i + 1, end + 1); 2204*93274f1dSDennis Zhou freed_page_start = min(freed_page_start, i + 1); 2205*93274f1dSDennis Zhou freed_page_end = max(freed_page_end, end + 1); 2206f1833241SRoman Gushchin 2207f1833241SRoman Gushchin /* reset the range and continue */ 2208f1833241SRoman Gushchin end = -1; 2209f1833241SRoman Gushchin } 2210f1833241SRoman Gushchin 2211*93274f1dSDennis Zhou end_chunk: 2212*93274f1dSDennis Zhou /* batch tlb flush per chunk to amortize cost */ 2213*93274f1dSDennis Zhou if (freed_page_start < freed_page_end) { 2214*93274f1dSDennis Zhou spin_unlock_irq(&pcpu_lock); 2215*93274f1dSDennis Zhou pcpu_post_unmap_tlb_flush(chunk, 2216*93274f1dSDennis Zhou freed_page_start, 2217*93274f1dSDennis Zhou freed_page_end); 2218*93274f1dSDennis Zhou cond_resched(); 2219*93274f1dSDennis Zhou spin_lock_irq(&pcpu_lock); 2220*93274f1dSDennis Zhou } 2221*93274f1dSDennis Zhou 2222*93274f1dSDennis Zhou if (reintegrate || chunk->free_bytes == pcpu_unit_size) 2223f1833241SRoman Gushchin pcpu_reintegrate_chunk(chunk); 2224f1833241SRoman Gushchin else 2225*93274f1dSDennis Zhou list_move_tail(&chunk->list, 2226faf65ddeSRoman Gushchin &pcpu_chunk_lists[pcpu_sidelined_slot]); 2227f1833241SRoman Gushchin } 2228fbf59bc9STejun Heo } 2229fbf59bc9STejun Heo 2230fbf59bc9STejun Heo /** 22313c7be18aSRoman Gushchin * pcpu_balance_workfn - manage the amount of free chunks and populated pages 22323c7be18aSRoman Gushchin * @work: unused 22333c7be18aSRoman Gushchin * 2234f1833241SRoman Gushchin * For each chunk type, manage the number of fully free chunks and the number of 2235f1833241SRoman Gushchin * populated pages. An important thing to consider is when pages are freed and 2236f1833241SRoman Gushchin * how they contribute to the global counts. 22373c7be18aSRoman Gushchin */ 22383c7be18aSRoman Gushchin static void pcpu_balance_workfn(struct work_struct *work) 22393c7be18aSRoman Gushchin { 2240f1833241SRoman Gushchin /* 2241f1833241SRoman Gushchin * pcpu_balance_free() is called twice because the first time we may 2242f1833241SRoman Gushchin * trim pages in the active pcpu_nr_empty_pop_pages which may cause us 2243f1833241SRoman Gushchin * to grow other chunks. This then gives pcpu_reclaim_populated() time 2244f1833241SRoman Gushchin * to move fully free chunks to the active list to be freed if 2245f1833241SRoman Gushchin * appropriate. 
2246f1833241SRoman Gushchin */ 224767c2669dSRoman Gushchin 	mutex_lock(&pcpu_alloc_mutex); 2248e4d77700SRoman Gushchin 	spin_lock_irq(&pcpu_lock); 22493c7be18aSRoman Gushchin 2250faf65ddeSRoman Gushchin 	pcpu_balance_free(false); 2251faf65ddeSRoman Gushchin 	pcpu_reclaim_populated(); 2252faf65ddeSRoman Gushchin 	pcpu_balance_populated(); 2253faf65ddeSRoman Gushchin 	pcpu_balance_free(true); 2254e4d77700SRoman Gushchin 2255e4d77700SRoman Gushchin 	spin_unlock_irq(&pcpu_lock); 225667c2669dSRoman Gushchin 	mutex_unlock(&pcpu_alloc_mutex); 22573c7be18aSRoman Gushchin } 22583c7be18aSRoman Gushchin 22593c7be18aSRoman Gushchin /** 2260fbf59bc9STejun Heo * free_percpu - free percpu area 2261fbf59bc9STejun Heo * @ptr: pointer to area to free 2262fbf59bc9STejun Heo * 2263ccea34b5STejun Heo * Free percpu area @ptr. 2264ccea34b5STejun Heo * 2265ccea34b5STejun Heo * CONTEXT: 2266ccea34b5STejun Heo * Can be called from atomic context. 2267fbf59bc9STejun Heo */ 226843cf38ebSTejun Heo void free_percpu(void __percpu *ptr) 2269fbf59bc9STejun Heo { 2270129182e5SAndrew Morton void *addr; 2271fbf59bc9STejun Heo struct pcpu_chunk *chunk; 2272ccea34b5STejun Heo unsigned long flags; 22733c7be18aSRoman Gushchin int size, off; 2274198790d9SJohn Sperbeck bool need_balance = false; 2275fbf59bc9STejun Heo 2276fbf59bc9STejun Heo if (!ptr) 2277fbf59bc9STejun Heo return; 2278fbf59bc9STejun Heo 2279f528f0b8SCatalin Marinas kmemleak_free_percpu(ptr); 2280f528f0b8SCatalin Marinas 2281129182e5SAndrew Morton addr = __pcpu_ptr_to_addr(ptr); 2282129182e5SAndrew Morton 2283ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 2284fbf59bc9STejun Heo 2285fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr); 2286bba174f5STejun Heo off = addr - chunk->base_addr; 2287fbf59bc9STejun Heo 22883c7be18aSRoman Gushchin size = pcpu_free_area(chunk, off); 22893c7be18aSRoman Gushchin 22903c7be18aSRoman Gushchin pcpu_memcg_free_hook(chunk, off, size); 2291fbf59bc9STejun Heo 2292f1833241SRoman Gushchin /* 2293f1833241SRoman Gushchin * If there is more than one fully free chunk, wake up the grim reaper. 2294f1833241SRoman Gushchin * If the chunk is isolated, it may be in the process of being 2295f1833241SRoman Gushchin * reclaimed. Let reclaim manage cleaning up of that chunk.
2296f1833241SRoman Gushchin */ 2297f1833241SRoman Gushchin if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) { 2298fbf59bc9STejun Heo struct pcpu_chunk *pos; 2299fbf59bc9STejun Heo 2300faf65ddeSRoman Gushchin list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list) 2301fbf59bc9STejun Heo if (pos != chunk) { 2302198790d9SJohn Sperbeck need_balance = true; 2303fbf59bc9STejun Heo break; 2304fbf59bc9STejun Heo } 2305f1833241SRoman Gushchin } else if (pcpu_should_reclaim_chunk(chunk)) { 2306f1833241SRoman Gushchin pcpu_isolate_chunk(chunk); 2307f1833241SRoman Gushchin need_balance = true; 2308fbf59bc9STejun Heo } 2309fbf59bc9STejun Heo 2310df95e795SDennis Zhou trace_percpu_free_percpu(chunk->base_addr, off, ptr); 2311df95e795SDennis Zhou 2312ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 2313198790d9SJohn Sperbeck 2314198790d9SJohn Sperbeck if (need_balance) 2315198790d9SJohn Sperbeck pcpu_schedule_balance_work(); 2316fbf59bc9STejun Heo } 2317fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 2318fbf59bc9STejun Heo 2319383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) 2320383776faSThomas Gleixner { 2321383776faSThomas Gleixner #ifdef CONFIG_SMP 2322383776faSThomas Gleixner const size_t static_size = __per_cpu_end - __per_cpu_start; 2323383776faSThomas Gleixner void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 2324383776faSThomas Gleixner unsigned int cpu; 2325383776faSThomas Gleixner 2326383776faSThomas Gleixner for_each_possible_cpu(cpu) { 2327383776faSThomas Gleixner void *start = per_cpu_ptr(base, cpu); 2328383776faSThomas Gleixner void *va = (void *)addr; 2329383776faSThomas Gleixner 2330383776faSThomas Gleixner if (va >= start && va < start + static_size) { 23318ce371f9SPeter Zijlstra if (can_addr) { 2332383776faSThomas Gleixner *can_addr = (unsigned long) (va - start); 23338ce371f9SPeter Zijlstra *can_addr += (unsigned long) 23348ce371f9SPeter Zijlstra per_cpu_ptr(base, get_boot_cpu_id()); 23358ce371f9SPeter Zijlstra } 2336383776faSThomas Gleixner return true; 2337383776faSThomas Gleixner } 2338383776faSThomas Gleixner } 2339383776faSThomas Gleixner #endif 2340383776faSThomas Gleixner /* on UP, can't distinguish from other static vars, always false */ 2341383776faSThomas Gleixner return false; 2342383776faSThomas Gleixner } 2343383776faSThomas Gleixner 23443b034b0dSVivek Goyal /** 234510fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area 234610fad5e4STejun Heo * @addr: address to test 234710fad5e4STejun Heo * 234810fad5e4STejun Heo * Test whether @addr belongs to in-kernel static percpu area. Module 234910fad5e4STejun Heo * static percpu areas are not considered. For those, use 235010fad5e4STejun Heo * is_module_percpu_address(). 235110fad5e4STejun Heo * 235210fad5e4STejun Heo * RETURNS: 235310fad5e4STejun Heo * %true if @addr is from in-kernel static percpu area, %false otherwise. 
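 *
 * A minimal usage sketch (some_counter is a hypothetical static
 * percpu variable):
 *
 *	static DEFINE_PER_CPU(int, some_counter);
 *	...
 *	unsigned long addr = (unsigned long)per_cpu_ptr(&some_counter, cpu);
 *	WARN_ON(!is_kernel_percpu_address(addr));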
235410fad5e4STejun Heo */ 235510fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr) 235610fad5e4STejun Heo { 2357383776faSThomas Gleixner return __is_kernel_percpu_address(addr, NULL); 235810fad5e4STejun Heo } 235910fad5e4STejun Heo 236010fad5e4STejun Heo /** 23613b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address 23623b034b0dSVivek Goyal * @addr: the address to be converted to physical address 23633b034b0dSVivek Goyal * 23643b034b0dSVivek Goyal * Given @addr which is dereferenceable address obtained via one of 23653b034b0dSVivek Goyal * percpu access macros, this function translates it into its physical 23663b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid 23673b034b0dSVivek Goyal * until this function finishes. 23683b034b0dSVivek Goyal * 236967589c71SDave Young * percpu allocator has special setup for the first chunk, which currently 237067589c71SDave Young * supports either embedding in linear address space or vmalloc mapping, 237167589c71SDave Young * and, from the second one, the backing allocator (currently either vm or 237267589c71SDave Young * km) provides translation. 237367589c71SDave Young * 2374bffc4375SYannick Guerrini * The addr can be translated simply without checking if it falls into the 237567589c71SDave Young * first chunk. But the current code reflects better how percpu allocator 237667589c71SDave Young * actually works, and the verification can discover both bugs in percpu 237767589c71SDave Young * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current 237867589c71SDave Young * code. 237967589c71SDave Young * 23803b034b0dSVivek Goyal * RETURNS: 23813b034b0dSVivek Goyal * The physical address for @addr. 23823b034b0dSVivek Goyal */ 23833b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr) 23843b034b0dSVivek Goyal { 23859983b6f0STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 23869983b6f0STejun Heo bool in_first_chunk = false; 2387a855b84cSTejun Heo unsigned long first_low, first_high; 23889983b6f0STejun Heo unsigned int cpu; 23899983b6f0STejun Heo 23909983b6f0STejun Heo /* 2391a855b84cSTejun Heo * The following test on unit_low/high isn't strictly 23929983b6f0STejun Heo * necessary but will speed up lookups of addresses which 23939983b6f0STejun Heo * aren't in the first chunk. 2394c0ebfdc3SDennis Zhou (Facebook) * 2395c0ebfdc3SDennis Zhou (Facebook) * The address check is against full chunk sizes. pcpu_base_addr 2396c0ebfdc3SDennis Zhou (Facebook) * points to the beginning of the first chunk including the 2397c0ebfdc3SDennis Zhou (Facebook) * static region. Assumes good intent as the first chunk may 2398c0ebfdc3SDennis Zhou (Facebook) * not be full (ie. < pcpu_unit_pages in size). 
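 *
 * E.g. an address obtained via per_cpu_ptr() on a dynamic percpu
 * allocation passes this window test only if it lives in the first
 * chunk; addresses from later chunks fail it and are translated via
 * pcpu_addr_to_page() below.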
23999983b6f0STejun Heo */ 2400c0ebfdc3SDennis Zhou (Facebook) first_low = (unsigned long)pcpu_base_addr + 2401c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 2402c0ebfdc3SDennis Zhou (Facebook) first_high = (unsigned long)pcpu_base_addr + 2403c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 2404a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 2405a855b84cSTejun Heo (unsigned long)addr < first_high) { 24069983b6f0STejun Heo for_each_possible_cpu(cpu) { 24079983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 24089983b6f0STejun Heo 24099983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 24109983b6f0STejun Heo in_first_chunk = true; 24119983b6f0STejun Heo break; 24129983b6f0STejun Heo } 24139983b6f0STejun Heo } 24149983b6f0STejun Heo } 24159983b6f0STejun Heo 24169983b6f0STejun Heo if (in_first_chunk) { 2417eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 24183b034b0dSVivek Goyal return __pa(addr); 24193b034b0dSVivek Goyal else 24209f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 24219f57bd4dSEugene Surovegin offset_in_page(addr); 2422020ec653STejun Heo } else 24239f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 24249f57bd4dSEugene Surovegin offset_in_page(addr); 24253b034b0dSVivek Goyal } 24263b034b0dSVivek Goyal 2427fbf59bc9STejun Heo /** 2428fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 2429fd1e8a1fSTejun Heo * @nr_groups: the number of groups 2430fd1e8a1fSTejun Heo * @nr_units: the number of units 2431033e48fbSTejun Heo * 2432fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 2433fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 2434fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 2435fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 2436fd1e8a1fSTejun Heo * pointer of other groups. 2437033e48fbSTejun Heo * 2438033e48fbSTejun Heo * RETURNS: 2439fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 2440fd1e8a1fSTejun Heo * failure. 2441033e48fbSTejun Heo */ 2442fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 2443fd1e8a1fSTejun Heo int nr_units) 2444fd1e8a1fSTejun Heo { 2445fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 2446fd1e8a1fSTejun Heo size_t base_size, ai_size; 2447fd1e8a1fSTejun Heo void *ptr; 2448fd1e8a1fSTejun Heo int unit; 2449fd1e8a1fSTejun Heo 245014d37612SGustavo A. R. 
Silva base_size = ALIGN(struct_size(ai, groups, nr_groups), 2451fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 2452fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 2453fd1e8a1fSTejun Heo 245426fb3daeSMike Rapoport ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); 2455fd1e8a1fSTejun Heo if (!ptr) 2456fd1e8a1fSTejun Heo return NULL; 2457fd1e8a1fSTejun Heo ai = ptr; 2458fd1e8a1fSTejun Heo ptr += base_size; 2459fd1e8a1fSTejun Heo 2460fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 2461fd1e8a1fSTejun Heo 2462fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 2463fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 2464fd1e8a1fSTejun Heo 2465fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 2466fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 2467fd1e8a1fSTejun Heo 2468fd1e8a1fSTejun Heo return ai; 2469fd1e8a1fSTejun Heo } 2470fd1e8a1fSTejun Heo 2471fd1e8a1fSTejun Heo /** 2472fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 2473fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 2474fd1e8a1fSTejun Heo * 2475fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 2476fd1e8a1fSTejun Heo */ 2477fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 2478fd1e8a1fSTejun Heo { 2479999c17e3SSantosh Shilimkar memblock_free_early(__pa(ai), ai->__ai_size); 2480fd1e8a1fSTejun Heo } 2481fd1e8a1fSTejun Heo 2482fd1e8a1fSTejun Heo /** 2483fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 2484fd1e8a1fSTejun Heo * @lvl: loglevel 2485fd1e8a1fSTejun Heo * @ai: allocation info to dump 2486fd1e8a1fSTejun Heo * 2487fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 2488fd1e8a1fSTejun Heo */ 2489fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl, 2490fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai) 2491033e48fbSTejun Heo { 2492fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width; 2493033e48fbSTejun Heo char empty_str[] = "--------"; 2494fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0; 2495fd1e8a1fSTejun Heo int group, v; 2496fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */ 2497033e48fbSTejun Heo 2498fd1e8a1fSTejun Heo v = ai->nr_groups; 2499033e48fbSTejun Heo while (v /= 10) 2500fd1e8a1fSTejun Heo group_width++; 2501033e48fbSTejun Heo 2502fd1e8a1fSTejun Heo v = num_possible_cpus(); 2503fd1e8a1fSTejun Heo while (v /= 10) 2504fd1e8a1fSTejun Heo cpu_width++; 2505fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 2506033e48fbSTejun Heo 2507fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size; 2508fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3; 2509fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1)); 2510033e48fbSTejun Heo 2511fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 2512fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 2513fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 2514fd1e8a1fSTejun Heo 2515fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) { 2516fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 2517fd1e8a1fSTejun Heo int unit = 0, unit_end = 0; 2518fd1e8a1fSTejun Heo 2519fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa); 2520fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa; 2521fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) { 2522fd1e8a1fSTejun Heo if (!(alloc % apl)) { 
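/* every apl allocs, break the line and reprint the log prefix */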
25231170532bSJoe Perches 				pr_cont("\n"); 2524fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl); 2525033e48fbSTejun Heo 			} 25261170532bSJoe Perches 			pr_cont("[%0*d] ", group_width, group); 2527fd1e8a1fSTejun Heo 2528fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++) 2529fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS) 25301170532bSJoe Perches 					pr_cont("%0*d ", 25311170532bSJoe Perches 						cpu_width, gi->cpu_map[unit]); 2532033e48fbSTejun Heo 				else 25331170532bSJoe Perches 					pr_cont("%s ", empty_str); 2534033e48fbSTejun Heo 		} 2535fd1e8a1fSTejun Heo 	} 25361170532bSJoe Perches 	pr_cont("\n"); 2537033e48fbSTejun Heo } 2538033e48fbSTejun Heo 2539fbf59bc9STejun Heo /** 25408d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 2541fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how the percpu area is shaped 254238a6be52STejun Heo * @base_addr: mapped address 2543fbf59bc9STejun Heo * 25448d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 254569ab285bSChristophe JAILLET * percpu area. This function is to be called from arch percpu area 254638a6be52STejun Heo * setup path. 25478d408b4bSTejun Heo * 2548fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first 2549fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator. 25508d408b4bSTejun Heo * 2551fd1e8a1fSTejun Heo * @ai->static_size is the size of static percpu area. 2552fd1e8a1fSTejun Heo * 2553fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the amount of bytes to 2554edcb4639STejun Heo * reserve after the static area in the first chunk. This reserves 2555edcb4639STejun Heo * the first chunk such that it's available only through reserved 2556edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 2557edcb4639STejun Heo * static areas on architectures where the addressing model has 2558edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 2559edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 2560edcb4639STejun Heo * 2561fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 2562fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 2563fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 25646074d5b0STejun Heo * 2565fd1e8a1fSTejun Heo * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 2566fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 2567fd1e8a1fSTejun Heo * @ai->dyn_size. 25688d408b4bSTejun Heo * 2569fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and is used as the alignment 2570fd1e8a1fSTejun Heo * for vm areas. 25718d408b4bSTejun Heo * 2572fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and always multiple of 2573fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 2574fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 2575fd1e8a1fSTejun Heo * 2576fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe virtual memory layout of 2577fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the 2578fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 2579fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 2580fd1e8a1fSTejun Heo * all units is assumed.
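 *
 * As an illustrative (hypothetical) sizing, with @ai->static_size =
 * 64k, @ai->reserved_size = 8k and @ai->dyn_size = 28k, each unit is
 * laid out as <static 64k | [reserved 8k] | dynamic 28k> and
 * @ai->unit_size must be at least 100k, page aligned.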
25818d408b4bSTejun Heo * 258238a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 258338a6be52STejun Heo * copied static data to each unit. 2584fbf59bc9STejun Heo * 2585c0ebfdc3SDennis Zhou (Facebook) * The first chunk will always contain a static and a dynamic region. 2586c0ebfdc3SDennis Zhou (Facebook) * However, the static region is not managed by any chunk. If the first 2587c0ebfdc3SDennis Zhou (Facebook) * chunk also contains a reserved region, it is served by two chunks - 2588c0ebfdc3SDennis Zhou (Facebook) * one for the reserved region and one for the dynamic region. They 2589c0ebfdc3SDennis Zhou (Facebook) * share the same vm, but use offset regions in the area allocation map. 2590c0ebfdc3SDennis Zhou (Facebook) * The chunk serving the dynamic region is circulated in the chunk slots 2591c0ebfdc3SDennis Zhou (Facebook) * and available for dynamic allocation like any other chunk. 2592fbf59bc9STejun Heo */ 2593163fa234SKefeng Wang void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 2594fd1e8a1fSTejun Heo void *base_addr) 2595fbf59bc9STejun Heo { 2596b9c39442SDennis Zhou (Facebook) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2597d2f3c384SDennis Zhou (Facebook) size_t static_size, dyn_size; 25980c4169c3SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 25996563297cSTejun Heo unsigned long *group_offsets; 26006563297cSTejun Heo size_t *group_sizes; 2601fb435d52STejun Heo unsigned long *unit_off; 2602fbf59bc9STejun Heo unsigned int cpu; 2603fd1e8a1fSTejun Heo int *unit_map; 2604fd1e8a1fSTejun Heo int group, unit, i; 2605c0ebfdc3SDennis Zhou (Facebook) int map_size; 2606c0ebfdc3SDennis Zhou (Facebook) unsigned long tmp_addr; 2607f655f405SMike Rapoport size_t alloc_size; 2608fbf59bc9STejun Heo 2609635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 2610635b75fcSTejun Heo if (unlikely(cond)) { \ 2611870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \ 2612870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \ 2613807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \ 2614635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 2615635b75fcSTejun Heo BUG(); \ 2616635b75fcSTejun Heo } \ 2617635b75fcSTejun Heo } while (0) 2618635b75fcSTejun Heo 26192f39e637STejun Heo /* sanity checks */ 2620635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 2621bbddff05STejun Heo #ifdef CONFIG_SMP 2622635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 2623f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 2624bbddff05STejun Heo #endif 2625635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 2626f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 2627635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 2628f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 2629635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 2630ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); 2631099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 2632fb29a2ccSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!ai->dyn_size); 2633d2f3c384SDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); 2634ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || 2635ca460b3cSDennis Zhou (Facebook) IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); 26369f645532STejun Heo 
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 26378d408b4bSTejun Heo 26386563297cSTejun Heo /* process group information and build config tables accordingly */ 2639f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_offsets[0]); 2640f655f405SMike Rapoport group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2641f655f405SMike Rapoport if (!group_offsets) 2642f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2643f655f405SMike Rapoport alloc_size); 2644f655f405SMike Rapoport 2645f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_sizes[0]); 2646f655f405SMike Rapoport group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2647f655f405SMike Rapoport if (!group_sizes) 2648f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2649f655f405SMike Rapoport alloc_size); 2650f655f405SMike Rapoport 2651f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_map[0]); 2652f655f405SMike Rapoport unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2653f655f405SMike Rapoport if (!unit_map) 2654f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2655f655f405SMike Rapoport alloc_size); 2656f655f405SMike Rapoport 2657f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_off[0]); 2658f655f405SMike Rapoport unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2659f655f405SMike Rapoport if (!unit_off) 2660f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2661f655f405SMike Rapoport alloc_size); 26622f39e637STejun Heo 2663fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2664ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 2665a855b84cSTejun Heo 2666a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 2667a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 26682f39e637STejun Heo 2669fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 2670fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 26712f39e637STejun Heo 26726563297cSTejun Heo group_offsets[group] = gi->base_offset; 26736563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 26746563297cSTejun Heo 2675fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 2676fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 2677fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 2678fd1e8a1fSTejun Heo continue; 2679fd1e8a1fSTejun Heo 26809f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 2681635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 2682635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 2683fd1e8a1fSTejun Heo 2684fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 2685fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 2686fb435d52STejun Heo 2687a855b84cSTejun Heo /* determine low/high unit_cpu */ 2688a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 2689a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 2690a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 2691a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 2692a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 2693a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 26940fc0531eSLinus Torvalds } 26950fc0531eSLinus Torvalds } 2696fd1e8a1fSTejun Heo pcpu_nr_units = unit; 26972f39e637STejun Heo 26982f39e637STejun Heo for_each_possible_cpu(cpu) 2699635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 2700635b75fcSTejun Heo 2701635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 
2702635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 2703bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 27042f39e637STejun Heo 27056563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 27066563297cSTejun Heo pcpu_group_offsets = group_offsets; 27076563297cSTejun Heo pcpu_group_sizes = group_sizes; 2708fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 2709fb435d52STejun Heo pcpu_unit_offsets = unit_off; 27102f39e637STejun Heo 27112f39e637STejun Heo /* determine basic parameters */ 2712fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2713d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 27146563297cSTejun Heo pcpu_atom_size = ai->atom_size; 271561cf93d3SDennis Zhou pcpu_chunk_struct_size = struct_size(chunk, populated, 271661cf93d3SDennis Zhou BITS_TO_LONGS(pcpu_unit_pages)); 2717cafe8816STejun Heo 271830a5b536SDennis Zhou pcpu_stats_save_ai(ai); 271930a5b536SDennis Zhou 2720d9b55eebSTejun Heo /* 2721f1833241SRoman Gushchin * Allocate chunk slots. The slots after the active slots are: 2722f1833241SRoman Gushchin * sidelined_slot - isolated, depopulated chunks 2723f1833241SRoman Gushchin * free_slot - fully free chunks 2724f1833241SRoman Gushchin * to_depopulate_slot - isolated, chunks to depopulate 2725d9b55eebSTejun Heo */ 2726f1833241SRoman Gushchin pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1; 2727f1833241SRoman Gushchin pcpu_free_slot = pcpu_sidelined_slot + 1; 2728f1833241SRoman Gushchin pcpu_to_depopulate_slot = pcpu_free_slot + 1; 2729f1833241SRoman Gushchin pcpu_nr_slots = pcpu_to_depopulate_slot + 1; 27303c7be18aSRoman Gushchin pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * 2731faf65ddeSRoman Gushchin sizeof(pcpu_chunk_lists[0]), 27327e1c4e27SMike Rapoport SMP_CACHE_BYTES); 27333c7be18aSRoman Gushchin if (!pcpu_chunk_lists) 2734f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2735faf65ddeSRoman Gushchin pcpu_nr_slots * sizeof(pcpu_chunk_lists[0])); 27363c7be18aSRoman Gushchin 2737fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 2738faf65ddeSRoman Gushchin INIT_LIST_HEAD(&pcpu_chunk_lists[i]); 2739fbf59bc9STejun Heo 2740edcb4639STejun Heo /* 2741d2f3c384SDennis Zhou (Facebook) * The end of the static region needs to be aligned with the 2742d2f3c384SDennis Zhou (Facebook) * minimum allocation size as this offsets the reserved and 2743d2f3c384SDennis Zhou (Facebook) * dynamic region. The first chunk ends page aligned by 2744d2f3c384SDennis Zhou (Facebook) * expanding the dynamic region, therefore the dynamic region 2745d2f3c384SDennis Zhou (Facebook) * can be shrunk to compensate while still staying above the 2746d2f3c384SDennis Zhou (Facebook) * configured sizes. 2747d2f3c384SDennis Zhou (Facebook) */ 2748d2f3c384SDennis Zhou (Facebook) static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); 2749d2f3c384SDennis Zhou (Facebook) dyn_size = ai->dyn_size - (static_size - ai->static_size); 2750d2f3c384SDennis Zhou (Facebook) 2751d2f3c384SDennis Zhou (Facebook) /* 2752c0ebfdc3SDennis Zhou (Facebook) * Initialize first chunk. 2753c0ebfdc3SDennis Zhou (Facebook) * If the reserved_size is non-zero, this initializes the reserved 2754c0ebfdc3SDennis Zhou (Facebook) * chunk. If the reserved_size is zero, the reserved chunk is NULL 2755c0ebfdc3SDennis Zhou (Facebook) * and the dynamic region is initialized here. The first chunk, 2756c0ebfdc3SDennis Zhou (Facebook) * pcpu_first_chunk, will always point to the chunk that serves 2757c0ebfdc3SDennis Zhou (Facebook) * the dynamic region. 
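 *
 * With the hypothetical sizes used above (static 64k, reserved 8k,
 * dynamic 28k), the reserved chunk manages [64k, 72k) and
 * pcpu_first_chunk manages [72k, 100k) relative to @base_addr.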
2758edcb4639STejun Heo */ 2759d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size; 2760d2f3c384SDennis Zhou (Facebook) map_size = ai->reserved_size ?: dyn_size; 276140064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 276261ace7faSTejun Heo 2763edcb4639STejun Heo /* init dynamic chunk if necessary */ 2764b9c39442SDennis Zhou (Facebook) if (ai->reserved_size) { 27650c4169c3SDennis Zhou (Facebook) pcpu_reserved_chunk = chunk; 2766b9c39442SDennis Zhou (Facebook) 2767d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size + 2768c0ebfdc3SDennis Zhou (Facebook) ai->reserved_size; 2769d2f3c384SDennis Zhou (Facebook) map_size = dyn_size; 277040064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2771edcb4639STejun Heo } 2772edcb4639STejun Heo 27732441d15cSTejun Heo /* link the first chunk in */ 27740c4169c3SDennis Zhou (Facebook) pcpu_first_chunk = chunk; 2775faf65ddeSRoman Gushchin pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; 2776ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 2777fbf59bc9STejun Heo 27787e8a6304SDennis Zhou (Facebook) /* include all regions of the first chunk */ 27797e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += PFN_DOWN(size_sum); 27807e8a6304SDennis Zhou (Facebook) 278130a5b536SDennis Zhou pcpu_stats_chunk_alloc(); 2782df95e795SDennis Zhou trace_percpu_create_chunk(base_addr); 278330a5b536SDennis Zhou 2784fbf59bc9STejun Heo /* we're done */ 2785bba174f5STejun Heo pcpu_base_addr = base_addr; 2786fbf59bc9STejun Heo } 278766c3a757STejun Heo 2788bbddff05STejun Heo #ifdef CONFIG_SMP 2789bbddff05STejun Heo 279017f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 2791f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 2792f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 2793f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 2794f58dc01bSTejun Heo }; 279566c3a757STejun Heo 2796f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 2797f58dc01bSTejun Heo 2798f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 279966c3a757STejun Heo { 28005479c78aSCyrill Gorcunov if (!str) 28015479c78aSCyrill Gorcunov return -EINVAL; 28025479c78aSCyrill Gorcunov 2803f58dc01bSTejun Heo if (0) 2804f58dc01bSTejun Heo /* nada */; 2805f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 2806f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 2807f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 2808f58dc01bSTejun Heo #endif 2809f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2810f58dc01bSTejun Heo else if (!strcmp(str, "page")) 2811f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 2812f58dc01bSTejun Heo #endif 2813f58dc01bSTejun Heo else 2814870d4b12SJoe Perches pr_warn("unknown allocator %s specified\n", str); 281566c3a757STejun Heo 2816f58dc01bSTejun Heo return 0; 281766c3a757STejun Heo } 2818f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup); 281966c3a757STejun Heo 28203c9a024fSTejun Heo /* 28213c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup. 28223c9a024fSTejun Heo * Build it if needed by the arch config or the generic setup is going 28233c9a024fSTejun Heo * to be used. 
28243c9a024fSTejun Heo */ 282508fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 282608fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 28273c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 28283c9a024fSTejun Heo #endif 28293c9a024fSTejun Heo 28303c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 28313c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 28323c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 28333c9a024fSTejun Heo #endif 28343c9a024fSTejun Heo 28353c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 28363c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 28373c9a024fSTejun Heo /** 2838fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 2839fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2840fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2841fbf59bc9STejun Heo * @atom_size: allocation atom size 2842fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2843fbf59bc9STejun Heo * 2844fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 2845fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 2846fbf59bc9STejun Heo * atom size and distances between CPUs. 2847fbf59bc9STejun Heo * 2848bffc4375SYannick Guerrini * Groups are always multiples of atom size and CPUs which are of 2849fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 2850fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 2851fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage 2852fbf59bc9STejun Heo * of allocated virtual address space. 2853fbf59bc9STejun Heo * 2854fbf59bc9STejun Heo * RETURNS: 2855fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On 2856fbf59bc9STejun Heo * failure, ERR_PTR value is returned. 
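 *
 * For example, on a hypothetical two-node machine with cpus 0-1 on
 * node 0 and cpus 2-3 on node 1, the cpus split into two groups of
 * two units each, and each group lands in its own @atom_size
 * aligned allocation.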
2857fbf59bc9STejun Heo */ 2858258e0815SDennis Zhou static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info( 2859fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 2860fbf59bc9STejun Heo size_t atom_size, 2861fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 2862fbf59bc9STejun Heo { 2863fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 2864fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 2865d7d29ac7SWonhyuk Yang static struct cpumask mask __initdata; 2866fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 2867fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 2868fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 28693f649ab7SKees Cook int upa, max_upa, best_upa; /* units_per_alloc */ 2870fbf59bc9STejun Heo int last_allocs, group, unit; 2871fbf59bc9STejun Heo unsigned int cpu, tcpu; 2872fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 2873fbf59bc9STejun Heo unsigned int *cpu_map; 2874fbf59bc9STejun Heo 2875fbf59bc9STejun Heo /* this function may be called multiple times */ 2876fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 2877fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 2878d7d29ac7SWonhyuk Yang cpumask_clear(&mask); 2879fbf59bc9STejun Heo 2880fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 2881fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 2882fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 2883fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 2884fbf59bc9STejun Heo 2885fbf59bc9STejun Heo /* 2886fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 2887fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 288825985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 2889fbf59bc9STejun Heo * or larger than min_unit_size. 
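 *
 * A worked example with hypothetical numbers, assuming 4k pages:
 * size_sum = 44k and atom_size = 2M give min_unit_size = 44k and
 * alloc_size = 2M; upa starts at 2M / 44k = 46 and is walked down
 * to the largest value that splits 2M into page-aligned units,
 * i.e. max_upa = 32 (64k units).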
2890fbf59bc9STejun Heo */ 2891fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 2892fbf59bc9STejun Heo 28939c015162SDennis Zhou (Facebook) /* determine the maximum # of units that can fit in an allocation */ 2894fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 2895fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 2896f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2897fbf59bc9STejun Heo upa--; 2898fbf59bc9STejun Heo max_upa = upa; 2899fbf59bc9STejun Heo 2900d7d29ac7SWonhyuk Yang cpumask_copy(&mask, cpu_possible_mask); 2901d7d29ac7SWonhyuk Yang 2902fbf59bc9STejun Heo /* group cpus according to their proximity */ 2903d7d29ac7SWonhyuk Yang for (group = 0; !cpumask_empty(&mask); group++) { 2904d7d29ac7SWonhyuk Yang /* pop the group's first cpu */ 2905d7d29ac7SWonhyuk Yang cpu = cpumask_first(&mask); 2906fbf59bc9STejun Heo group_map[cpu] = group; 2907fbf59bc9STejun Heo group_cnt[group]++; 2908d7d29ac7SWonhyuk Yang cpumask_clear_cpu(cpu, &mask); 2909d7d29ac7SWonhyuk Yang 2910d7d29ac7SWonhyuk Yang for_each_cpu(tcpu, &mask) { 2911d7d29ac7SWonhyuk Yang if (!cpu_distance_fn || 2912d7d29ac7SWonhyuk Yang (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE && 2913d7d29ac7SWonhyuk Yang cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) { 2914d7d29ac7SWonhyuk Yang group_map[tcpu] = group; 2915d7d29ac7SWonhyuk Yang group_cnt[group]++; 2916d7d29ac7SWonhyuk Yang cpumask_clear_cpu(tcpu, &mask); 2917fbf59bc9STejun Heo } 2918d7d29ac7SWonhyuk Yang } 2919d7d29ac7SWonhyuk Yang } 2920d7d29ac7SWonhyuk Yang nr_groups = group; 2921fbf59bc9STejun Heo 2922fbf59bc9STejun Heo /* 29239c015162SDennis Zhou (Facebook) * Wasted space is caused by a ratio imbalance of upa to group_cnt. 29249c015162SDennis Zhou (Facebook) * Expand the unit_size until we use >= 75% of the units allocated. 29259c015162SDennis Zhou (Facebook) * Related to atom_size, which could be much larger than the unit_size. 2926fbf59bc9STejun Heo */ 2927fbf59bc9STejun Heo last_allocs = INT_MAX; 29284829c791SDennis Zhou best_upa = 0; 2929fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 2930fbf59bc9STejun Heo int allocs = 0, wasted = 0; 2931fbf59bc9STejun Heo 2932f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2933fbf59bc9STejun Heo continue; 2934fbf59bc9STejun Heo 2935fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2936fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 2937fbf59bc9STejun Heo allocs += this_allocs; 2938fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 2939fbf59bc9STejun Heo } 2940fbf59bc9STejun Heo 2941fbf59bc9STejun Heo /* 2942fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 2943fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 2944fbf59bc9STejun Heo * passes the following check. 
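 *
 * E.g. for hypothetical groups of 3 and 5 cpus at upa == 4:
 * allocs = 1 + 2 and wasted = 1 + 3; with 8 possible cpus,
 * wasted == 4 exceeds 8 / 3 == 2, so this upa is skipped.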
2945fbf59bc9STejun Heo */ 2946fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 2947fbf59bc9STejun Heo continue; 2948fbf59bc9STejun Heo 2949fbf59bc9STejun Heo /* and then don't consume more memory */ 2950fbf59bc9STejun Heo if (allocs > last_allocs) 2951fbf59bc9STejun Heo break; 2952fbf59bc9STejun Heo last_allocs = allocs; 2953fbf59bc9STejun Heo best_upa = upa; 2954fbf59bc9STejun Heo } 29554829c791SDennis Zhou BUG_ON(!best_upa); 2956fbf59bc9STejun Heo upa = best_upa; 2957fbf59bc9STejun Heo 2958fbf59bc9STejun Heo /* allocate and fill alloc_info */ 2959fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 2960fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 2961fbf59bc9STejun Heo 2962fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 2963fbf59bc9STejun Heo if (!ai) 2964fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 2965fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 2966fbf59bc9STejun Heo 2967fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2968fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 2969fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 2970fbf59bc9STejun Heo } 2971fbf59bc9STejun Heo 2972fbf59bc9STejun Heo ai->static_size = static_size; 2973fbf59bc9STejun Heo ai->reserved_size = reserved_size; 2974fbf59bc9STejun Heo ai->dyn_size = dyn_size; 2975fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 2976fbf59bc9STejun Heo ai->atom_size = atom_size; 2977fbf59bc9STejun Heo ai->alloc_size = alloc_size; 2978fbf59bc9STejun Heo 29792de7852fSPeng Fan for (group = 0, unit = 0; group < nr_groups; group++) { 2980fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2981fbf59bc9STejun Heo 2982fbf59bc9STejun Heo /* 2983fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 2984fbf59bc9STejun Heo * back-to-back. The caller should update this to 2985fbf59bc9STejun Heo * reflect actual allocation. 2986fbf59bc9STejun Heo */ 2987fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 2988fbf59bc9STejun Heo 2989fbf59bc9STejun Heo for_each_possible_cpu(cpu) 2990fbf59bc9STejun Heo if (group_map[cpu] == group) 2991fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 2992fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 2993fbf59bc9STejun Heo unit += gi->nr_units; 2994fbf59bc9STejun Heo } 2995fbf59bc9STejun Heo BUG_ON(unit != nr_units); 2996fbf59bc9STejun Heo 2997fbf59bc9STejun Heo return ai; 2998fbf59bc9STejun Heo } 29993c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 3000fbf59bc9STejun Heo 30013c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 300266c3a757STejun Heo /** 300366c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 300466c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 30054ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 3006c8826dd5STejun Heo * @atom_size: allocation atom size 3007c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 3008c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 300925985edcSLucas De Marchi * @free_fn: function to free percpu page 301066c3a757STejun Heo * 301166c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 301266c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 
301366c3a757STejun Heo * 301466c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 3015c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 3016c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 3017c8826dd5STejun Heo * aligned to @atom_size. 3018c8826dd5STejun Heo * 3019c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 3020c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 3021c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 3022c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 3023c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 3024c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 302566c3a757STejun Heo * 30264ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 302766c3a757STejun Heo * 302866c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 3029c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 303066c3a757STejun Heo * 303166c3a757STejun Heo * RETURNS: 3032fb435d52STejun Heo * 0 on success, -errno on failure. 303366c3a757STejun Heo */ 30344ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 3035c8826dd5STejun Heo size_t atom_size, 3036c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 3037c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 3038c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 303966c3a757STejun Heo { 3040c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 3041c8826dd5STejun Heo void **areas = NULL; 3042fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 304393c76b6bSzijun_hu size_t size_sum, areas_size; 304493c76b6bSzijun_hu unsigned long max_distance; 3045163fa234SKefeng Wang int group, i, highest_group, rc = 0; 304666c3a757STejun Heo 3047c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 3048c8826dd5STejun Heo cpu_distance_fn); 3049fd1e8a1fSTejun Heo if (IS_ERR(ai)) 3050fd1e8a1fSTejun Heo return PTR_ERR(ai); 305166c3a757STejun Heo 3052fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 3053c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 305466c3a757STejun Heo 305526fb3daeSMike Rapoport areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); 3056c8826dd5STejun Heo if (!areas) { 3057fb435d52STejun Heo rc = -ENOMEM; 3058c8826dd5STejun Heo goto out_free; 3059fa8a7094STejun Heo } 306066c3a757STejun Heo 30619b739662Szijun_hu /* allocate, copy and determine base address & max_distance */ 30629b739662Szijun_hu highest_group = 0; 3063c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 3064c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 3065c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 3066c8826dd5STejun Heo void *ptr; 306766c3a757STejun Heo 3068c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 3069c8826dd5STejun Heo cpu = gi->cpu_map[i]; 3070c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 3071c8826dd5STejun Heo 3072c8826dd5STejun Heo /* allocate space for the whole group */ 3073c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 3074c8826dd5STejun Heo if (!ptr) { 3075c8826dd5STejun Heo rc = -ENOMEM; 3076c8826dd5STejun Heo goto out_free_areas; 3077c8826dd5STejun Heo } 3078f528f0b8SCatalin Marinas /* 
kmemleak tracks the percpu allocations separately */ 3079f528f0b8SCatalin Marinas kmemleak_free(ptr); 3080c8826dd5STejun Heo areas[group] = ptr; 3081c8826dd5STejun Heo 3082c8826dd5STejun Heo base = min(ptr, base); 30839b739662Szijun_hu if (ptr > areas[highest_group]) 30849b739662Szijun_hu highest_group = group; 30859b739662Szijun_hu } 30869b739662Szijun_hu max_distance = areas[highest_group] - base; 30879b739662Szijun_hu max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 30889b739662Szijun_hu 30899b739662Szijun_hu /* warn if maximum distance is further than 75% of vmalloc space */ 30909b739662Szijun_hu if (max_distance > VMALLOC_TOTAL * 3 / 4) { 30919b739662Szijun_hu pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 30929b739662Szijun_hu max_distance, VMALLOC_TOTAL); 30939b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 30949b739662Szijun_hu /* and fail if we have fallback */ 30959b739662Szijun_hu rc = -EINVAL; 30969b739662Szijun_hu goto out_free_areas; 30979b739662Szijun_hu #endif 309842b64281STejun Heo } 309942b64281STejun Heo 310042b64281STejun Heo /* 310142b64281STejun Heo * Copy data and free unused parts. This should happen after all 310242b64281STejun Heo * allocations are complete; otherwise, we may end up with 310342b64281STejun Heo * overlapping groups. 310442b64281STejun Heo */ 310542b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 310642b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 310742b64281STejun Heo void *ptr = areas[group]; 3108c8826dd5STejun Heo 3109c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 3110c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 3111c8826dd5STejun Heo /* unused unit, free whole */ 3112c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 3113c8826dd5STejun Heo continue; 3114c8826dd5STejun Heo } 3115c8826dd5STejun Heo /* copy and return the unused part */ 3116fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 3117c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 3118c8826dd5STejun Heo } 311966c3a757STejun Heo } 312066c3a757STejun Heo 3121c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 31226ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 3123c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 31246ea529a2STejun Heo } 3125c8826dd5STejun Heo 312600206a69SMatteo Croce pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", 312700206a69SMatteo Croce PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, 3128fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 312966c3a757STejun Heo 3130163fa234SKefeng Wang pcpu_setup_first_chunk(ai, base); 3131c8826dd5STejun Heo goto out_free; 3132c8826dd5STejun Heo 3133c8826dd5STejun Heo out_free_areas: 3134c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 3135f851c8d8SMichael Holzheu if (areas[group]) 3136c8826dd5STejun Heo free_fn(areas[group], 3137c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 3138c8826dd5STejun Heo out_free: 3139fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 3140c8826dd5STejun Heo if (areas) 3141999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 3142fb435d52STejun Heo return rc; 3143d4b95f80STejun Heo } 31443c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 3145d4b95f80STejun Heo 31463c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 3147d4b95f80STejun Heo /** 314800ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using 
3149d4b95f80STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
3150d4b95f80STejun Heo  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
315125985edcSLucas De Marchi  * @free_fn: function to free percpu page, always called with PAGE_SIZE
3152d4b95f80STejun Heo  * @populate_pte_fn: function to populate pte
3153d4b95f80STejun Heo  *
315400ae4064STejun Heo  * This is a helper to ease setting up a page-remapped first percpu
315500ae4064STejun Heo  * chunk and can be called where pcpu_setup_first_chunk() is expected.
3156d4b95f80STejun Heo  *
3157d4b95f80STejun Heo  * This is the basic allocator.  The static percpu area is allocated
3158d4b95f80STejun Heo  * page-by-page into the vmalloc area.
3159d4b95f80STejun Heo  *
3160d4b95f80STejun Heo  * RETURNS:
3161fb435d52STejun Heo  * 0 on success, -errno on failure.
3162d4b95f80STejun Heo  */
3163fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size,
3164d4b95f80STejun Heo 				 pcpu_fc_alloc_fn_t alloc_fn,
3165d4b95f80STejun Heo 				 pcpu_fc_free_fn_t free_fn,
3166d4b95f80STejun Heo 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
3167d4b95f80STejun Heo {
31688f05a6a6STejun Heo 	static struct vm_struct vm;
3169fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
317000ae4064STejun Heo 	char psize_str[16];
3171ce3141a2STejun Heo 	int unit_pages;
3172d4b95f80STejun Heo 	size_t pages_size;
3173ce3141a2STejun Heo 	struct page **pages;
3174163fa234SKefeng Wang 	int unit, i, j, rc = 0;
31758f606604Szijun_hu 	int upa;
31768f606604Szijun_hu 	int nr_g0_units;
3177d4b95f80STejun Heo 
317800ae4064STejun Heo 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
317900ae4064STejun Heo 
31804ba6ce25STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3181fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
3182fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
3183fd1e8a1fSTejun Heo 	BUG_ON(ai->nr_groups != 1);
31848f606604Szijun_hu 	upa = ai->alloc_size / ai->unit_size;
31858f606604Szijun_hu 	nr_g0_units = roundup(num_possible_cpus(), upa);
31860b59c25fSIgor Stoppa 	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
31878f606604Szijun_hu 		pcpu_free_alloc_info(ai);
31888f606604Szijun_hu 		return -EINVAL;
31898f606604Szijun_hu 	}
3190fd1e8a1fSTejun Heo 
3191fd1e8a1fSTejun Heo 	unit_pages = ai->unit_size >> PAGE_SHIFT;
3192d4b95f80STejun Heo 
3193d4b95f80STejun Heo 	/* unaligned allocations can't be freed, round up to page size */
3194fd1e8a1fSTejun Heo 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3195fd1e8a1fSTejun Heo 			       sizeof(pages[0]));
31967e1c4e27SMike Rapoport 	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3197f655f405SMike Rapoport 	if (!pages)
3198f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
3199f655f405SMike Rapoport 		      pages_size);
3200d4b95f80STejun Heo 
32018f05a6a6STejun Heo 	/* allocate pages */
3202d4b95f80STejun Heo 	j = 0;
32038f606604Szijun_hu 	for (unit = 0; unit < num_possible_cpus(); unit++) {
3204fd1e8a1fSTejun Heo 		unsigned int cpu = ai->groups[0].cpu_map[unit];
32058f606604Szijun_hu 		for (i = 0; i < unit_pages; i++) {
3206d4b95f80STejun Heo 			void *ptr;
3207d4b95f80STejun Heo 
32083cbc8565STejun Heo 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
3209d4b95f80STejun Heo 			if (!ptr) {
3210870d4b12SJoe Perches 				pr_warn("failed to allocate %s page for cpu%u\n",
3211598d8091SJoe Perches 					psize_str, cpu);
3212d4b95f80STejun Heo 				goto enomem;
3213d4b95f80STejun Heo 			}
3214f528f0b8SCatalin Marinas 			/* kmemleak tracks the percpu allocations separately */
3215f528f0b8SCatalin Marinas 			kmemleak_free(ptr);
3216ce3141a2STejun Heo 			pages[j++] = virt_to_page(ptr);
3217d4b95f80STejun Heo 		}
32188f606604Szijun_hu 	}
3219d4b95f80STejun Heo 
32208f05a6a6STejun Heo 	/* allocate vm area, map the pages and copy static data */
32218f05a6a6STejun Heo 	vm.flags = VM_ALLOC;
3222fd1e8a1fSTejun Heo 	vm.size = num_possible_cpus() * ai->unit_size;
32238f05a6a6STejun Heo 	vm_area_register_early(&vm, PAGE_SIZE);
32248f05a6a6STejun Heo 
3225fd1e8a1fSTejun Heo 	for (unit = 0; unit < num_possible_cpus(); unit++) {
32261d9d3257STejun Heo 		unsigned long unit_addr =
3227fd1e8a1fSTejun Heo 			(unsigned long)vm.addr + unit * ai->unit_size;
32288f05a6a6STejun Heo 
3229ce3141a2STejun Heo 		for (i = 0; i < unit_pages; i++)
32308f05a6a6STejun Heo 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
32318f05a6a6STejun Heo 
32328f05a6a6STejun Heo 		/* pte already populated, the following shouldn't fail */
3233fb435d52STejun Heo 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3234ce3141a2STejun Heo 				      unit_pages);
3235fb435d52STejun Heo 		if (rc < 0)
3236fb435d52STejun Heo 			panic("failed to map percpu area, err=%d\n", rc);
32378f05a6a6STejun Heo 
32388f05a6a6STejun Heo 		/*
32398f05a6a6STejun Heo 		 * FIXME: Archs with virtual cache should flush local
32408f05a6a6STejun Heo 		 * cache for the linear mapping here - something
32418f05a6a6STejun Heo 		 * equivalent to flush_cache_vmap() on the local cpu.
32428f05a6a6STejun Heo 		 * flush_cache_vmap() can't be used as most supporting
32438f05a6a6STejun Heo 		 * data structures are not set up yet.
32448f05a6a6STejun Heo 		 */
32458f05a6a6STejun Heo 
32468f05a6a6STejun Heo 		/* copy static data */
3247fd1e8a1fSTejun Heo 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
324866c3a757STejun Heo 	}
324966c3a757STejun Heo 
325066c3a757STejun Heo 	/* we're ready, commit */
325100206a69SMatteo Croce 	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
325200206a69SMatteo Croce 		unit_pages, psize_str, ai->static_size,
3253fd1e8a1fSTejun Heo 		ai->reserved_size, ai->dyn_size);
325466c3a757STejun Heo 
3255163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, vm.addr);
3256d4b95f80STejun Heo 	goto out_free_ar;
3257d4b95f80STejun Heo 
3258d4b95f80STejun Heo enomem:
3259d4b95f80STejun Heo 	while (--j >= 0)
3260ce3141a2STejun Heo 		free_fn(page_address(pages[j]), PAGE_SIZE);
3261fb435d52STejun Heo 	rc = -ENOMEM;
3262d4b95f80STejun Heo out_free_ar:
3263999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(pages), pages_size);
3264fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
3265fb435d52STejun Heo 	return rc;
326666c3a757STejun Heo }
32673c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */
3268d4b95f80STejun Heo 
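/*
 * For illustration, a rough sketch of the callbacks an arch might pair
 * with pcpu_page_first_chunk().  The names are hypothetical stand-ins
 * for arch-side helpers; the populate_pte callback must make sure page
 * tables exist for the given address before __pcpu_map_pages() installs
 * the backing pages:
 *
 *	static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init pcpu_fc_free(void *ptr, size_t size)
 *	{
 *		memblock_free_early(__pa(ptr), size);
 *	}
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   pcpu_fc_alloc, pcpu_fc_free,
 *				   pcpu_populate_pte);
 */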
3269bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
32708c4bfc6eSTejun Heo /*
3271bbddff05STejun Heo  * Generic SMP percpu area setup.
3272e74e3962STejun Heo  *
3273e74e3962STejun Heo  * The embedding helper is used because its behavior closely resembles
3274e74e3962STejun Heo  * the original non-dynamic generic percpu area setup.  This is
3275e74e3962STejun Heo  * important because many archs have addressing restrictions and might
3276e74e3962STejun Heo  * fail if the percpu area is located far away from the previous
3277e74e3962STejun Heo  * location.  As an added bonus, in non-NUMA cases, embedding is
3278e74e3962STejun Heo  * generally a good idea TLB-wise because the percpu area can piggyback
3279e74e3962STejun Heo  * on the physical linear memory mapping which uses large page
3280e74e3962STejun Heo  * mappings on applicable archs.
3281e74e3962STejun Heo  */
3282e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3283e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset);
3284e74e3962STejun Heo 
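/*
 * A brief aside on how these offsets are consumed: the generic percpu
 * accessors simply add __per_cpu_offset[cpu] to a percpu symbol's
 * address.  Roughly (simplified here; the real macros in
 * include/asm-generic/percpu.h and include/linux/percpu-defs.h carry
 * extra sanity and sparse annotations):
 *
 *	#define per_cpu_offset(x)	(__per_cpu_offset[x])
 *	#define per_cpu_ptr(ptr, cpu)	\
 *		SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 */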
3285c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
3286c8826dd5STejun Heo 				       size_t align)
3287c8826dd5STejun Heo {
328826fb3daeSMike Rapoport 	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
3289c8826dd5STejun Heo }
3290c8826dd5STejun Heo 
3291c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
3292c8826dd5STejun Heo {
3293999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(ptr), size);
3294c8826dd5STejun Heo }
3295c8826dd5STejun Heo 
3296e74e3962STejun Heo void __init setup_per_cpu_areas(void)
3297e74e3962STejun Heo {
3298e74e3962STejun Heo 	unsigned long delta;
3299e74e3962STejun Heo 	unsigned int cpu;
3300fb435d52STejun Heo 	int rc;
3301e74e3962STejun Heo 
3302e74e3962STejun Heo 	/*
3303e74e3962STejun Heo 	 * Always reserve area for module percpu variables.  That's
3304e74e3962STejun Heo 	 * what the legacy allocator did.
3305e74e3962STejun Heo 	 */
3306fb435d52STejun Heo 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
3307c8826dd5STejun Heo 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
3308c8826dd5STejun Heo 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
3309fb435d52STejun Heo 	if (rc < 0)
3310bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
3311e74e3962STejun Heo 
3312e74e3962STejun Heo 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3313e74e3962STejun Heo 	for_each_possible_cpu(cpu)
3314fb435d52STejun Heo 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3315e74e3962STejun Heo }
3316e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3317099a19d9STejun Heo 
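/*
 * To make the re-basing above concrete, a worked example with made-up
 * numbers: suppose the linker placed the static copy at
 * __per_cpu_start == 0xffff0000 and the first chunk's base ended up at
 * pcpu_base_addr == 0xffff5000, so delta == 0x5000.  If unit 1 sits one
 * unit_size (say 0x8000) into the chunk, then pcpu_unit_offsets[1] ==
 * 0x8000 and __per_cpu_offset[1] == 0xd000; adding that offset to the
 * address of any static percpu variable lands on cpu 1's copy of it.
 */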
3318bbddff05STejun Heo #else /* CONFIG_SMP */
3319bbddff05STejun Heo 
3320bbddff05STejun Heo /*
3321bbddff05STejun Heo  * UP percpu area setup.
3322bbddff05STejun Heo  *
3323bbddff05STejun Heo  * UP always uses km-based percpu allocator with identity mapping.
3324bbddff05STejun Heo  * Static percpu variables are indistinguishable from the usual static
3325bbddff05STejun Heo  * variables and don't require any special preparation.
3326bbddff05STejun Heo  */
3327bbddff05STejun Heo void __init setup_per_cpu_areas(void)
3328bbddff05STejun Heo {
3329bbddff05STejun Heo 	const size_t unit_size =
3330bbddff05STejun Heo 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3331bbddff05STejun Heo 					 PERCPU_DYNAMIC_RESERVE));
3332bbddff05STejun Heo 	struct pcpu_alloc_info *ai;
3333bbddff05STejun Heo 	void *fc;
3334bbddff05STejun Heo 
3335bbddff05STejun Heo 	ai = pcpu_alloc_alloc_info(1, 1);
333626fb3daeSMike Rapoport 	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3337bbddff05STejun Heo 	if (!ai || !fc)
3338bbddff05STejun Heo 		panic("Failed to allocate memory for percpu areas.");
3339100d13c3SCatalin Marinas 	/* kmemleak tracks the percpu allocations separately */
3340100d13c3SCatalin Marinas 	kmemleak_free(fc);
3341bbddff05STejun Heo 
3342bbddff05STejun Heo 	ai->dyn_size = unit_size;
3343bbddff05STejun Heo 	ai->unit_size = unit_size;
3344bbddff05STejun Heo 	ai->atom_size = unit_size;
3345bbddff05STejun Heo 	ai->alloc_size = unit_size;
3346bbddff05STejun Heo 	ai->groups[0].nr_units = 1;
3347bbddff05STejun Heo 	ai->groups[0].cpu_map[0] = 0;
3348bbddff05STejun Heo 
3349163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, fc);
3350438a5061SNicolas Pitre 	pcpu_free_alloc_info(ai);
3351bbddff05STejun Heo }
3352bbddff05STejun Heo 
3353bbddff05STejun Heo #endif /* CONFIG_SMP */
3354bbddff05STejun Heo 
3355099a19d9STejun Heo /*
33567e8a6304SDennis Zhou (Facebook)  * pcpu_nr_pages - calculate total number of populated backing pages
33577e8a6304SDennis Zhou (Facebook)  *
33587e8a6304SDennis Zhou (Facebook)  * This reflects the number of pages populated to back chunks.  Metadata is
33597e8a6304SDennis Zhou (Facebook)  * excluded from the number exposed in meminfo as the number of backing pages
33607e8a6304SDennis Zhou (Facebook)  * scales with the number of cpus and can quickly outweigh the memory used for
33617e8a6304SDennis Zhou (Facebook)  * metadata.  It also keeps this calculation nice and simple.
33627e8a6304SDennis Zhou (Facebook)  *
33637e8a6304SDennis Zhou (Facebook)  * RETURNS:
33647e8a6304SDennis Zhou (Facebook)  * Total number of populated backing pages in use by the allocator.
33657e8a6304SDennis Zhou (Facebook)  */
33667e8a6304SDennis Zhou (Facebook) unsigned long pcpu_nr_pages(void)
33677e8a6304SDennis Zhou (Facebook) {
33687e8a6304SDennis Zhou (Facebook) 	return pcpu_nr_populated * pcpu_nr_units;
33697e8a6304SDennis Zhou (Facebook) }
33707e8a6304SDennis Zhou (Facebook) 
33717e8a6304SDennis Zhou (Facebook) /*
33721a4d7607STejun Heo  * The percpu allocator is initialized early during boot when neither slab nor
33731a4d7607STejun Heo  * workqueue is available.  Plug async management until everything is up
33741a4d7607STejun Heo  * and running.
33751a4d7607STejun Heo  */
33761a4d7607STejun Heo static int __init percpu_enable_async(void)
33771a4d7607STejun Heo {
33781a4d7607STejun Heo 	pcpu_async_enabled = true;
33791a4d7607STejun Heo 	return 0;
33801a4d7607STejun Heo }
33811a4d7607STejun Heo subsys_initcall(percpu_enable_async);
3382
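/*
 * As a consumer-side note, pcpu_nr_pages() above is what backs the
 * "Percpu:" line in /proc/meminfo.  Roughly, from fs/proc/meminfo.c
 * (simplified; treat the exact call as indicative rather than verbatim):
 *
 *	show_val_kb(m, "Percpu:         ", pcpu_nr_pages());
 *
 * i.e. the reported figure counts populated backing pages only, not
 * allocator metadata, consistent with the comment on pcpu_nr_pages().
 */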