// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while unaccounted allocations and allocations from
 * processes belonging to the root memory cgroup use the second set.
 *
 * The allocator tries to allocate from the fullest chunk first. Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
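/*
 * Illustrative example (editor's sketch, not part of the allocator):
 * assuming the usual PCPU_MIN_ALLOC_SHIFT of 2 (PCPU_MIN_ALLOC_SIZE == 4
 * bytes) and 4K pages, a 64-byte allocation consumes 64 / 4 = 16 bits of
 * the allocation map, one page corresponds to 4096 / 4 = 1024 bits, and a
 * chunk with nr_pages == 8 is tracked by 8 * 1024 bits.  The allocator
 * hands back a byte offset into a unit; for the first chunk the per-cpu
 * address of an allocation at offset off is then roughly
 *
 *	pcpu_base_addr + pcpu_unit_offsets[cpu] + off
 *
 * which mirrors the "512 bytes at 6k" example above.
 */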

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per a unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}
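/*
 * Worked example (editor's illustration): with PCPU_SLOT_BASE_SHIFT == 5,
 * __pcpu_size_to_slot() computes max(fls(size) - 5 + 2, 1).  A chunk whose
 * largest contiguous free area covers 1024 bytes has fls(1024) == 11 and
 * lands in slot 8; a 4096-byte area (fls == 13) lands in slot 10.  A fully
 * free chunk, whose contig hint covers pcpu_unit_size, is special-cased
 * into the last slot, and a chunk with less than PCPU_MIN_ALLOC_SIZE of
 * free space falls into slot 0.
 */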

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
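/*
 * Worked example (editor's illustration, assuming 4K pages so that
 * PCPU_BITMAP_BLOCK_BITS == 1024): a byte offset of 6144 into a unit is
 * bit offset 6144 / PCPU_MIN_ALLOC_SIZE == 1536.  That bit lives in
 * md_block pcpu_off_to_block_index(1536) == 1 at block offset
 * pcpu_off_to_block_off(1536) == 512, and pcpu_block_off_to_off(1, 512)
 * maps it back to 1536.
 */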

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First is there a contig_hint to
		 * check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
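/*
 * Typical usage (editor's sketch): the iterators are consumed the way
 * pcpu_chunk_refresh_hint() and pcpu_find_block_fit() below use them, e.g.
 *
 *	int bit_off, bits = 0;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		pcpu_block_update(&chunk->chunk_md, bit_off, bit_off + bits);
 *
 * where each iteration yields one aggregated free region of the chunk.
 */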

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		struct list_head *pcpu_slot;

		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of empty pages, based on the premise that
 * an md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}
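/*
 * Worked example of the promotion above (editor's illustration): a block
 * with contig_hint == 2 at contig_hint_start == 0 and scan_hint == 0 sees
 * pcpu_block_update(block, 5, 8).  contig == 3 beats the old hint and the
 * new region starts after it, so the old hint is remembered as
 * scan_hint == 2 at scan_hint_start == 0 while the contig_hint becomes 3
 * at contig_hint_start == 5.  A later scan can then start from the
 * scan_hint and still fall back to the contig_hint.
 */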

/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}
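/*
 * Worked example (editor's illustration): suppose the fit scan started at
 * block offset 10 and alignment pushed the allocation up to offset 16,
 * leaving the hole [10, 16) free.  If bits 8 and 9 were also free (only
 * bits 0-7 are allocated), find_last_bit() over the first 10 bits returns
 * 7, s_off becomes 8, and the block is updated for the whole free run
 * [8, 16) rather than just the skipped hole.
 */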

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within each their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* the freed area spans more than one block */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
1000b185cd0dSDennis Zhou (Facebook) */
1001b239f7daSDennis Zhou if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1002d33d9f3dSDennis Zhou pcpu_chunk_refresh_hint(chunk, true);
1003b185cd0dSDennis Zhou (Facebook) else
100492c14cabSDennis Zhou pcpu_block_update(&chunk->chunk_md,
100592c14cabSDennis Zhou pcpu_block_off_to_off(s_index, start),
100692c14cabSDennis Zhou end);
1007ca460b3cSDennis Zhou (Facebook) }
1008ca460b3cSDennis Zhou (Facebook)
1009ca460b3cSDennis Zhou (Facebook) /**
101040064aecSDennis Zhou (Facebook) * pcpu_is_populated - determines if the region is populated
101140064aecSDennis Zhou (Facebook) * @chunk: chunk of interest
101240064aecSDennis Zhou (Facebook) * @bit_off: chunk offset
101340064aecSDennis Zhou (Facebook) * @bits: size of area
101440064aecSDennis Zhou (Facebook) * @next_off: return value for the next offset to start searching
101540064aecSDennis Zhou (Facebook) *
101640064aecSDennis Zhou (Facebook) * For atomic allocations, check if the backing pages are populated.
101740064aecSDennis Zhou (Facebook) *
101840064aecSDennis Zhou (Facebook) * RETURNS:
101940064aecSDennis Zhou (Facebook) * true if the backing pages are populated, false otherwise.
102040064aecSDennis Zhou (Facebook) * @next_off is set to skip over unpopulated blocks in pcpu_find_block_fit.
102140064aecSDennis Zhou (Facebook) */
102240064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
102340064aecSDennis Zhou (Facebook) int *next_off)
102440064aecSDennis Zhou (Facebook) {
1025e837dfdeSDennis Zhou unsigned int page_start, page_end, rs, re;
102640064aecSDennis Zhou (Facebook)
102740064aecSDennis Zhou (Facebook) page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
102840064aecSDennis Zhou (Facebook) page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
102940064aecSDennis Zhou (Facebook)
103040064aecSDennis Zhou (Facebook) rs = page_start;
1031e837dfdeSDennis Zhou bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
103240064aecSDennis Zhou (Facebook) if (rs >= page_end)
103340064aecSDennis Zhou (Facebook) return true;
103440064aecSDennis Zhou (Facebook)
103540064aecSDennis Zhou (Facebook) *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
103640064aecSDennis Zhou (Facebook) return false;
103740064aecSDennis Zhou (Facebook) }
103840064aecSDennis Zhou (Facebook)
103940064aecSDennis Zhou (Facebook) /**
104040064aecSDennis Zhou (Facebook) * pcpu_find_block_fit - finds the block index to start searching
104140064aecSDennis Zhou (Facebook) * @chunk: chunk of interest
104240064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units
104340064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE bytes)
104440064aecSDennis Zhou (Facebook) * @pop_only: use populated regions only
104540064aecSDennis Zhou (Facebook) *
1046b4c2116cSDennis Zhou (Facebook) * Given a chunk and an allocation spec, find the offset to begin searching
1047b4c2116cSDennis Zhou (Facebook) * for a free region. This iterates over the bitmap metadata blocks to
1048b4c2116cSDennis Zhou (Facebook) * find an offset that will be guaranteed to fit the requirements. It is
1049b4c2116cSDennis Zhou (Facebook) * not quite first fit because, if the allocation does not fit in the contig
1050b4c2116cSDennis Zhou (Facebook) * hint of a block or chunk, it is skipped. This errs on the side of caution
1051b4c2116cSDennis Zhou (Facebook) * to prevent excess iteration.
Poor alignment can cause the allocator to 1052b4c2116cSDennis Zhou (Facebook) * skip over blocks and chunks that have valid free areas. 1053b4c2116cSDennis Zhou (Facebook) * 105440064aecSDennis Zhou (Facebook) * RETURNS: 105540064aecSDennis Zhou (Facebook) * The offset in the bitmap to begin searching. 105640064aecSDennis Zhou (Facebook) * -1 if no offset is found. 105740064aecSDennis Zhou (Facebook) */ 105840064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, 105940064aecSDennis Zhou (Facebook) size_t align, bool pop_only) 106040064aecSDennis Zhou (Facebook) { 106192c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1062b4c2116cSDennis Zhou (Facebook) int bit_off, bits, next_off; 106340064aecSDennis Zhou (Facebook) 106413f96637SDennis Zhou (Facebook) /* 106513f96637SDennis Zhou (Facebook) * Check to see if the allocation can fit in the chunk's contig hint. 106613f96637SDennis Zhou (Facebook) * This is an optimization to prevent scanning by assuming if it 106713f96637SDennis Zhou (Facebook) * cannot fit in the global hint, there is memory pressure and creating 106813f96637SDennis Zhou (Facebook) * a new chunk would happen soon. 106913f96637SDennis Zhou (Facebook) */ 107092c14cabSDennis Zhou bit_off = ALIGN(chunk_md->contig_hint_start, align) - 107192c14cabSDennis Zhou chunk_md->contig_hint_start; 107292c14cabSDennis Zhou if (bit_off + alloc_bits > chunk_md->contig_hint) 107313f96637SDennis Zhou (Facebook) return -1; 107413f96637SDennis Zhou (Facebook) 1075d33d9f3dSDennis Zhou bit_off = pcpu_next_hint(chunk_md, alloc_bits); 1076b4c2116cSDennis Zhou (Facebook) bits = 0; 1077b4c2116cSDennis Zhou (Facebook) pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { 107840064aecSDennis Zhou (Facebook) if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, 1079b4c2116cSDennis Zhou (Facebook) &next_off)) 108040064aecSDennis Zhou (Facebook) break; 108140064aecSDennis Zhou (Facebook) 1082b4c2116cSDennis Zhou (Facebook) bit_off = next_off; 108340064aecSDennis Zhou (Facebook) bits = 0; 108440064aecSDennis Zhou (Facebook) } 108540064aecSDennis Zhou (Facebook) 108640064aecSDennis Zhou (Facebook) if (bit_off == pcpu_chunk_map_bits(chunk)) 108740064aecSDennis Zhou (Facebook) return -1; 108840064aecSDennis Zhou (Facebook) 108940064aecSDennis Zhou (Facebook) return bit_off; 109040064aecSDennis Zhou (Facebook) } 109140064aecSDennis Zhou (Facebook) 1092b89462a9SDennis Zhou /* 1093b89462a9SDennis Zhou * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() 1094b89462a9SDennis Zhou * @map: the address to base the search on 1095b89462a9SDennis Zhou * @size: the bitmap size in bits 1096b89462a9SDennis Zhou * @start: the bitnumber to start searching at 1097b89462a9SDennis Zhou * @nr: the number of zeroed bits we're looking for 1098b89462a9SDennis Zhou * @align_mask: alignment mask for zero area 1099b89462a9SDennis Zhou * @largest_off: offset of the largest area skipped 1100b89462a9SDennis Zhou * @largest_bits: size of the largest area skipped 1101b89462a9SDennis Zhou * 1102b89462a9SDennis Zhou * The @align_mask should be one less than a power of 2. 1103b89462a9SDennis Zhou * 1104b89462a9SDennis Zhou * This is a modified version of bitmap_find_next_zero_area_off() to remember 1105b89462a9SDennis Zhou * the largest area that was skipped. This is imperfect, but in general is 1106b89462a9SDennis Zhou * good enough. The largest remembered region is the largest failed region 1107b89462a9SDennis Zhou * seen. 
This does not include anything we possibly skipped due to alignment. 1108b89462a9SDennis Zhou * pcpu_block_update_scan() does scan backwards to try and recover what was 1109b89462a9SDennis Zhou * lost to alignment. While this can cause scanning to miss earlier possible 1110b89462a9SDennis Zhou * free areas, smaller allocations will eventually fill those holes. 1111b89462a9SDennis Zhou */ 1112b89462a9SDennis Zhou static unsigned long pcpu_find_zero_area(unsigned long *map, 1113b89462a9SDennis Zhou unsigned long size, 1114b89462a9SDennis Zhou unsigned long start, 1115b89462a9SDennis Zhou unsigned long nr, 1116b89462a9SDennis Zhou unsigned long align_mask, 1117b89462a9SDennis Zhou unsigned long *largest_off, 1118b89462a9SDennis Zhou unsigned long *largest_bits) 1119b89462a9SDennis Zhou { 1120b89462a9SDennis Zhou unsigned long index, end, i, area_off, area_bits; 1121b89462a9SDennis Zhou again: 1122b89462a9SDennis Zhou index = find_next_zero_bit(map, size, start); 1123b89462a9SDennis Zhou 1124b89462a9SDennis Zhou /* Align allocation */ 1125b89462a9SDennis Zhou index = __ALIGN_MASK(index, align_mask); 1126b89462a9SDennis Zhou area_off = index; 1127b89462a9SDennis Zhou 1128b89462a9SDennis Zhou end = index + nr; 1129b89462a9SDennis Zhou if (end > size) 1130b89462a9SDennis Zhou return end; 1131b89462a9SDennis Zhou i = find_next_bit(map, end, index); 1132b89462a9SDennis Zhou if (i < end) { 1133b89462a9SDennis Zhou area_bits = i - area_off; 1134b89462a9SDennis Zhou /* remember largest unused area with best alignment */ 1135b89462a9SDennis Zhou if (area_bits > *largest_bits || 1136b89462a9SDennis Zhou (area_bits == *largest_bits && *largest_off && 1137b89462a9SDennis Zhou (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { 1138b89462a9SDennis Zhou *largest_off = area_off; 1139b89462a9SDennis Zhou *largest_bits = area_bits; 1140b89462a9SDennis Zhou } 1141b89462a9SDennis Zhou 1142b89462a9SDennis Zhou start = i + 1; 1143b89462a9SDennis Zhou goto again; 1144b89462a9SDennis Zhou } 1145b89462a9SDennis Zhou return index; 1146b89462a9SDennis Zhou } 1147b89462a9SDennis Zhou 114840064aecSDennis Zhou (Facebook) /** 114940064aecSDennis Zhou (Facebook) * pcpu_alloc_area - allocates an area from a pcpu_chunk 115040064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 115140064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 115240064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE) 115340064aecSDennis Zhou (Facebook) * @start: bit_off to start searching 115440064aecSDennis Zhou (Facebook) * 115540064aecSDennis Zhou (Facebook) * This function takes in a @start offset to begin searching to fit an 1156b4c2116cSDennis Zhou (Facebook) * allocation of @alloc_bits with alignment @align. It needs to scan 1157b4c2116cSDennis Zhou (Facebook) * the allocation map because if it fits within the block's contig hint, 1158b4c2116cSDennis Zhou (Facebook) * @start will be block->first_free. This is an attempt to fill the 1159b4c2116cSDennis Zhou (Facebook) * allocation prior to breaking the contig hint. The allocation and 1160b4c2116cSDennis Zhou (Facebook) * boundary maps are updated accordingly if it confirms a valid 1161b4c2116cSDennis Zhou (Facebook) * free area. 116240064aecSDennis Zhou (Facebook) * 116340064aecSDennis Zhou (Facebook) * RETURNS: 116440064aecSDennis Zhou (Facebook) * Allocated addr offset in @chunk on success. 116540064aecSDennis Zhou (Facebook) * -1 if no matching area is found. 
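 *
 * Illustration only (the offsets are invented): allocating 4 bits at
 * bit_off 64 sets alloc_map bits 64-67, sets bound_map bits 64 and 68,
 * and clears bound_map bits 65-67, so the matching free can later
 * recover the allocation size from the boundary map alone.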
116640064aecSDennis Zhou (Facebook) */ 116740064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, 116840064aecSDennis Zhou (Facebook) size_t align, int start) 116940064aecSDennis Zhou (Facebook) { 117092c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 117140064aecSDennis Zhou (Facebook) size_t align_mask = (align) ? (align - 1) : 0; 1172b89462a9SDennis Zhou unsigned long area_off = 0, area_bits = 0; 117340064aecSDennis Zhou (Facebook) int bit_off, end, oslot; 11749f7dcf22STejun Heo 11754f996e23STejun Heo lockdep_assert_held(&pcpu_lock); 11764f996e23STejun Heo 117740064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1178833af842STejun Heo 1179833af842STejun Heo /* 118040064aecSDennis Zhou (Facebook) * Search to find a fit. 1181833af842STejun Heo */ 11828c43004aSDennis Zhou end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, 11838c43004aSDennis Zhou pcpu_chunk_map_bits(chunk)); 1184b89462a9SDennis Zhou bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, 1185b89462a9SDennis Zhou align_mask, &area_off, &area_bits); 118640064aecSDennis Zhou (Facebook) if (bit_off >= end) 1187a16037c8STejun Heo return -1; 1188a16037c8STejun Heo 1189b89462a9SDennis Zhou if (area_bits) 1190b89462a9SDennis Zhou pcpu_block_update_scan(chunk, area_off, area_bits); 1191b89462a9SDennis Zhou 119240064aecSDennis Zhou (Facebook) /* update alloc map */ 119340064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, bit_off, alloc_bits); 1194a16037c8STejun Heo 119540064aecSDennis Zhou (Facebook) /* update boundary map */ 119640064aecSDennis Zhou (Facebook) set_bit(bit_off, chunk->bound_map); 119740064aecSDennis Zhou (Facebook) bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); 119840064aecSDennis Zhou (Facebook) set_bit(bit_off + alloc_bits, chunk->bound_map); 1199a16037c8STejun Heo 120040064aecSDennis Zhou (Facebook) chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; 120140064aecSDennis Zhou (Facebook) 120286b442fbSDennis Zhou (Facebook) /* update first free bit */ 120392c14cabSDennis Zhou if (bit_off == chunk_md->first_free) 120492c14cabSDennis Zhou chunk_md->first_free = find_next_zero_bit( 120586b442fbSDennis Zhou (Facebook) chunk->alloc_map, 120686b442fbSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk), 120786b442fbSDennis Zhou (Facebook) bit_off + alloc_bits); 120886b442fbSDennis Zhou (Facebook) 1209ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); 121040064aecSDennis Zhou (Facebook) 121140064aecSDennis Zhou (Facebook) pcpu_chunk_relocate(chunk, oslot); 121240064aecSDennis Zhou (Facebook) 121340064aecSDennis Zhou (Facebook) return bit_off * PCPU_MIN_ALLOC_SIZE; 1214a16037c8STejun Heo } 1215a16037c8STejun Heo 1216a16037c8STejun Heo /** 121740064aecSDennis Zhou (Facebook) * pcpu_free_area - frees the corresponding offset 1218fbf59bc9STejun Heo * @chunk: chunk of interest 121940064aecSDennis Zhou (Facebook) * @off: addr offset into chunk 1220fbf59bc9STejun Heo * 122140064aecSDennis Zhou (Facebook) * This function determines the size of an allocation to free using 122240064aecSDennis Zhou (Facebook) * the boundary bitmap and clears the allocation map. 12235b32af91SRoman Gushchin * 12245b32af91SRoman Gushchin * RETURNS: 12255b32af91SRoman Gushchin * Number of freed bytes. 
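 *
 * Illustration only (invented offsets): if @off maps to bit 64 and the
 * next set bit in the boundary map is 68, then bits = 4 and
 * 4 * PCPU_MIN_ALLOC_SIZE bytes are returned to the chunk's free_bytes.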
1226fbf59bc9STejun Heo */ 12275b32af91SRoman Gushchin static int pcpu_free_area(struct pcpu_chunk *chunk, int off) 1228fbf59bc9STejun Heo { 122992c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 12305b32af91SRoman Gushchin int bit_off, bits, end, oslot, freed; 1231fbf59bc9STejun Heo 12325ccd30e4SDennis Zhou lockdep_assert_held(&pcpu_lock); 123330a5b536SDennis Zhou pcpu_stats_area_dealloc(chunk); 12345ccd30e4SDennis Zhou 123540064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1236723ad1d9SAl Viro 123740064aecSDennis Zhou (Facebook) bit_off = off / PCPU_MIN_ALLOC_SIZE; 1238fbf59bc9STejun Heo 123940064aecSDennis Zhou (Facebook) /* find end index */ 124040064aecSDennis Zhou (Facebook) end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), 124140064aecSDennis Zhou (Facebook) bit_off + 1); 124240064aecSDennis Zhou (Facebook) bits = end - bit_off; 124340064aecSDennis Zhou (Facebook) bitmap_clear(chunk->alloc_map, bit_off, bits); 12443d331ad7SAl Viro 12455b32af91SRoman Gushchin freed = bits * PCPU_MIN_ALLOC_SIZE; 12465b32af91SRoman Gushchin 124740064aecSDennis Zhou (Facebook) /* update metadata */ 12485b32af91SRoman Gushchin chunk->free_bytes += freed; 1249fbf59bc9STejun Heo 125086b442fbSDennis Zhou (Facebook) /* update first free bit */ 125192c14cabSDennis Zhou chunk_md->first_free = min(chunk_md->first_free, bit_off); 125286b442fbSDennis Zhou (Facebook) 1253ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_free(chunk, bit_off, bits); 1254b539b87fSTejun Heo 1255fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 12565b32af91SRoman Gushchin 12575b32af91SRoman Gushchin return freed; 1258fbf59bc9STejun Heo } 1259fbf59bc9STejun Heo 1260047924c9SDennis Zhou static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) 1261047924c9SDennis Zhou { 1262047924c9SDennis Zhou block->scan_hint = 0; 1263047924c9SDennis Zhou block->contig_hint = nr_bits; 1264047924c9SDennis Zhou block->left_free = nr_bits; 1265047924c9SDennis Zhou block->right_free = nr_bits; 1266047924c9SDennis Zhou block->first_free = 0; 1267047924c9SDennis Zhou block->nr_bits = nr_bits; 1268047924c9SDennis Zhou } 1269047924c9SDennis Zhou 1270ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) 1271ca460b3cSDennis Zhou (Facebook) { 1272ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *md_block; 1273ca460b3cSDennis Zhou (Facebook) 127492c14cabSDennis Zhou /* init the chunk's block */ 127592c14cabSDennis Zhou pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); 127692c14cabSDennis Zhou 1277ca460b3cSDennis Zhou (Facebook) for (md_block = chunk->md_blocks; 1278ca460b3cSDennis Zhou (Facebook) md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); 1279047924c9SDennis Zhou md_block++) 1280047924c9SDennis Zhou pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); 1281ca460b3cSDennis Zhou (Facebook) } 1282ca460b3cSDennis Zhou (Facebook) 128340064aecSDennis Zhou (Facebook) /** 128440064aecSDennis Zhou (Facebook) * pcpu_alloc_first_chunk - creates chunks that serve the first chunk 128540064aecSDennis Zhou (Facebook) * @tmp_addr: the start of the region served 128640064aecSDennis Zhou (Facebook) * @map_size: size of the region served 128740064aecSDennis Zhou (Facebook) * 128840064aecSDennis Zhou (Facebook) * This is responsible for creating the chunks that serve the first chunk. 
The 128940064aecSDennis Zhou (Facebook) * base_addr is page aligned down of @tmp_addr while the region end is page 129040064aecSDennis Zhou (Facebook) * aligned up. Offsets are kept track of to determine the region served. All 129140064aecSDennis Zhou (Facebook) * this is done to appease the bitmap allocator in avoiding partial blocks. 129240064aecSDennis Zhou (Facebook) * 129340064aecSDennis Zhou (Facebook) * RETURNS: 129440064aecSDennis Zhou (Facebook) * Chunk serving the region at @tmp_addr of @map_size. 129540064aecSDennis Zhou (Facebook) */ 1296c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, 129740064aecSDennis Zhou (Facebook) int map_size) 129810edf5b0SDennis Zhou (Facebook) { 129910edf5b0SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 1300ca460b3cSDennis Zhou (Facebook) unsigned long aligned_addr, lcm_align; 130140064aecSDennis Zhou (Facebook) int start_offset, offset_bits, region_size, region_bits; 1302f655f405SMike Rapoport size_t alloc_size; 1303c0ebfdc3SDennis Zhou (Facebook) 1304c0ebfdc3SDennis Zhou (Facebook) /* region calculations */ 1305c0ebfdc3SDennis Zhou (Facebook) aligned_addr = tmp_addr & PAGE_MASK; 1306c0ebfdc3SDennis Zhou (Facebook) 1307c0ebfdc3SDennis Zhou (Facebook) start_offset = tmp_addr - aligned_addr; 13086b9d7c8eSDennis Zhou (Facebook) 1309ca460b3cSDennis Zhou (Facebook) /* 1310ca460b3cSDennis Zhou (Facebook) * Align the end of the region with the LCM of PAGE_SIZE and 1311ca460b3cSDennis Zhou (Facebook) * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of 1312ca460b3cSDennis Zhou (Facebook) * the other. 1313ca460b3cSDennis Zhou (Facebook) */ 1314ca460b3cSDennis Zhou (Facebook) lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE); 1315ca460b3cSDennis Zhou (Facebook) region_size = ALIGN(start_offset + map_size, lcm_align); 131610edf5b0SDennis Zhou (Facebook) 1317c0ebfdc3SDennis Zhou (Facebook) /* allocate chunk */ 1318*61cf93d3SDennis Zhou alloc_size = struct_size(chunk, populated, 1319*61cf93d3SDennis Zhou BITS_TO_LONGS(region_size >> PAGE_SHIFT)); 1320f655f405SMike Rapoport chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1321f655f405SMike Rapoport if (!chunk) 1322f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1323f655f405SMike Rapoport alloc_size); 1324c0ebfdc3SDennis Zhou (Facebook) 132510edf5b0SDennis Zhou (Facebook) INIT_LIST_HEAD(&chunk->list); 1326c0ebfdc3SDennis Zhou (Facebook) 1327c0ebfdc3SDennis Zhou (Facebook) chunk->base_addr = (void *)aligned_addr; 132810edf5b0SDennis Zhou (Facebook) chunk->start_offset = start_offset; 13296b9d7c8eSDennis Zhou (Facebook) chunk->end_offset = region_size - chunk->start_offset - map_size; 1330c0ebfdc3SDennis Zhou (Facebook) 13318ab16c43SDennis Zhou (Facebook) chunk->nr_pages = region_size >> PAGE_SHIFT; 133240064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 1333c0ebfdc3SDennis Zhou (Facebook) 1334f655f405SMike Rapoport alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); 1335f655f405SMike Rapoport chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1336f655f405SMike Rapoport if (!chunk->alloc_map) 1337f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1338f655f405SMike Rapoport alloc_size); 1339f655f405SMike Rapoport 1340f655f405SMike Rapoport alloc_size = 1341f655f405SMike Rapoport BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); 1342f655f405SMike Rapoport chunk->bound_map = memblock_alloc(alloc_size, 
SMP_CACHE_BYTES); 1343f655f405SMike Rapoport if (!chunk->bound_map) 1344f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1345f655f405SMike Rapoport alloc_size); 1346f655f405SMike Rapoport 1347f655f405SMike Rapoport alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); 1348f655f405SMike Rapoport chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1349f655f405SMike Rapoport if (!chunk->md_blocks) 1350f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1351f655f405SMike Rapoport alloc_size); 1352f655f405SMike Rapoport 13533c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 13543c7be18aSRoman Gushchin /* first chunk isn't memcg-aware */ 13553c7be18aSRoman Gushchin chunk->obj_cgroups = NULL; 13563c7be18aSRoman Gushchin #endif 1357ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 135810edf5b0SDennis Zhou (Facebook) 135910edf5b0SDennis Zhou (Facebook) /* manage populated page bitmap */ 136010edf5b0SDennis Zhou (Facebook) chunk->immutable = true; 13618ab16c43SDennis Zhou (Facebook) bitmap_fill(chunk->populated, chunk->nr_pages); 13628ab16c43SDennis Zhou (Facebook) chunk->nr_populated = chunk->nr_pages; 1363b239f7daSDennis Zhou chunk->nr_empty_pop_pages = chunk->nr_pages; 136410edf5b0SDennis Zhou (Facebook) 136540064aecSDennis Zhou (Facebook) chunk->free_bytes = map_size; 1366c0ebfdc3SDennis Zhou (Facebook) 1367c0ebfdc3SDennis Zhou (Facebook) if (chunk->start_offset) { 1368c0ebfdc3SDennis Zhou (Facebook) /* hide the beginning of the bitmap */ 136940064aecSDennis Zhou (Facebook) offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; 137040064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 0, offset_bits); 137140064aecSDennis Zhou (Facebook) set_bit(0, chunk->bound_map); 137240064aecSDennis Zhou (Facebook) set_bit(offset_bits, chunk->bound_map); 1373ca460b3cSDennis Zhou (Facebook) 137492c14cabSDennis Zhou chunk->chunk_md.first_free = offset_bits; 137586b442fbSDennis Zhou (Facebook) 1376ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, 0, offset_bits); 1377c0ebfdc3SDennis Zhou (Facebook) } 1378c0ebfdc3SDennis Zhou (Facebook) 13796b9d7c8eSDennis Zhou (Facebook) if (chunk->end_offset) { 13806b9d7c8eSDennis Zhou (Facebook) /* hide the end of the bitmap */ 138140064aecSDennis Zhou (Facebook) offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; 138240064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 138340064aecSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk) - offset_bits, 138440064aecSDennis Zhou (Facebook) offset_bits); 138540064aecSDennis Zhou (Facebook) set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, 138640064aecSDennis Zhou (Facebook) chunk->bound_map); 138740064aecSDennis Zhou (Facebook) set_bit(region_bits, chunk->bound_map); 13886b9d7c8eSDennis Zhou (Facebook) 1389ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) 1390ca460b3cSDennis Zhou (Facebook) - offset_bits, offset_bits); 1391ca460b3cSDennis Zhou (Facebook) } 139240064aecSDennis Zhou (Facebook) 139310edf5b0SDennis Zhou (Facebook) return chunk; 139410edf5b0SDennis Zhou (Facebook) } 139510edf5b0SDennis Zhou (Facebook) 13963c7be18aSRoman Gushchin static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp) 13976081089fSTejun Heo { 13986081089fSTejun Heo struct pcpu_chunk *chunk; 139940064aecSDennis Zhou (Facebook) int region_bits; 14006081089fSTejun Heo 140147504ee0SDennis Zhou chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); 
14026081089fSTejun Heo if (!chunk) 14036081089fSTejun Heo return NULL; 14046081089fSTejun Heo 14056081089fSTejun Heo INIT_LIST_HEAD(&chunk->list); 1406c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages; 140740064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 140840064aecSDennis Zhou (Facebook) 140940064aecSDennis Zhou (Facebook) chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * 141047504ee0SDennis Zhou sizeof(chunk->alloc_map[0]), gfp); 141140064aecSDennis Zhou (Facebook) if (!chunk->alloc_map) 141240064aecSDennis Zhou (Facebook) goto alloc_map_fail; 141340064aecSDennis Zhou (Facebook) 141440064aecSDennis Zhou (Facebook) chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * 141547504ee0SDennis Zhou sizeof(chunk->bound_map[0]), gfp); 141640064aecSDennis Zhou (Facebook) if (!chunk->bound_map) 141740064aecSDennis Zhou (Facebook) goto bound_map_fail; 141840064aecSDennis Zhou (Facebook) 1419ca460b3cSDennis Zhou (Facebook) chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * 142047504ee0SDennis Zhou sizeof(chunk->md_blocks[0]), gfp); 1421ca460b3cSDennis Zhou (Facebook) if (!chunk->md_blocks) 1422ca460b3cSDennis Zhou (Facebook) goto md_blocks_fail; 1423ca460b3cSDennis Zhou (Facebook) 14243c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 14253c7be18aSRoman Gushchin if (pcpu_is_memcg_chunk(type)) { 14263c7be18aSRoman Gushchin chunk->obj_cgroups = 14273c7be18aSRoman Gushchin pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * 14283c7be18aSRoman Gushchin sizeof(struct obj_cgroup *), gfp); 14293c7be18aSRoman Gushchin if (!chunk->obj_cgroups) 14303c7be18aSRoman Gushchin goto objcg_fail; 14313c7be18aSRoman Gushchin } 14323c7be18aSRoman Gushchin #endif 14333c7be18aSRoman Gushchin 1434ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 1435ca460b3cSDennis Zhou (Facebook) 143640064aecSDennis Zhou (Facebook) /* init metadata */ 143740064aecSDennis Zhou (Facebook) chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; 1438c0ebfdc3SDennis Zhou (Facebook) 14396081089fSTejun Heo return chunk; 144040064aecSDennis Zhou (Facebook) 14413c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 14423c7be18aSRoman Gushchin objcg_fail: 14433c7be18aSRoman Gushchin pcpu_mem_free(chunk->md_blocks); 14443c7be18aSRoman Gushchin #endif 1445ca460b3cSDennis Zhou (Facebook) md_blocks_fail: 1446ca460b3cSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 144740064aecSDennis Zhou (Facebook) bound_map_fail: 144840064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 144940064aecSDennis Zhou (Facebook) alloc_map_fail: 145040064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk); 145140064aecSDennis Zhou (Facebook) 145240064aecSDennis Zhou (Facebook) return NULL; 14536081089fSTejun Heo } 14546081089fSTejun Heo 14556081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk) 14566081089fSTejun Heo { 14576081089fSTejun Heo if (!chunk) 14586081089fSTejun Heo return; 14593c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 14603c7be18aSRoman Gushchin pcpu_mem_free(chunk->obj_cgroups); 14613c7be18aSRoman Gushchin #endif 14626685b357SMike Rapoport pcpu_mem_free(chunk->md_blocks); 146340064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 146440064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 14651d5cfdb0STetsuo Handa pcpu_mem_free(chunk); 14666081089fSTejun Heo } 14676081089fSTejun Heo 1468b539b87fSTejun Heo /** 1469b539b87fSTejun Heo * pcpu_chunk_populated - post-population bookkeeping 1470b539b87fSTejun Heo * @chunk: pcpu_chunk which 
got populated
1471b539b87fSTejun Heo * @page_start: the start page
1472b539b87fSTejun Heo * @page_end: the end page
1473b539b87fSTejun Heo *
1474b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1475b539b87fSTejun Heo * the bookkeeping information accordingly. Must be called after each
1476b539b87fSTejun Heo * successful population.
1480b539b87fSTejun Heo */
148140064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1482b239f7daSDennis Zhou int page_end)
1483b539b87fSTejun Heo {
1484b539b87fSTejun Heo int nr = page_end - page_start;
1485b539b87fSTejun Heo
1486b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock);
1487b539b87fSTejun Heo
1488b539b87fSTejun Heo bitmap_set(chunk->populated, page_start, nr);
1489b539b87fSTejun Heo chunk->nr_populated += nr;
14907e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += nr;
149140064aecSDennis Zhou (Facebook)
1492b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, nr);
149340064aecSDennis Zhou (Facebook) }
1494b539b87fSTejun Heo
1495b539b87fSTejun Heo /**
1496b539b87fSTejun Heo * pcpu_chunk_depopulated - post-depopulation bookkeeping
1497b539b87fSTejun Heo * @chunk: pcpu_chunk which got depopulated
1498b539b87fSTejun Heo * @page_start: the start page
1499b539b87fSTejun Heo * @page_end: the end page
1500b539b87fSTejun Heo *
1501b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1502b539b87fSTejun Heo * Update the bookkeeping information accordingly. Must be called after
1503b539b87fSTejun Heo * each successful depopulation.
1504b539b87fSTejun Heo */
1505b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1506b539b87fSTejun Heo int page_start, int page_end)
1507b539b87fSTejun Heo {
1508b539b87fSTejun Heo int nr = page_end - page_start;
1509b539b87fSTejun Heo
1510b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock);
1511b539b87fSTejun Heo
1512b539b87fSTejun Heo bitmap_clear(chunk->populated, page_start, nr);
1513b539b87fSTejun Heo chunk->nr_populated -= nr;
15147e8a6304SDennis Zhou (Facebook) pcpu_nr_populated -= nr;
1515b239f7daSDennis Zhou
1516b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, -nr);
1517b539b87fSTejun Heo }
1518b539b87fSTejun Heo
1519fbf59bc9STejun Heo /*
15209f645532STejun Heo * Chunk management implementation.
1521fbf59bc9STejun Heo *
15229f645532STejun Heo * To allow different implementations, chunk alloc/free and
15239f645532STejun Heo * [de]population are implemented in a separate file which is pulled
15249f645532STejun Heo * into this file and compiled together. The following functions
15259f645532STejun Heo * should be implemented.
1526ccea34b5STejun Heo * 15279f645532STejun Heo * pcpu_populate_chunk - populate the specified range of a chunk 15289f645532STejun Heo * pcpu_depopulate_chunk - depopulate the specified range of a chunk 15299f645532STejun Heo * pcpu_create_chunk - create a new chunk 15309f645532STejun Heo * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop 15319f645532STejun Heo * pcpu_addr_to_page - translate address to physical address 15329f645532STejun Heo * pcpu_verify_alloc_info - check alloc_info is acceptable during init 1533fbf59bc9STejun Heo */ 153415d9f3d1SDennis Zhou static int pcpu_populate_chunk(struct pcpu_chunk *chunk, 153547504ee0SDennis Zhou int page_start, int page_end, gfp_t gfp); 153615d9f3d1SDennis Zhou static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, 153715d9f3d1SDennis Zhou int page_start, int page_end); 15383c7be18aSRoman Gushchin static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type, 15393c7be18aSRoman Gushchin gfp_t gfp); 15409f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); 15419f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr); 15429f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); 1543fbf59bc9STejun Heo 1544b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM 1545b0c9778bSTejun Heo #include "percpu-km.c" 1546b0c9778bSTejun Heo #else 15479f645532STejun Heo #include "percpu-vm.c" 1548b0c9778bSTejun Heo #endif 1549fbf59bc9STejun Heo 1550fbf59bc9STejun Heo /** 155188999a89STejun Heo * pcpu_chunk_addr_search - determine chunk containing specified address 155288999a89STejun Heo * @addr: address for which the chunk needs to be determined. 155388999a89STejun Heo * 1554c0ebfdc3SDennis Zhou (Facebook) * This is an internal function that handles all but static allocations. 1555c0ebfdc3SDennis Zhou (Facebook) * Static percpu address values should never be passed into the allocator. 1556c0ebfdc3SDennis Zhou (Facebook) * 155788999a89STejun Heo * RETURNS: 155888999a89STejun Heo * The address of the found chunk. 155988999a89STejun Heo */ 156088999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 156188999a89STejun Heo { 1562c0ebfdc3SDennis Zhou (Facebook) /* is it in the dynamic region (first chunk)? */ 1563560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) 1564c0ebfdc3SDennis Zhou (Facebook) return pcpu_first_chunk; 1565c0ebfdc3SDennis Zhou (Facebook) 1566c0ebfdc3SDennis Zhou (Facebook) /* is it in the reserved region? */ 1567560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) 156888999a89STejun Heo return pcpu_reserved_chunk; 156988999a89STejun Heo 157088999a89STejun Heo /* 157188999a89STejun Heo * The address is relative to unit0 which might be unused and 157288999a89STejun Heo * thus unmapped. Offset the address to the unit space of the 157388999a89STejun Heo * current processor before looking it up in the vmalloc 157488999a89STejun Heo * space. Note that any possible cpu id can be used here, so 157588999a89STejun Heo * there's no need to worry about preemption or cpu hotplug. 
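 *
 * For example (hypothetical layout): if unit0 of a chunk maps to no
 * possible cpu and was therefore never populated, looking up the
 * unit0-relative address directly could hit an unbacked vmalloc page;
 * offsetting to the running cpu's unit first guarantees a mapped page,
 * and every unit's pages point back to the same struct pcpu_chunk.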
157688999a89STejun Heo */ 157788999a89STejun Heo addr += pcpu_unit_offsets[raw_smp_processor_id()]; 15789f645532STejun Heo return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 157988999a89STejun Heo } 158088999a89STejun Heo 15813c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 15823c7be18aSRoman Gushchin static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, 15833c7be18aSRoman Gushchin struct obj_cgroup **objcgp) 15843c7be18aSRoman Gushchin { 15853c7be18aSRoman Gushchin struct obj_cgroup *objcg; 15863c7be18aSRoman Gushchin 1587279c3393SRoman Gushchin if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT)) 15883c7be18aSRoman Gushchin return PCPU_CHUNK_ROOT; 15893c7be18aSRoman Gushchin 15903c7be18aSRoman Gushchin objcg = get_obj_cgroup_from_current(); 15913c7be18aSRoman Gushchin if (!objcg) 15923c7be18aSRoman Gushchin return PCPU_CHUNK_ROOT; 15933c7be18aSRoman Gushchin 15943c7be18aSRoman Gushchin if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) { 15953c7be18aSRoman Gushchin obj_cgroup_put(objcg); 15963c7be18aSRoman Gushchin return PCPU_FAIL_ALLOC; 15973c7be18aSRoman Gushchin } 15983c7be18aSRoman Gushchin 15993c7be18aSRoman Gushchin *objcgp = objcg; 16003c7be18aSRoman Gushchin return PCPU_CHUNK_MEMCG; 16013c7be18aSRoman Gushchin } 16023c7be18aSRoman Gushchin 16033c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 16043c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off, 16053c7be18aSRoman Gushchin size_t size) 16063c7be18aSRoman Gushchin { 16073c7be18aSRoman Gushchin if (!objcg) 16083c7be18aSRoman Gushchin return; 16093c7be18aSRoman Gushchin 16103c7be18aSRoman Gushchin if (chunk) { 16113c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; 1612772616b0SRoman Gushchin 1613772616b0SRoman Gushchin rcu_read_lock(); 1614772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 1615772616b0SRoman Gushchin size * num_possible_cpus()); 1616772616b0SRoman Gushchin rcu_read_unlock(); 16173c7be18aSRoman Gushchin } else { 16183c7be18aSRoman Gushchin obj_cgroup_uncharge(objcg, size * num_possible_cpus()); 16193c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16203c7be18aSRoman Gushchin } 16213c7be18aSRoman Gushchin } 16223c7be18aSRoman Gushchin 16233c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 16243c7be18aSRoman Gushchin { 16253c7be18aSRoman Gushchin struct obj_cgroup *objcg; 16263c7be18aSRoman Gushchin 16273c7be18aSRoman Gushchin if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk))) 16283c7be18aSRoman Gushchin return; 16293c7be18aSRoman Gushchin 16303c7be18aSRoman Gushchin objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; 16313c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; 16323c7be18aSRoman Gushchin 16333c7be18aSRoman Gushchin obj_cgroup_uncharge(objcg, size * num_possible_cpus()); 16343c7be18aSRoman Gushchin 1635772616b0SRoman Gushchin rcu_read_lock(); 1636772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 1637772616b0SRoman Gushchin -(size * num_possible_cpus())); 1638772616b0SRoman Gushchin rcu_read_unlock(); 1639772616b0SRoman Gushchin 16403c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16413c7be18aSRoman Gushchin } 16423c7be18aSRoman Gushchin 16433c7be18aSRoman Gushchin #else /* CONFIG_MEMCG_KMEM */ 16443c7be18aSRoman Gushchin static enum pcpu_chunk_type 16453c7be18aSRoman Gushchin pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) 
16463c7be18aSRoman Gushchin { 16473c7be18aSRoman Gushchin return PCPU_CHUNK_ROOT; 16483c7be18aSRoman Gushchin } 16493c7be18aSRoman Gushchin 16503c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 16513c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off, 16523c7be18aSRoman Gushchin size_t size) 16533c7be18aSRoman Gushchin { 16543c7be18aSRoman Gushchin } 16553c7be18aSRoman Gushchin 16563c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 16573c7be18aSRoman Gushchin { 16583c7be18aSRoman Gushchin } 16593c7be18aSRoman Gushchin #endif /* CONFIG_MEMCG_KMEM */ 16603c7be18aSRoman Gushchin 166188999a89STejun Heo /** 1662edcb4639STejun Heo * pcpu_alloc - the percpu allocator 1663cae3aeb8STejun Heo * @size: size of area to allocate in bytes 1664fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE) 1665edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available 16665835d96eSTejun Heo * @gfp: allocation flags 1667fbf59bc9STejun Heo * 16685835d96eSTejun Heo * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 16690ea7eeecSDaniel Borkmann * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN 16700ea7eeecSDaniel Borkmann * then no warning will be triggered on invalid or failed allocation 16710ea7eeecSDaniel Borkmann * requests. 1672fbf59bc9STejun Heo * 1673fbf59bc9STejun Heo * RETURNS: 1674fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1675fbf59bc9STejun Heo */ 16765835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 16775835d96eSTejun Heo gfp_t gfp) 1678fbf59bc9STejun Heo { 167928307d93SFilipe Manana gfp_t pcpu_gfp; 168028307d93SFilipe Manana bool is_atomic; 168128307d93SFilipe Manana bool do_warn; 16823c7be18aSRoman Gushchin enum pcpu_chunk_type type; 16833c7be18aSRoman Gushchin struct list_head *pcpu_slot; 16843c7be18aSRoman Gushchin struct obj_cgroup *objcg = NULL; 1685f2badb0cSTejun Heo static int warn_limit = 10; 16868744d859SDennis Zhou struct pcpu_chunk *chunk, *next; 1687f2badb0cSTejun Heo const char *err; 168840064aecSDennis Zhou (Facebook) int slot, off, cpu, ret; 1689403a91b1SJiri Kosina unsigned long flags; 1690f528f0b8SCatalin Marinas void __percpu *ptr; 169140064aecSDennis Zhou (Facebook) size_t bits, bit_align; 1692fbf59bc9STejun Heo 169328307d93SFilipe Manana gfp = current_gfp_context(gfp); 169428307d93SFilipe Manana /* whitelisted flags that can be passed to the backing allocators */ 169528307d93SFilipe Manana pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 169628307d93SFilipe Manana is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 169728307d93SFilipe Manana do_warn = !(gfp & __GFP_NOWARN); 169828307d93SFilipe Manana 1699723ad1d9SAl Viro /* 170040064aecSDennis Zhou (Facebook) * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, 170140064aecSDennis Zhou (Facebook) * therefore alignment must be a minimum of that many bytes. 170240064aecSDennis Zhou (Facebook) * An allocation may have internal fragmentation from rounding up 170340064aecSDennis Zhou (Facebook) * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. 
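 *
 * For example, assuming PCPU_MIN_ALLOC_SIZE is 4 bytes, a 5-byte request
 * is rounded up to 8 bytes and tracked as 2 bits in the allocation map
 * (bits = size >> PCPU_MIN_ALLOC_SHIFT), which is the worst-case waste
 * of 3 bytes.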
1704723ad1d9SAl Viro */ 1705d2f3c384SDennis Zhou (Facebook) if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) 1706d2f3c384SDennis Zhou (Facebook) align = PCPU_MIN_ALLOC_SIZE; 1707723ad1d9SAl Viro 1708d2f3c384SDennis Zhou (Facebook) size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); 170940064aecSDennis Zhou (Facebook) bits = size >> PCPU_MIN_ALLOC_SHIFT; 171040064aecSDennis Zhou (Facebook) bit_align = align >> PCPU_MIN_ALLOC_SHIFT; 17112f69fa82SViro 17123ca45a46Szijun_hu if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 17133ca45a46Szijun_hu !is_power_of_2(align))) { 17140ea7eeecSDaniel Borkmann WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1715756a025fSJoe Perches size, align); 1716fbf59bc9STejun Heo return NULL; 1717fbf59bc9STejun Heo } 1718fbf59bc9STejun Heo 17193c7be18aSRoman Gushchin type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg); 17203c7be18aSRoman Gushchin if (unlikely(type == PCPU_FAIL_ALLOC)) 17213c7be18aSRoman Gushchin return NULL; 17223c7be18aSRoman Gushchin pcpu_slot = pcpu_chunk_list(type); 17233c7be18aSRoman Gushchin 1724f52ba1feSKirill Tkhai if (!is_atomic) { 1725f52ba1feSKirill Tkhai /* 1726f52ba1feSKirill Tkhai * pcpu_balance_workfn() allocates memory under this mutex, 1727f52ba1feSKirill Tkhai * and it may wait for memory reclaim. Allow current task 1728f52ba1feSKirill Tkhai * to become OOM victim, in case of memory pressure. 1729f52ba1feSKirill Tkhai */ 17303c7be18aSRoman Gushchin if (gfp & __GFP_NOFAIL) { 17316710e594STejun Heo mutex_lock(&pcpu_alloc_mutex); 17323c7be18aSRoman Gushchin } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { 17333c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 1734f52ba1feSKirill Tkhai return NULL; 1735f52ba1feSKirill Tkhai } 17363c7be18aSRoman Gushchin } 17376710e594STejun Heo 1738403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1739fbf59bc9STejun Heo 1740edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */ 1741edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) { 1742edcb4639STejun Heo chunk = pcpu_reserved_chunk; 1743833af842STejun Heo 174440064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); 174540064aecSDennis Zhou (Facebook) if (off < 0) { 1746833af842STejun Heo err = "alloc from reserved chunk failed"; 1747ccea34b5STejun Heo goto fail_unlock; 1748f2badb0cSTejun Heo } 1749833af842STejun Heo 175040064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1751edcb4639STejun Heo if (off >= 0) 1752edcb4639STejun Heo goto area_found; 1753833af842STejun Heo 1754f2badb0cSTejun Heo err = "alloc from reserved chunk failed"; 1755ccea34b5STejun Heo goto fail_unlock; 1756edcb4639STejun Heo } 1757edcb4639STejun Heo 1758ccea34b5STejun Heo restart: 1759edcb4639STejun Heo /* search through normal chunks */ 1760fbf59bc9STejun Heo for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 17618744d859SDennis Zhou list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) { 176240064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, 176340064aecSDennis Zhou (Facebook) is_atomic); 17648744d859SDennis Zhou if (off < 0) { 17658744d859SDennis Zhou if (slot < PCPU_SLOT_FAIL_THRESHOLD) 17668744d859SDennis Zhou pcpu_chunk_move(chunk, 0); 1767fbf59bc9STejun Heo continue; 17688744d859SDennis Zhou } 1769ccea34b5STejun Heo 177040064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1771fbf59bc9STejun Heo if (off >= 0) 
1772fbf59bc9STejun Heo goto area_found; 177340064aecSDennis Zhou (Facebook) 1774fbf59bc9STejun Heo } 1775fbf59bc9STejun Heo } 1776fbf59bc9STejun Heo 1777403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1778ccea34b5STejun Heo 1779b38d08f3STejun Heo /* 1780b38d08f3STejun Heo * No space left. Create a new chunk. We don't want multiple 1781b38d08f3STejun Heo * tasks to create chunks simultaneously. Serialize and create iff 1782b38d08f3STejun Heo * there's still no empty chunk after grabbing the mutex. 1783b38d08f3STejun Heo */ 178411df02bfSDennis Zhou if (is_atomic) { 178511df02bfSDennis Zhou err = "atomic alloc failed, no space left"; 17865835d96eSTejun Heo goto fail; 178711df02bfSDennis Zhou } 17885835d96eSTejun Heo 1789b38d08f3STejun Heo if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { 17903c7be18aSRoman Gushchin chunk = pcpu_create_chunk(type, pcpu_gfp); 1791f2badb0cSTejun Heo if (!chunk) { 1792f2badb0cSTejun Heo err = "failed to allocate new chunk"; 1793b38d08f3STejun Heo goto fail; 1794f2badb0cSTejun Heo } 1795ccea34b5STejun Heo 1796403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1797fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1798b38d08f3STejun Heo } else { 1799b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1800b38d08f3STejun Heo } 1801b38d08f3STejun Heo 1802ccea34b5STejun Heo goto restart; 1803fbf59bc9STejun Heo 1804fbf59bc9STejun Heo area_found: 180530a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size); 1806403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1807ccea34b5STejun Heo 1808dca49645STejun Heo /* populate if not all pages are already there */ 18095835d96eSTejun Heo if (!is_atomic) { 1810e837dfdeSDennis Zhou unsigned int page_start, page_end, rs, re; 1811e04d3208STejun Heo 1812dca49645STejun Heo page_start = PFN_DOWN(off); 1813dca49645STejun Heo page_end = PFN_UP(off + size); 1814dca49645STejun Heo 1815e837dfdeSDennis Zhou bitmap_for_each_clear_region(chunk->populated, rs, re, 181691e914c5SDennis Zhou (Facebook) page_start, page_end) { 1817dca49645STejun Heo WARN_ON(chunk->immutable); 1818dca49645STejun Heo 1819554fef1cSDennis Zhou ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); 1820b38d08f3STejun Heo 1821403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1822b38d08f3STejun Heo if (ret) { 182340064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off); 1824f2badb0cSTejun Heo err = "failed to populate"; 1825ccea34b5STejun Heo goto fail_unlock; 1826fbf59bc9STejun Heo } 1827b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, re); 1828b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1829dca49645STejun Heo } 1830dca49645STejun Heo 1831ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1832e04d3208STejun Heo } 1833ccea34b5STejun Heo 18341a4d7607STejun Heo if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 18351a4d7607STejun Heo pcpu_schedule_balance_work(); 18361a4d7607STejun Heo 1837dca49645STejun Heo /* clear the areas and return address relative to base address */ 1838dca49645STejun Heo for_each_possible_cpu(cpu) 1839dca49645STejun Heo memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1840dca49645STejun Heo 1841f528f0b8SCatalin Marinas ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 18428a8c35faSLarry Finger kmemleak_alloc_percpu(ptr, size, gfp); 1843df95e795SDennis Zhou 1844df95e795SDennis Zhou trace_percpu_alloc_percpu(reserved, is_atomic, size, align, 1845df95e795SDennis Zhou chunk->base_addr, off, ptr); 1846df95e795SDennis Zhou 18473c7be18aSRoman Gushchin 
pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
18483c7be18aSRoman Gushchin
1849f528f0b8SCatalin Marinas return ptr;
1850ccea34b5STejun Heo
1851ccea34b5STejun Heo fail_unlock:
1852403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags);
1853b38d08f3STejun Heo fail:
1854df95e795SDennis Zhou trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1855df95e795SDennis Zhou
18560ea7eeecSDaniel Borkmann if (!is_atomic && do_warn && warn_limit) {
1857870d4b12SJoe Perches pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
18585835d96eSTejun Heo size, align, is_atomic, err);
1859f2badb0cSTejun Heo dump_stack();
1860f2badb0cSTejun Heo if (!--warn_limit)
1861870d4b12SJoe Perches pr_info("limit reached, disable warning\n");
1862f2badb0cSTejun Heo }
18631a4d7607STejun Heo if (is_atomic) {
18641a4d7607STejun Heo /* see the flag handling in pcpu_balance_workfn() */
18651a4d7607STejun Heo pcpu_atomic_alloc_failed = true;
18661a4d7607STejun Heo pcpu_schedule_balance_work();
18676710e594STejun Heo } else {
18686710e594STejun Heo mutex_unlock(&pcpu_alloc_mutex);
18691a4d7607STejun Heo }
18703c7be18aSRoman Gushchin
18713c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
18723c7be18aSRoman Gushchin
1873ccea34b5STejun Heo return NULL;
1874fbf59bc9STejun Heo }
1875edcb4639STejun Heo
1876edcb4639STejun Heo /**
18775835d96eSTejun Heo * __alloc_percpu_gfp - allocate dynamic percpu area
1878edcb4639STejun Heo * @size: size of area to allocate in bytes
1879edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE)
18805835d96eSTejun Heo * @gfp: allocation flags
1881edcb4639STejun Heo *
18825835d96eSTejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align. If
18835835d96eSTejun Heo * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
18840ea7eeecSDaniel Borkmann * be called from any context but is a lot more likely to fail. If @gfp
18850ea7eeecSDaniel Borkmann * has __GFP_NOWARN then no warning will be triggered on invalid or failed
18860ea7eeecSDaniel Borkmann * allocation requests.
1887ccea34b5STejun Heo *
1888edcb4639STejun Heo * RETURNS:
1889edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure.
1890edcb4639STejun Heo */
18915835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
18925835d96eSTejun Heo {
18935835d96eSTejun Heo return pcpu_alloc(size, align, false, gfp);
18945835d96eSTejun Heo }
18955835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
18965835d96eSTejun Heo
18975835d96eSTejun Heo /**
18985835d96eSTejun Heo * __alloc_percpu - allocate dynamic percpu area
18995835d96eSTejun Heo * @size: size of area to allocate in bytes
19005835d96eSTejun Heo * @align: alignment of area (max PAGE_SIZE)
19015835d96eSTejun Heo *
19025835d96eSTejun Heo * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
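 *
 * Illustrative usage (hypothetical counter, error handling kept minimal):
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *	if (cnt)
 *		this_cpu_inc(*cnt);
 *	free_percpu(cnt);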
19035835d96eSTejun Heo */ 190443cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align) 1905edcb4639STejun Heo { 19065835d96eSTejun Heo return pcpu_alloc(size, align, false, GFP_KERNEL); 1907edcb4639STejun Heo } 1908fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 1909fbf59bc9STejun Heo 1910edcb4639STejun Heo /** 1911edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 1912edcb4639STejun Heo * @size: size of area to allocate in bytes 1913edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1914edcb4639STejun Heo * 19159329ba97STejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align 19169329ba97STejun Heo * from reserved percpu area if arch has set it up; otherwise, 19179329ba97STejun Heo * allocation is served from the same dynamic area. Might sleep. 19189329ba97STejun Heo * Might trigger writeouts. 1919edcb4639STejun Heo * 1920ccea34b5STejun Heo * CONTEXT: 1921ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1922ccea34b5STejun Heo * 1923edcb4639STejun Heo * RETURNS: 1924edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1925edcb4639STejun Heo */ 192643cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1927edcb4639STejun Heo { 19285835d96eSTejun Heo return pcpu_alloc(size, align, true, GFP_KERNEL); 1929edcb4639STejun Heo } 1930edcb4639STejun Heo 1931a56dbddfSTejun Heo /** 19323c7be18aSRoman Gushchin * __pcpu_balance_workfn - manage the amount of free chunks and populated pages 19333c7be18aSRoman Gushchin * @type: chunk type 1934a56dbddfSTejun Heo * 193547504ee0SDennis Zhou * Reclaim all fully free chunks except for the first one. This is also 193647504ee0SDennis Zhou * responsible for maintaining the pool of empty populated pages. However, 193747504ee0SDennis Zhou * it is possible that this is called when physical memory is scarce causing 193847504ee0SDennis Zhou * OOM killer to be triggered. We should avoid doing so until an actual 193947504ee0SDennis Zhou * allocation causes the failure as it is possible that requests can be 194047504ee0SDennis Zhou * serviced from already backed regions. 1941a56dbddfSTejun Heo */ 19423c7be18aSRoman Gushchin static void __pcpu_balance_workfn(enum pcpu_chunk_type type) 1943fbf59bc9STejun Heo { 194447504ee0SDennis Zhou /* gfp flags passed to underlying allocators */ 1945554fef1cSDennis Zhou const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 1946fe6bd8c3STejun Heo LIST_HEAD(to_free); 19473c7be18aSRoman Gushchin struct list_head *pcpu_slot = pcpu_chunk_list(type); 1948fe6bd8c3STejun Heo struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; 1949a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 19501a4d7607STejun Heo int slot, nr_to_pop, ret; 1951a56dbddfSTejun Heo 19521a4d7607STejun Heo /* 19531a4d7607STejun Heo * There's no reason to keep around multiple unused chunks and VM 19541a4d7607STejun Heo * areas can be scarce. Destroy all free chunks except for one. 
19551a4d7607STejun Heo */ 1956ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 1957ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1958a56dbddfSTejun Heo 1959fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, free_head, list) { 19608d408b4bSTejun Heo WARN_ON(chunk->immutable); 1961a56dbddfSTejun Heo 1962a56dbddfSTejun Heo /* spare the first one */ 1963fe6bd8c3STejun Heo if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 1964a56dbddfSTejun Heo continue; 1965a56dbddfSTejun Heo 1966fe6bd8c3STejun Heo list_move(&chunk->list, &to_free); 1967a56dbddfSTejun Heo } 1968a56dbddfSTejun Heo 1969ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1970a56dbddfSTejun Heo 1971fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, &to_free, list) { 1972e837dfdeSDennis Zhou unsigned int rs, re; 1973dca49645STejun Heo 1974e837dfdeSDennis Zhou bitmap_for_each_set_region(chunk->populated, rs, re, 0, 197591e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 1976a93ace48STejun Heo pcpu_depopulate_chunk(chunk, rs, re); 1977b539b87fSTejun Heo spin_lock_irq(&pcpu_lock); 1978b539b87fSTejun Heo pcpu_chunk_depopulated(chunk, rs, re); 1979b539b87fSTejun Heo spin_unlock_irq(&pcpu_lock); 1980a93ace48STejun Heo } 19816081089fSTejun Heo pcpu_destroy_chunk(chunk); 1982accd4f36SEric Dumazet cond_resched(); 1983fbf59bc9STejun Heo } 1984971f3918STejun Heo 19851a4d7607STejun Heo /* 19861a4d7607STejun Heo * Ensure there are certain number of free populated pages for 19871a4d7607STejun Heo * atomic allocs. Fill up from the most packed so that atomic 19881a4d7607STejun Heo * allocs don't increase fragmentation. If atomic allocation 19891a4d7607STejun Heo * failed previously, always populate the maximum amount. This 19901a4d7607STejun Heo * should prevent atomic allocs larger than PAGE_SIZE from keeping 19911a4d7607STejun Heo * failing indefinitely; however, large atomic allocs are not 19921a4d7607STejun Heo * something we support properly and can be highly unreliable and 19931a4d7607STejun Heo * inefficient. 
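 *
 * As a hypothetical example, with PCPU_EMPTY_POP_PAGES_HIGH of 4 and one
 * empty populated page left, nr_to_pop below is clamped to 3; after a
 * failed atomic allocation the full PCPU_EMPTY_POP_PAGES_HIGH worth of
 * pages is targeted regardless.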
19941a4d7607STejun Heo */ 19951a4d7607STejun Heo retry_pop: 19961a4d7607STejun Heo if (pcpu_atomic_alloc_failed) { 19971a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 19981a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */ 19991a4d7607STejun Heo pcpu_atomic_alloc_failed = false; 20001a4d7607STejun Heo } else { 20011a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 20021a4d7607STejun Heo pcpu_nr_empty_pop_pages, 20031a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH); 20041a4d7607STejun Heo } 20051a4d7607STejun Heo 20061a4d7607STejun Heo for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { 2007e837dfdeSDennis Zhou unsigned int nr_unpop = 0, rs, re; 20081a4d7607STejun Heo 20091a4d7607STejun Heo if (!nr_to_pop) 20101a4d7607STejun Heo break; 20111a4d7607STejun Heo 20121a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 20131a4d7607STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 20148ab16c43SDennis Zhou (Facebook) nr_unpop = chunk->nr_pages - chunk->nr_populated; 20151a4d7607STejun Heo if (nr_unpop) 20161a4d7607STejun Heo break; 20171a4d7607STejun Heo } 20181a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 20191a4d7607STejun Heo 20201a4d7607STejun Heo if (!nr_unpop) 20211a4d7607STejun Heo continue; 20221a4d7607STejun Heo 20231a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */ 2024e837dfdeSDennis Zhou bitmap_for_each_clear_region(chunk->populated, rs, re, 0, 202591e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 2026e837dfdeSDennis Zhou int nr = min_t(int, re - rs, nr_to_pop); 20271a4d7607STejun Heo 202847504ee0SDennis Zhou ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); 20291a4d7607STejun Heo if (!ret) { 20301a4d7607STejun Heo nr_to_pop -= nr; 20311a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 2032b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, rs + nr); 20331a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 20341a4d7607STejun Heo } else { 20351a4d7607STejun Heo nr_to_pop = 0; 20361a4d7607STejun Heo } 20371a4d7607STejun Heo 20381a4d7607STejun Heo if (!nr_to_pop) 20391a4d7607STejun Heo break; 20401a4d7607STejun Heo } 20411a4d7607STejun Heo } 20421a4d7607STejun Heo 20431a4d7607STejun Heo if (nr_to_pop) { 20441a4d7607STejun Heo /* ran out of chunks to populate, create a new one and retry */ 20453c7be18aSRoman Gushchin chunk = pcpu_create_chunk(type, gfp); 20461a4d7607STejun Heo if (chunk) { 20471a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 20481a4d7607STejun Heo pcpu_chunk_relocate(chunk, -1); 20491a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 20501a4d7607STejun Heo goto retry_pop; 20511a4d7607STejun Heo } 20521a4d7607STejun Heo } 20531a4d7607STejun Heo 2054971f3918STejun Heo mutex_unlock(&pcpu_alloc_mutex); 2055a56dbddfSTejun Heo } 2056fbf59bc9STejun Heo 2057fbf59bc9STejun Heo /** 20583c7be18aSRoman Gushchin * pcpu_balance_workfn - manage the amount of free chunks and populated pages 20593c7be18aSRoman Gushchin * @work: unused 20603c7be18aSRoman Gushchin * 20613c7be18aSRoman Gushchin * Call __pcpu_balance_workfn() for each chunk type. 
20623c7be18aSRoman Gushchin */ 20633c7be18aSRoman Gushchin static void pcpu_balance_workfn(struct work_struct *work) 20643c7be18aSRoman Gushchin { 20653c7be18aSRoman Gushchin enum pcpu_chunk_type type; 20663c7be18aSRoman Gushchin 20673c7be18aSRoman Gushchin for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) 20683c7be18aSRoman Gushchin __pcpu_balance_workfn(type); 20693c7be18aSRoman Gushchin } 20703c7be18aSRoman Gushchin 20713c7be18aSRoman Gushchin /** 2072fbf59bc9STejun Heo * free_percpu - free percpu area 2073fbf59bc9STejun Heo * @ptr: pointer to area to free 2074fbf59bc9STejun Heo * 2075ccea34b5STejun Heo * Free percpu area @ptr. 2076ccea34b5STejun Heo * 2077ccea34b5STejun Heo * CONTEXT: 2078ccea34b5STejun Heo * Can be called from atomic context. 2079fbf59bc9STejun Heo */ 208043cf38ebSTejun Heo void free_percpu(void __percpu *ptr) 2081fbf59bc9STejun Heo { 2082129182e5SAndrew Morton void *addr; 2083fbf59bc9STejun Heo struct pcpu_chunk *chunk; 2084ccea34b5STejun Heo unsigned long flags; 20853c7be18aSRoman Gushchin int size, off; 2086198790d9SJohn Sperbeck bool need_balance = false; 20873c7be18aSRoman Gushchin struct list_head *pcpu_slot; 2088fbf59bc9STejun Heo 2089fbf59bc9STejun Heo if (!ptr) 2090fbf59bc9STejun Heo return; 2091fbf59bc9STejun Heo 2092f528f0b8SCatalin Marinas kmemleak_free_percpu(ptr); 2093f528f0b8SCatalin Marinas 2094129182e5SAndrew Morton addr = __pcpu_ptr_to_addr(ptr); 2095129182e5SAndrew Morton 2096ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 2097fbf59bc9STejun Heo 2098fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr); 2099bba174f5STejun Heo off = addr - chunk->base_addr; 2100fbf59bc9STejun Heo 21013c7be18aSRoman Gushchin size = pcpu_free_area(chunk, off); 21023c7be18aSRoman Gushchin 21033c7be18aSRoman Gushchin pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk)); 21043c7be18aSRoman Gushchin 21053c7be18aSRoman Gushchin pcpu_memcg_free_hook(chunk, off, size); 2106fbf59bc9STejun Heo 2107a56dbddfSTejun Heo /* if there are more than one fully free chunks, wake up grim reaper */ 210840064aecSDennis Zhou (Facebook) if (chunk->free_bytes == pcpu_unit_size) { 2109fbf59bc9STejun Heo struct pcpu_chunk *pos; 2110fbf59bc9STejun Heo 2111a56dbddfSTejun Heo list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 2112fbf59bc9STejun Heo if (pos != chunk) { 2113198790d9SJohn Sperbeck need_balance = true; 2114fbf59bc9STejun Heo break; 2115fbf59bc9STejun Heo } 2116fbf59bc9STejun Heo } 2117fbf59bc9STejun Heo 2118df95e795SDennis Zhou trace_percpu_free_percpu(chunk->base_addr, off, ptr); 2119df95e795SDennis Zhou 2120ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 2121198790d9SJohn Sperbeck 2122198790d9SJohn Sperbeck if (need_balance) 2123198790d9SJohn Sperbeck pcpu_schedule_balance_work(); 2124fbf59bc9STejun Heo } 2125fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 2126fbf59bc9STejun Heo 2127383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) 2128383776faSThomas Gleixner { 2129383776faSThomas Gleixner #ifdef CONFIG_SMP 2130383776faSThomas Gleixner const size_t static_size = __per_cpu_end - __per_cpu_start; 2131383776faSThomas Gleixner void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 2132383776faSThomas Gleixner unsigned int cpu; 2133383776faSThomas Gleixner 2134383776faSThomas Gleixner for_each_possible_cpu(cpu) { 2135383776faSThomas Gleixner void *start = per_cpu_ptr(base, cpu); 2136383776faSThomas Gleixner void *va = (void *)addr; 2137383776faSThomas Gleixner 2138383776faSThomas Gleixner if (va 
>= start && va < start + static_size) { 21398ce371f9SPeter Zijlstra if (can_addr) { 2140383776faSThomas Gleixner *can_addr = (unsigned long) (va - start); 21418ce371f9SPeter Zijlstra *can_addr += (unsigned long) 21428ce371f9SPeter Zijlstra per_cpu_ptr(base, get_boot_cpu_id()); 21438ce371f9SPeter Zijlstra } 2144383776faSThomas Gleixner return true; 2145383776faSThomas Gleixner } 2146383776faSThomas Gleixner } 2147383776faSThomas Gleixner #endif 2148383776faSThomas Gleixner /* on UP, can't distinguish from other static vars, always false */ 2149383776faSThomas Gleixner return false; 2150383776faSThomas Gleixner } 2151383776faSThomas Gleixner 21523b034b0dSVivek Goyal /** 215310fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area 215410fad5e4STejun Heo * @addr: address to test 215510fad5e4STejun Heo * 215610fad5e4STejun Heo * Test whether @addr belongs to in-kernel static percpu area. Module 215710fad5e4STejun Heo * static percpu areas are not considered. For those, use 215810fad5e4STejun Heo * is_module_percpu_address(). 215910fad5e4STejun Heo * 216010fad5e4STejun Heo * RETURNS: 216110fad5e4STejun Heo * %true if @addr is from in-kernel static percpu area, %false otherwise. 216210fad5e4STejun Heo */ 216310fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr) 216410fad5e4STejun Heo { 2165383776faSThomas Gleixner return __is_kernel_percpu_address(addr, NULL); 216610fad5e4STejun Heo } 216710fad5e4STejun Heo 216810fad5e4STejun Heo /** 21693b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address 21703b034b0dSVivek Goyal * @addr: the address to be converted to physical address 21713b034b0dSVivek Goyal * 21723b034b0dSVivek Goyal * Given @addr which is a dereferenceable address obtained via one of 21733b034b0dSVivek Goyal * the percpu access macros, this function translates it into its physical 21743b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid 21753b034b0dSVivek Goyal * until this function finishes. 21763b034b0dSVivek Goyal * 217767589c71SDave Young * The percpu allocator has a special setup for the first chunk, which 217867589c71SDave Young * currently supports either embedding in the linear address space or 217967589c71SDave Young * vmalloc mapping. From the second chunk on, the backing allocator 218067589c71SDave Young * (currently either vm or km) provides the translation. 218167589c71SDave Young * 2182bffc4375SYannick Guerrini * @addr could be translated without checking whether it falls into the 218367589c71SDave Young * first chunk, but the current code better reflects how the percpu 218467589c71SDave Young * allocator actually works, and the check can catch bugs both in the 218567589c71SDave Young * percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we 218667589c71SDave Young * keep the current code. 218767589c71SDave Young * 21883b034b0dSVivek Goyal * RETURNS: 21893b034b0dSVivek Goyal * The physical address for @addr.
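 *
 * Example (a minimal sketch; "struct foo", "ptr" and "cpu" are made-up
 * names, not symbols from this file): translating one cpu's instance of
 * a dynamic percpu allocation into a physical address:
 *
 *	struct foo __percpu *ptr = alloc_percpu(struct foo);
 *	phys_addr_t phys = per_cpu_ptr_to_phys(per_cpu_ptr(ptr, cpu));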
21903b034b0dSVivek Goyal */ 21913b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr) 21923b034b0dSVivek Goyal { 21939983b6f0STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 21949983b6f0STejun Heo bool in_first_chunk = false; 2195a855b84cSTejun Heo unsigned long first_low, first_high; 21969983b6f0STejun Heo unsigned int cpu; 21979983b6f0STejun Heo 21989983b6f0STejun Heo /* 2199a855b84cSTejun Heo * The following test on unit_low/high isn't strictly 22009983b6f0STejun Heo * necessary but will speed up lookups of addresses which 22019983b6f0STejun Heo * aren't in the first chunk. 2202c0ebfdc3SDennis Zhou (Facebook) * 2203c0ebfdc3SDennis Zhou (Facebook) * The address check is against full chunk sizes. pcpu_base_addr 2204c0ebfdc3SDennis Zhou (Facebook) * points to the beginning of the first chunk including the 2205c0ebfdc3SDennis Zhou (Facebook) * static region. Assumes good intent as the first chunk may 2206c0ebfdc3SDennis Zhou (Facebook) * not be full (ie. < pcpu_unit_pages in size). 22079983b6f0STejun Heo */ 2208c0ebfdc3SDennis Zhou (Facebook) first_low = (unsigned long)pcpu_base_addr + 2209c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 2210c0ebfdc3SDennis Zhou (Facebook) first_high = (unsigned long)pcpu_base_addr + 2211c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 2212a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 2213a855b84cSTejun Heo (unsigned long)addr < first_high) { 22149983b6f0STejun Heo for_each_possible_cpu(cpu) { 22159983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 22169983b6f0STejun Heo 22179983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 22189983b6f0STejun Heo in_first_chunk = true; 22199983b6f0STejun Heo break; 22209983b6f0STejun Heo } 22219983b6f0STejun Heo } 22229983b6f0STejun Heo } 22239983b6f0STejun Heo 22249983b6f0STejun Heo if (in_first_chunk) { 2225eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 22263b034b0dSVivek Goyal return __pa(addr); 22273b034b0dSVivek Goyal else 22289f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 22299f57bd4dSEugene Surovegin offset_in_page(addr); 2230020ec653STejun Heo } else 22319f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 22329f57bd4dSEugene Surovegin offset_in_page(addr); 22333b034b0dSVivek Goyal } 22343b034b0dSVivek Goyal 2235fbf59bc9STejun Heo /** 2236fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 2237fd1e8a1fSTejun Heo * @nr_groups: the number of groups 2238fd1e8a1fSTejun Heo * @nr_units: the number of units 2239033e48fbSTejun Heo * 2240fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 2241fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 2242fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 2243fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 2244fd1e8a1fSTejun Heo * pointer of other groups. 2245033e48fbSTejun Heo * 2246033e48fbSTejun Heo * RETURNS: 2247fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 2248fd1e8a1fSTejun Heo * failure. 
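 *
 * Example (sketch mirroring the UP setup_per_cpu_areas() at the end of
 * this file): a single group holding a single unit:
 *
 *	ai = pcpu_alloc_alloc_info(1, 1);
 *	ai->groups[0].nr_units = 1;
 *	ai->groups[0].cpu_map[0] = 0;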
2249033e48fbSTejun Heo */ 2250fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 2251fd1e8a1fSTejun Heo int nr_units) 2252fd1e8a1fSTejun Heo { 2253fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 2254fd1e8a1fSTejun Heo size_t base_size, ai_size; 2255fd1e8a1fSTejun Heo void *ptr; 2256fd1e8a1fSTejun Heo int unit; 2257fd1e8a1fSTejun Heo 225814d37612SGustavo A. R. Silva base_size = ALIGN(struct_size(ai, groups, nr_groups), 2259fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 2260fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 2261fd1e8a1fSTejun Heo 226226fb3daeSMike Rapoport ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); 2263fd1e8a1fSTejun Heo if (!ptr) 2264fd1e8a1fSTejun Heo return NULL; 2265fd1e8a1fSTejun Heo ai = ptr; 2266fd1e8a1fSTejun Heo ptr += base_size; 2267fd1e8a1fSTejun Heo 2268fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 2269fd1e8a1fSTejun Heo 2270fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 2271fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 2272fd1e8a1fSTejun Heo 2273fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 2274fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 2275fd1e8a1fSTejun Heo 2276fd1e8a1fSTejun Heo return ai; 2277fd1e8a1fSTejun Heo } 2278fd1e8a1fSTejun Heo 2279fd1e8a1fSTejun Heo /** 2280fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 2281fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 2282fd1e8a1fSTejun Heo * 2283fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 2284fd1e8a1fSTejun Heo */ 2285fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 2286fd1e8a1fSTejun Heo { 2287999c17e3SSantosh Shilimkar memblock_free_early(__pa(ai), ai->__ai_size); 2288fd1e8a1fSTejun Heo } 2289fd1e8a1fSTejun Heo 2290fd1e8a1fSTejun Heo /** 2291fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 2292fd1e8a1fSTejun Heo * @lvl: loglevel 2293fd1e8a1fSTejun Heo * @ai: allocation info to dump 2294fd1e8a1fSTejun Heo * 2295fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 
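 *
 * The output looks roughly like the following (the numbers here are
 * purely illustrative); s/r/d/u are the static, reserved, dynamic and
 * unit sizes in bytes:
 *
 *	pcpu-alloc: s145496 r8192 d28584 u262144 alloc=1*1048576
 *	pcpu-alloc: [0] 0 1 2 3 [0] 4 5 6 7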
2296fd1e8a1fSTejun Heo */ 2297fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl, 2298fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai) 2299033e48fbSTejun Heo { 2300fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width; 2301033e48fbSTejun Heo char empty_str[] = "--------"; 2302fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0; 2303fd1e8a1fSTejun Heo int group, v; 2304fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */ 2305033e48fbSTejun Heo 2306fd1e8a1fSTejun Heo v = ai->nr_groups; 2307033e48fbSTejun Heo while (v /= 10) 2308fd1e8a1fSTejun Heo group_width++; 2309033e48fbSTejun Heo 2310fd1e8a1fSTejun Heo v = num_possible_cpus(); 2311fd1e8a1fSTejun Heo while (v /= 10) 2312fd1e8a1fSTejun Heo cpu_width++; 2313fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 2314033e48fbSTejun Heo 2315fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size; 2316fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3; 2317fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1)); 2318033e48fbSTejun Heo 2319fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 2320fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 2321fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 2322fd1e8a1fSTejun Heo 2323fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) { 2324fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 2325fd1e8a1fSTejun Heo int unit = 0, unit_end = 0; 2326fd1e8a1fSTejun Heo 2327fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa); 2328fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa; 2329fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) { 2330fd1e8a1fSTejun Heo if (!(alloc % apl)) { 23311170532bSJoe Perches pr_cont("\n"); 2332fd1e8a1fSTejun Heo printk("%spcpu-alloc: ", lvl); 2333033e48fbSTejun Heo } 23341170532bSJoe Perches pr_cont("[%0*d] ", group_width, group); 2335fd1e8a1fSTejun Heo 2336fd1e8a1fSTejun Heo for (unit_end += upa; unit < unit_end; unit++) 2337fd1e8a1fSTejun Heo if (gi->cpu_map[unit] != NR_CPUS) 23381170532bSJoe Perches pr_cont("%0*d ", 23391170532bSJoe Perches cpu_width, gi->cpu_map[unit]); 2340033e48fbSTejun Heo else 23411170532bSJoe Perches pr_cont("%s ", empty_str); 2342033e48fbSTejun Heo } 2343fd1e8a1fSTejun Heo } 23441170532bSJoe Perches pr_cont("\n"); 2345033e48fbSTejun Heo } 2346033e48fbSTejun Heo 2347fbf59bc9STejun Heo /** 23488d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 2349fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how to percpu area is shaped 235038a6be52STejun Heo * @base_addr: mapped address 2351fbf59bc9STejun Heo * 23528d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 235369ab285bSChristophe JAILLET * percpu area. This function is to be called from arch percpu area 235438a6be52STejun Heo * setup path. 23558d408b4bSTejun Heo * 2356fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first 2357fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator. 23588d408b4bSTejun Heo * 2359fd1e8a1fSTejun Heo * @ai->static_size is the size of static percpu area. 2360fd1e8a1fSTejun Heo * 2361fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the amount of bytes to 2362edcb4639STejun Heo * reserve after the static area in the first chunk. 
This reserves 2363edcb4639STejun Heo * the first chunk such that it's available only through reserved 2364edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 2365edcb4639STejun Heo * static areas on architectures where the addressing model has 2366edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 2367edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 2368edcb4639STejun Heo * 2369fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 2370fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 2371fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 23726074d5b0STejun Heo * 2373fd1e8a1fSTejun Heo * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 2374fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 2375fd1e8a1fSTejun Heo * @ai->dyn_size. 23768d408b4bSTejun Heo * 2377fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and used as alignment 2378fd1e8a1fSTejun Heo * for vm areas. 23798d408b4bSTejun Heo * 2380fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and always multiple of 2381fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 2382fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 2383fd1e8a1fSTejun Heo * 2384fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe virtual memory layout of 2385fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the 2386fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 2387fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 2388fd1e8a1fSTejun Heo * all units is assumed. 23898d408b4bSTejun Heo * 239038a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 239138a6be52STejun Heo * copied static data to each unit. 2392fbf59bc9STejun Heo * 2393c0ebfdc3SDennis Zhou (Facebook) * The first chunk will always contain a static and a dynamic region. 2394c0ebfdc3SDennis Zhou (Facebook) * However, the static region is not managed by any chunk. If the first 2395c0ebfdc3SDennis Zhou (Facebook) * chunk also contains a reserved region, it is served by two chunks - 2396c0ebfdc3SDennis Zhou (Facebook) * one for the reserved region and one for the dynamic region. They 2397c0ebfdc3SDennis Zhou (Facebook) * share the same vm, but use offset regions in the area allocation map. 2398c0ebfdc3SDennis Zhou (Facebook) * The chunk serving the dynamic region is circulated in the chunk slots 2399c0ebfdc3SDennis Zhou (Facebook) * and available for dynamic allocation like any other chunk. 
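 *
 * A rough sketch of the calling sequence (the embed and page helpers
 * below wrap approximately this; the size variable names are
 * placeholders):
 *
 *	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, NULL);
 *	... allocate and map the area at base_addr and copy the static
 *	    data to every unit ...
 *	pcpu_setup_first_chunk(ai, base_addr);
 *	pcpu_free_alloc_info(ai);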
2400fbf59bc9STejun Heo */ 2401163fa234SKefeng Wang void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 2402fd1e8a1fSTejun Heo void *base_addr) 2403fbf59bc9STejun Heo { 2404b9c39442SDennis Zhou (Facebook) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2405d2f3c384SDennis Zhou (Facebook) size_t static_size, dyn_size; 24060c4169c3SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 24076563297cSTejun Heo unsigned long *group_offsets; 24086563297cSTejun Heo size_t *group_sizes; 2409fb435d52STejun Heo unsigned long *unit_off; 2410fbf59bc9STejun Heo unsigned int cpu; 2411fd1e8a1fSTejun Heo int *unit_map; 2412fd1e8a1fSTejun Heo int group, unit, i; 2413c0ebfdc3SDennis Zhou (Facebook) int map_size; 2414c0ebfdc3SDennis Zhou (Facebook) unsigned long tmp_addr; 2415f655f405SMike Rapoport size_t alloc_size; 24163c7be18aSRoman Gushchin enum pcpu_chunk_type type; 2417fbf59bc9STejun Heo 2418635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 2419635b75fcSTejun Heo if (unlikely(cond)) { \ 2420870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \ 2421870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \ 2422807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \ 2423635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 2424635b75fcSTejun Heo BUG(); \ 2425635b75fcSTejun Heo } \ 2426635b75fcSTejun Heo } while (0) 2427635b75fcSTejun Heo 24282f39e637STejun Heo /* sanity checks */ 2429635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 2430bbddff05STejun Heo #ifdef CONFIG_SMP 2431635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 2432f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 2433bbddff05STejun Heo #endif 2434635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 2435f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 2436635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 2437f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 2438635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 2439ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); 2440099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 2441fb29a2ccSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!ai->dyn_size); 2442d2f3c384SDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); 2443ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || 2444ca460b3cSDennis Zhou (Facebook) IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); 24459f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 24468d408b4bSTejun Heo 24476563297cSTejun Heo /* process group information and build config tables accordingly */ 2448f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_offsets[0]); 2449f655f405SMike Rapoport group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2450f655f405SMike Rapoport if (!group_offsets) 2451f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2452f655f405SMike Rapoport alloc_size); 2453f655f405SMike Rapoport 2454f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_sizes[0]); 2455f655f405SMike Rapoport group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2456f655f405SMike Rapoport if (!group_sizes) 2457f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2458f655f405SMike Rapoport alloc_size); 2459f655f405SMike 
Rapoport 2460f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_map[0]); 2461f655f405SMike Rapoport unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2462f655f405SMike Rapoport if (!unit_map) 2463f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2464f655f405SMike Rapoport alloc_size); 2465f655f405SMike Rapoport 2466f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_off[0]); 2467f655f405SMike Rapoport unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2468f655f405SMike Rapoport if (!unit_off) 2469f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2470f655f405SMike Rapoport alloc_size); 24712f39e637STejun Heo 2472fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2473ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 2474a855b84cSTejun Heo 2475a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 2476a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 24772f39e637STejun Heo 2478fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 2479fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 24802f39e637STejun Heo 24816563297cSTejun Heo group_offsets[group] = gi->base_offset; 24826563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 24836563297cSTejun Heo 2484fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 2485fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 2486fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 2487fd1e8a1fSTejun Heo continue; 2488fd1e8a1fSTejun Heo 24899f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 2490635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 2491635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 2492fd1e8a1fSTejun Heo 2493fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 2494fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 2495fb435d52STejun Heo 2496a855b84cSTejun Heo /* determine low/high unit_cpu */ 2497a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 2498a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 2499a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 2500a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 2501a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 2502a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 25030fc0531eSLinus Torvalds } 25040fc0531eSLinus Torvalds } 2505fd1e8a1fSTejun Heo pcpu_nr_units = unit; 25062f39e637STejun Heo 25072f39e637STejun Heo for_each_possible_cpu(cpu) 2508635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 2509635b75fcSTejun Heo 2510635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 2511635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 2512bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 25132f39e637STejun Heo 25146563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 25156563297cSTejun Heo pcpu_group_offsets = group_offsets; 25166563297cSTejun Heo pcpu_group_sizes = group_sizes; 2517fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 2518fb435d52STejun Heo pcpu_unit_offsets = unit_off; 25192f39e637STejun Heo 25202f39e637STejun Heo /* determine basic parameters */ 2521fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2522d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 25236563297cSTejun Heo pcpu_atom_size = ai->atom_size; 2524*61cf93d3SDennis Zhou pcpu_chunk_struct_size = struct_size(chunk, populated, 2525*61cf93d3SDennis Zhou BITS_TO_LONGS(pcpu_unit_pages)); 2526cafe8816STejun Heo 252730a5b536SDennis Zhou pcpu_stats_save_ai(ai); 
252830a5b536SDennis Zhou 2529d9b55eebSTejun Heo /* 2530d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 2531d9b55eebSTejun Heo * empty chunks. 2532d9b55eebSTejun Heo */ 2533d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 25343c7be18aSRoman Gushchin pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * 25353c7be18aSRoman Gushchin sizeof(pcpu_chunk_lists[0]) * 25363c7be18aSRoman Gushchin PCPU_NR_CHUNK_TYPES, 25377e1c4e27SMike Rapoport SMP_CACHE_BYTES); 25383c7be18aSRoman Gushchin if (!pcpu_chunk_lists) 2539f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 25403c7be18aSRoman Gushchin pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) * 25413c7be18aSRoman Gushchin PCPU_NR_CHUNK_TYPES); 25423c7be18aSRoman Gushchin 25433c7be18aSRoman Gushchin for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) 2544fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 25453c7be18aSRoman Gushchin INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]); 2546fbf59bc9STejun Heo 2547edcb4639STejun Heo /* 2548d2f3c384SDennis Zhou (Facebook) * The end of the static region needs to be aligned with the 2549d2f3c384SDennis Zhou (Facebook) * minimum allocation size as this offsets the reserved and 2550d2f3c384SDennis Zhou (Facebook) * dynamic region. The first chunk ends page aligned by 2551d2f3c384SDennis Zhou (Facebook) * expanding the dynamic region, therefore the dynamic region 2552d2f3c384SDennis Zhou (Facebook) * can be shrunk to compensate while still staying above the 2553d2f3c384SDennis Zhou (Facebook) * configured sizes. 2554d2f3c384SDennis Zhou (Facebook) */ 2555d2f3c384SDennis Zhou (Facebook) static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); 2556d2f3c384SDennis Zhou (Facebook) dyn_size = ai->dyn_size - (static_size - ai->static_size); 2557d2f3c384SDennis Zhou (Facebook) 2558d2f3c384SDennis Zhou (Facebook) /* 2559c0ebfdc3SDennis Zhou (Facebook) * Initialize first chunk. 2560c0ebfdc3SDennis Zhou (Facebook) * If the reserved_size is non-zero, this initializes the reserved 2561c0ebfdc3SDennis Zhou (Facebook) * chunk. If the reserved_size is zero, the reserved chunk is NULL 2562c0ebfdc3SDennis Zhou (Facebook) * and the dynamic region is initialized here. The first chunk, 2563c0ebfdc3SDennis Zhou (Facebook) * pcpu_first_chunk, will always point to the chunk that serves 2564c0ebfdc3SDennis Zhou (Facebook) * the dynamic region. 
2565edcb4639STejun Heo */ 2566d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size; 2567d2f3c384SDennis Zhou (Facebook) map_size = ai->reserved_size ?: dyn_size; 256840064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 256961ace7faSTejun Heo 2570edcb4639STejun Heo /* init dynamic chunk if necessary */ 2571b9c39442SDennis Zhou (Facebook) if (ai->reserved_size) { 25720c4169c3SDennis Zhou (Facebook) pcpu_reserved_chunk = chunk; 2573b9c39442SDennis Zhou (Facebook) 2574d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size + 2575c0ebfdc3SDennis Zhou (Facebook) ai->reserved_size; 2576d2f3c384SDennis Zhou (Facebook) map_size = dyn_size; 257740064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2578edcb4639STejun Heo } 2579edcb4639STejun Heo 25802441d15cSTejun Heo /* link the first chunk in */ 25810c4169c3SDennis Zhou (Facebook) pcpu_first_chunk = chunk; 25820cecf50cSDennis Zhou (Facebook) pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; 2583ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 2584fbf59bc9STejun Heo 25857e8a6304SDennis Zhou (Facebook) /* include all regions of the first chunk */ 25867e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += PFN_DOWN(size_sum); 25877e8a6304SDennis Zhou (Facebook) 258830a5b536SDennis Zhou pcpu_stats_chunk_alloc(); 2589df95e795SDennis Zhou trace_percpu_create_chunk(base_addr); 259030a5b536SDennis Zhou 2591fbf59bc9STejun Heo /* we're done */ 2592bba174f5STejun Heo pcpu_base_addr = base_addr; 2593fbf59bc9STejun Heo } 259466c3a757STejun Heo 2595bbddff05STejun Heo #ifdef CONFIG_SMP 2596bbddff05STejun Heo 259717f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 2598f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 2599f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 2600f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 2601f58dc01bSTejun Heo }; 260266c3a757STejun Heo 2603f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 2604f58dc01bSTejun Heo 2605f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 260666c3a757STejun Heo { 26075479c78aSCyrill Gorcunov if (!str) 26085479c78aSCyrill Gorcunov return -EINVAL; 26095479c78aSCyrill Gorcunov 2610f58dc01bSTejun Heo if (0) 2611f58dc01bSTejun Heo /* nada */; 2612f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 2613f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 2614f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 2615f58dc01bSTejun Heo #endif 2616f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2617f58dc01bSTejun Heo else if (!strcmp(str, "page")) 2618f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 2619f58dc01bSTejun Heo #endif 2620f58dc01bSTejun Heo else 2621870d4b12SJoe Perches pr_warn("unknown allocator %s specified\n", str); 262266c3a757STejun Heo 2623f58dc01bSTejun Heo return 0; 262466c3a757STejun Heo } 2625f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup); 262666c3a757STejun Heo 26273c9a024fSTejun Heo /* 26283c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup. 26293c9a024fSTejun Heo * Build it if needed by the arch config or the generic setup is going 26303c9a024fSTejun Heo * to be used. 
26313c9a024fSTejun Heo */ 263208fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 263308fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 26343c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 26353c9a024fSTejun Heo #endif 26363c9a024fSTejun Heo 26373c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 26383c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 26393c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 26403c9a024fSTejun Heo #endif 26413c9a024fSTejun Heo 26423c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 26433c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 26443c9a024fSTejun Heo /** 2645fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 2646fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2647fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2648fbf59bc9STejun Heo * @atom_size: allocation atom size 2649fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2650fbf59bc9STejun Heo * 2651fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 2652fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 2653fbf59bc9STejun Heo * atom size and distances between CPUs. 2654fbf59bc9STejun Heo * 2655bffc4375SYannick Guerrini * Groups are always multiples of atom size and CPUs which are of 2656fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 2657fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 2658fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage 2659fbf59bc9STejun Heo * of allocated virtual address space. 2660fbf59bc9STejun Heo * 2661fbf59bc9STejun Heo * RETURNS: 2662fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On 2663fbf59bc9STejun Heo * failure, ERR_PTR value is returned. 
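 *
 * Example (sketch; these are the values the generic setup_per_cpu_areas()
 * below ends up passing in via pcpu_embed_first_chunk()):
 *
 *	ai = pcpu_build_alloc_info(PERCPU_MODULE_RESERVE,
 *				   PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL);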
2664fbf59bc9STejun Heo */ 2665fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 2666fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 2667fbf59bc9STejun Heo size_t atom_size, 2668fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 2669fbf59bc9STejun Heo { 2670fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 2671fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 2672fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 2673fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 2674fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 26753f649ab7SKees Cook int upa, max_upa, best_upa; /* units_per_alloc */ 2676fbf59bc9STejun Heo int last_allocs, group, unit; 2677fbf59bc9STejun Heo unsigned int cpu, tcpu; 2678fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 2679fbf59bc9STejun Heo unsigned int *cpu_map; 2680fbf59bc9STejun Heo 2681fbf59bc9STejun Heo /* this function may be called multiple times */ 2682fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 2683fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 2684fbf59bc9STejun Heo 2685fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 2686fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 2687fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 2688fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 2689fbf59bc9STejun Heo 2690fbf59bc9STejun Heo /* 2691fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 2692fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 269325985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 2694fbf59bc9STejun Heo * or larger than min_unit_size. 2695fbf59bc9STejun Heo */ 2696fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 2697fbf59bc9STejun Heo 26989c015162SDennis Zhou (Facebook) /* determine the maximum # of units that can fit in an allocation */ 2699fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 2700fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 2701f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2702fbf59bc9STejun Heo upa--; 2703fbf59bc9STejun Heo max_upa = upa; 2704fbf59bc9STejun Heo 2705fbf59bc9STejun Heo /* group cpus according to their proximity */ 2706fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 2707fbf59bc9STejun Heo group = 0; 2708fbf59bc9STejun Heo next_group: 2709fbf59bc9STejun Heo for_each_possible_cpu(tcpu) { 2710fbf59bc9STejun Heo if (cpu == tcpu) 2711fbf59bc9STejun Heo break; 2712fbf59bc9STejun Heo if (group_map[tcpu] == group && cpu_distance_fn && 2713fbf59bc9STejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 2714fbf59bc9STejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 2715fbf59bc9STejun Heo group++; 2716fbf59bc9STejun Heo nr_groups = max(nr_groups, group + 1); 2717fbf59bc9STejun Heo goto next_group; 2718fbf59bc9STejun Heo } 2719fbf59bc9STejun Heo } 2720fbf59bc9STejun Heo group_map[cpu] = group; 2721fbf59bc9STejun Heo group_cnt[group]++; 2722fbf59bc9STejun Heo } 2723fbf59bc9STejun Heo 2724fbf59bc9STejun Heo /* 27259c015162SDennis Zhou (Facebook) * Wasted space is caused by a ratio imbalance of upa to group_cnt. 27269c015162SDennis Zhou (Facebook) * Expand the unit_size until we use >= 75% of the units allocated. 
27279c015162SDennis Zhou (Facebook) * Related to atom_size, which could be much larger than the unit_size. 2728fbf59bc9STejun Heo */ 2729fbf59bc9STejun Heo last_allocs = INT_MAX; 2730fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 2731fbf59bc9STejun Heo int allocs = 0, wasted = 0; 2732fbf59bc9STejun Heo 2733f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2734fbf59bc9STejun Heo continue; 2735fbf59bc9STejun Heo 2736fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2737fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 2738fbf59bc9STejun Heo allocs += this_allocs; 2739fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 2740fbf59bc9STejun Heo } 2741fbf59bc9STejun Heo 2742fbf59bc9STejun Heo /* 2743fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 2744fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 2745fbf59bc9STejun Heo * passes the following check. 2746fbf59bc9STejun Heo */ 2747fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 2748fbf59bc9STejun Heo continue; 2749fbf59bc9STejun Heo 2750fbf59bc9STejun Heo /* and then don't consume more memory */ 2751fbf59bc9STejun Heo if (allocs > last_allocs) 2752fbf59bc9STejun Heo break; 2753fbf59bc9STejun Heo last_allocs = allocs; 2754fbf59bc9STejun Heo best_upa = upa; 2755fbf59bc9STejun Heo } 2756fbf59bc9STejun Heo upa = best_upa; 2757fbf59bc9STejun Heo 2758fbf59bc9STejun Heo /* allocate and fill alloc_info */ 2759fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 2760fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 2761fbf59bc9STejun Heo 2762fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 2763fbf59bc9STejun Heo if (!ai) 2764fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 2765fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 2766fbf59bc9STejun Heo 2767fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2768fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 2769fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 2770fbf59bc9STejun Heo } 2771fbf59bc9STejun Heo 2772fbf59bc9STejun Heo ai->static_size = static_size; 2773fbf59bc9STejun Heo ai->reserved_size = reserved_size; 2774fbf59bc9STejun Heo ai->dyn_size = dyn_size; 2775fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 2776fbf59bc9STejun Heo ai->atom_size = atom_size; 2777fbf59bc9STejun Heo ai->alloc_size = alloc_size; 2778fbf59bc9STejun Heo 27792de7852fSPeng Fan for (group = 0, unit = 0; group < nr_groups; group++) { 2780fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2781fbf59bc9STejun Heo 2782fbf59bc9STejun Heo /* 2783fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 2784fbf59bc9STejun Heo * back-to-back. The caller should update this to 2785fbf59bc9STejun Heo * reflect actual allocation. 
2786fbf59bc9STejun Heo */ 2787fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 2788fbf59bc9STejun Heo 2789fbf59bc9STejun Heo for_each_possible_cpu(cpu) 2790fbf59bc9STejun Heo if (group_map[cpu] == group) 2791fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 2792fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 2793fbf59bc9STejun Heo unit += gi->nr_units; 2794fbf59bc9STejun Heo } 2795fbf59bc9STejun Heo BUG_ON(unit != nr_units); 2796fbf59bc9STejun Heo 2797fbf59bc9STejun Heo return ai; 2798fbf59bc9STejun Heo } 27993c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 2800fbf59bc9STejun Heo 28013c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 280266c3a757STejun Heo /** 280366c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 280466c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 28054ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2806c8826dd5STejun Heo * @atom_size: allocation atom size 2807c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2808c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 280925985edcSLucas De Marchi * @free_fn: function to free percpu page 281066c3a757STejun Heo * 281166c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 281266c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 281366c3a757STejun Heo * 281466c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 2815c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 2816c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 2817c8826dd5STejun Heo * aligned to @atom_size. 2818c8826dd5STejun Heo * 2819c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 2820c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 2821c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 2822c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 2823c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 2824c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 282566c3a757STejun Heo * 28264ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 282766c3a757STejun Heo * 282866c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 2829c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 283066c3a757STejun Heo * 283166c3a757STejun Heo * RETURNS: 2832fb435d52STejun Heo * 0 on success, -errno on failure. 
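 *
 * Example (this mirrors the generic setup_per_cpu_areas() near the end
 * of this file):
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
 *				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
 *	if (rc < 0)
 *		panic("Failed to initialize percpu areas.");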
283366c3a757STejun Heo */ 28344ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 2835c8826dd5STejun Heo size_t atom_size, 2836c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 2837c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2838c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 283966c3a757STejun Heo { 2840c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 2841c8826dd5STejun Heo void **areas = NULL; 2842fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 284393c76b6bSzijun_hu size_t size_sum, areas_size; 284493c76b6bSzijun_hu unsigned long max_distance; 2845163fa234SKefeng Wang int group, i, highest_group, rc = 0; 284666c3a757STejun Heo 2847c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 2848c8826dd5STejun Heo cpu_distance_fn); 2849fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2850fd1e8a1fSTejun Heo return PTR_ERR(ai); 285166c3a757STejun Heo 2852fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2853c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 285466c3a757STejun Heo 285526fb3daeSMike Rapoport areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); 2856c8826dd5STejun Heo if (!areas) { 2857fb435d52STejun Heo rc = -ENOMEM; 2858c8826dd5STejun Heo goto out_free; 2859fa8a7094STejun Heo } 286066c3a757STejun Heo 28619b739662Szijun_hu /* allocate, copy and determine base address & max_distance */ 28629b739662Szijun_hu highest_group = 0; 2863c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2864c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2865c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 2866c8826dd5STejun Heo void *ptr; 286766c3a757STejun Heo 2868c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 2869c8826dd5STejun Heo cpu = gi->cpu_map[i]; 2870c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 2871c8826dd5STejun Heo 2872c8826dd5STejun Heo /* allocate space for the whole group */ 2873c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 2874c8826dd5STejun Heo if (!ptr) { 2875c8826dd5STejun Heo rc = -ENOMEM; 2876c8826dd5STejun Heo goto out_free_areas; 2877c8826dd5STejun Heo } 2878f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2879f528f0b8SCatalin Marinas kmemleak_free(ptr); 2880c8826dd5STejun Heo areas[group] = ptr; 2881c8826dd5STejun Heo 2882c8826dd5STejun Heo base = min(ptr, base); 28839b739662Szijun_hu if (ptr > areas[highest_group]) 28849b739662Szijun_hu highest_group = group; 28859b739662Szijun_hu } 28869b739662Szijun_hu max_distance = areas[highest_group] - base; 28879b739662Szijun_hu max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 28889b739662Szijun_hu 28899b739662Szijun_hu /* warn if maximum distance is further than 75% of vmalloc space */ 28909b739662Szijun_hu if (max_distance > VMALLOC_TOTAL * 3 / 4) { 28919b739662Szijun_hu pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 28929b739662Szijun_hu max_distance, VMALLOC_TOTAL); 28939b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 28949b739662Szijun_hu /* and fail if we have fallback */ 28959b739662Szijun_hu rc = -EINVAL; 28969b739662Szijun_hu goto out_free_areas; 28979b739662Szijun_hu #endif 289842b64281STejun Heo } 289942b64281STejun Heo 290042b64281STejun Heo /* 290142b64281STejun Heo * Copy data and free unused parts. 
This should happen after all 290242b64281STejun Heo * allocations are complete; otherwise, we may end up with 290342b64281STejun Heo * overlapping groups. 290442b64281STejun Heo */ 290542b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 290642b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 290742b64281STejun Heo void *ptr = areas[group]; 2908c8826dd5STejun Heo 2909c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 2910c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 2911c8826dd5STejun Heo /* unused unit, free whole */ 2912c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 2913c8826dd5STejun Heo continue; 2914c8826dd5STejun Heo } 2915c8826dd5STejun Heo /* copy and return the unused part */ 2916fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 2917c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 2918c8826dd5STejun Heo } 291966c3a757STejun Heo } 292066c3a757STejun Heo 2921c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 29226ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2923c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 29246ea529a2STejun Heo } 2925c8826dd5STejun Heo 292600206a69SMatteo Croce pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", 292700206a69SMatteo Croce PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, 2928fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 292966c3a757STejun Heo 2930163fa234SKefeng Wang pcpu_setup_first_chunk(ai, base); 2931c8826dd5STejun Heo goto out_free; 2932c8826dd5STejun Heo 2933c8826dd5STejun Heo out_free_areas: 2934c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 2935f851c8d8SMichael Holzheu if (areas[group]) 2936c8826dd5STejun Heo free_fn(areas[group], 2937c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 2938c8826dd5STejun Heo out_free: 2939fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2940c8826dd5STejun Heo if (areas) 2941999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 2942fb435d52STejun Heo return rc; 2943d4b95f80STejun Heo } 29443c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 2945d4b95f80STejun Heo 29463c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 2947d4b95f80STejun Heo /** 294800ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 2949d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2950d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 295125985edcSLucas De Marchi * @free_fn: function to free percpu page, always called with PAGE_SIZE 2952d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 2953d4b95f80STejun Heo * 295400ae4064STejun Heo * This is a helper to ease setting up page-remapped first percpu 295500ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 2956d4b95f80STejun Heo * 2957d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 2958d4b95f80STejun Heo * page-by-page into vmalloc area. 2959d4b95f80STejun Heo * 2960d4b95f80STejun Heo * RETURNS: 2961fb435d52STejun Heo * 0 on success, -errno on failure. 
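 *
 * Example (sketch only; the three callback names are placeholders for
 * functions an architecture would provide, they are not defined here):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_pcpu_fc_alloc, my_pcpu_fc_free,
 *				   my_pcpu_populate_pte);
 *	if (rc < 0)
 *		panic("Failed to initialize percpu areas.");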
2962d4b95f80STejun Heo */ 2963fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 2964d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2965d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 2966d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 2967d4b95f80STejun Heo { 29688f05a6a6STejun Heo static struct vm_struct vm; 2969fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 297000ae4064STejun Heo char psize_str[16]; 2971ce3141a2STejun Heo int unit_pages; 2972d4b95f80STejun Heo size_t pages_size; 2973ce3141a2STejun Heo struct page **pages; 2974163fa234SKefeng Wang int unit, i, j, rc = 0; 29758f606604Szijun_hu int upa; 29768f606604Szijun_hu int nr_g0_units; 2977d4b95f80STejun Heo 297800ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 297900ae4064STejun Heo 29804ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 2981fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2982fd1e8a1fSTejun Heo return PTR_ERR(ai); 2983fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 29848f606604Szijun_hu upa = ai->alloc_size/ai->unit_size; 29858f606604Szijun_hu nr_g0_units = roundup(num_possible_cpus(), upa); 29860b59c25fSIgor Stoppa if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { 29878f606604Szijun_hu pcpu_free_alloc_info(ai); 29888f606604Szijun_hu return -EINVAL; 29898f606604Szijun_hu } 2990fd1e8a1fSTejun Heo 2991fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 2992d4b95f80STejun Heo 2993d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 2994fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2995fd1e8a1fSTejun Heo sizeof(pages[0])); 29967e1c4e27SMike Rapoport pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); 2997f655f405SMike Rapoport if (!pages) 2998f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2999f655f405SMike Rapoport pages_size); 3000d4b95f80STejun Heo 30018f05a6a6STejun Heo /* allocate pages */ 3002d4b95f80STejun Heo j = 0; 30038f606604Szijun_hu for (unit = 0; unit < num_possible_cpus(); unit++) { 3004fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 30058f606604Szijun_hu for (i = 0; i < unit_pages; i++) { 3006d4b95f80STejun Heo void *ptr; 3007d4b95f80STejun Heo 30083cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 3009d4b95f80STejun Heo if (!ptr) { 3010870d4b12SJoe Perches pr_warn("failed to allocate %s page for cpu%u\n", 3011598d8091SJoe Perches psize_str, cpu); 3012d4b95f80STejun Heo goto enomem; 3013d4b95f80STejun Heo } 3014f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 3015f528f0b8SCatalin Marinas kmemleak_free(ptr); 3016ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 3017d4b95f80STejun Heo } 30188f606604Szijun_hu } 3019d4b95f80STejun Heo 30208f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 30218f05a6a6STejun Heo vm.flags = VM_ALLOC; 3022fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 30238f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 30248f05a6a6STejun Heo 3025fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 30261d9d3257STejun Heo unsigned long unit_addr = 3027fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 30288f05a6a6STejun Heo 3029ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 30308f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 30318f05a6a6STejun Heo 30328f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 
3033fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 3034ce3141a2STejun Heo unit_pages); 3035fb435d52STejun Heo if (rc < 0) 3036fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 30378f05a6a6STejun Heo 30388f05a6a6STejun Heo /* 30398f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 30408f05a6a6STejun Heo * cache for the linear mapping here - something 30418f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 30428f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 30438f05a6a6STejun Heo * data structures are not set up yet. 30448f05a6a6STejun Heo */ 30458f05a6a6STejun Heo 30468f05a6a6STejun Heo /* copy static data */ 3047fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 304866c3a757STejun Heo } 304966c3a757STejun Heo 305066c3a757STejun Heo /* we're ready, commit */ 305100206a69SMatteo Croce pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", 305200206a69SMatteo Croce unit_pages, psize_str, ai->static_size, 3053fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 305466c3a757STejun Heo 3055163fa234SKefeng Wang pcpu_setup_first_chunk(ai, vm.addr); 3056d4b95f80STejun Heo goto out_free_ar; 3057d4b95f80STejun Heo 3058d4b95f80STejun Heo enomem: 3059d4b95f80STejun Heo while (--j >= 0) 3060ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 3061fb435d52STejun Heo rc = -ENOMEM; 3062d4b95f80STejun Heo out_free_ar: 3063999c17e3SSantosh Shilimkar memblock_free_early(__pa(pages), pages_size); 3064fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 3065fb435d52STejun Heo return rc; 306666c3a757STejun Heo } 30673c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */ 3068d4b95f80STejun Heo 3069bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 30708c4bfc6eSTejun Heo /* 3071bbddff05STejun Heo * Generic SMP percpu area setup. 3072e74e3962STejun Heo * 3073e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 3074e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 3075e74e3962STejun Heo * important because many archs have addressing restrictions and might 3076e74e3962STejun Heo * fail if the percpu area is located far away from the previous 3077e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 3078e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 3079e74e3962STejun Heo * on the physical linear memory mapping which uses large page 3080e74e3962STejun Heo * mappings on applicable archs. 
3081e74e3962STejun Heo */ 3082e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 3083e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 3084e74e3962STejun Heo 3085c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 3086c8826dd5STejun Heo size_t align) 3087c8826dd5STejun Heo { 308826fb3daeSMike Rapoport return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS)); 3089c8826dd5STejun Heo } 3090c8826dd5STejun Heo 3091c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 3092c8826dd5STejun Heo { 3093999c17e3SSantosh Shilimkar memblock_free_early(__pa(ptr), size); 3094c8826dd5STejun Heo } 3095c8826dd5STejun Heo 3096e74e3962STejun Heo void __init setup_per_cpu_areas(void) 3097e74e3962STejun Heo { 3098e74e3962STejun Heo unsigned long delta; 3099e74e3962STejun Heo unsigned int cpu; 3100fb435d52STejun Heo int rc; 3101e74e3962STejun Heo 3102e74e3962STejun Heo /* 3103e74e3962STejun Heo * Always reserve area for module percpu variables. That's 3104e74e3962STejun Heo * what the legacy allocator did. 3105e74e3962STejun Heo */ 3106fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 3107c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 3108c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 3109fb435d52STejun Heo if (rc < 0) 3110bbddff05STejun Heo panic("Failed to initialize percpu areas."); 3111e74e3962STejun Heo 3112e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 3113e74e3962STejun Heo for_each_possible_cpu(cpu) 3114fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 3115e74e3962STejun Heo } 3116e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 3117099a19d9STejun Heo 3118bbddff05STejun Heo #else /* CONFIG_SMP */ 3119bbddff05STejun Heo 3120bbddff05STejun Heo /* 3121bbddff05STejun Heo * UP percpu area setup. 3122bbddff05STejun Heo * 3123bbddff05STejun Heo * UP always uses km-based percpu allocator with identity mapping. 3124bbddff05STejun Heo * Static percpu variables are indistinguishable from the usual static 3125bbddff05STejun Heo * variables and don't require any special preparation. 
3126bbddff05STejun Heo */ 3127bbddff05STejun Heo void __init setup_per_cpu_areas(void) 3128bbddff05STejun Heo { 3129bbddff05STejun Heo const size_t unit_size = 3130bbddff05STejun Heo roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 3131bbddff05STejun Heo PERCPU_DYNAMIC_RESERVE)); 3132bbddff05STejun Heo struct pcpu_alloc_info *ai; 3133bbddff05STejun Heo void *fc; 3134bbddff05STejun Heo 3135bbddff05STejun Heo ai = pcpu_alloc_alloc_info(1, 1); 313626fb3daeSMike Rapoport fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 3137bbddff05STejun Heo if (!ai || !fc) 3138bbddff05STejun Heo panic("Failed to allocate memory for percpu areas."); 3139100d13c3SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 3140100d13c3SCatalin Marinas kmemleak_free(fc); 3141bbddff05STejun Heo 3142bbddff05STejun Heo ai->dyn_size = unit_size; 3143bbddff05STejun Heo ai->unit_size = unit_size; 3144bbddff05STejun Heo ai->atom_size = unit_size; 3145bbddff05STejun Heo ai->alloc_size = unit_size; 3146bbddff05STejun Heo ai->groups[0].nr_units = 1; 3147bbddff05STejun Heo ai->groups[0].cpu_map[0] = 0; 3148bbddff05STejun Heo 3149163fa234SKefeng Wang pcpu_setup_first_chunk(ai, fc); 3150438a5061SNicolas Pitre pcpu_free_alloc_info(ai); 3151bbddff05STejun Heo } 3152bbddff05STejun Heo 3153bbddff05STejun Heo #endif /* CONFIG_SMP */ 3154bbddff05STejun Heo 3155099a19d9STejun Heo /* 31567e8a6304SDennis Zhou (Facebook) * pcpu_nr_pages - calculate total number of populated backing pages 31577e8a6304SDennis Zhou (Facebook) * 31587e8a6304SDennis Zhou (Facebook) * This reflects the number of pages populated to back chunks. Metadata is 31597e8a6304SDennis Zhou (Facebook) * excluded in the number exposed in meminfo as the number of backing pages 31607e8a6304SDennis Zhou (Facebook) * scales with the number of cpus and can quickly outweigh the memory used for 31617e8a6304SDennis Zhou (Facebook) * metadata. It also keeps this calculation nice and simple. 31627e8a6304SDennis Zhou (Facebook) * 31637e8a6304SDennis Zhou (Facebook) * RETURNS: 31647e8a6304SDennis Zhou (Facebook) * Total number of populated backing pages in use by the allocator. 31657e8a6304SDennis Zhou (Facebook) */ 31667e8a6304SDennis Zhou (Facebook) unsigned long pcpu_nr_pages(void) 31677e8a6304SDennis Zhou (Facebook) { 31687e8a6304SDennis Zhou (Facebook) return pcpu_nr_populated * pcpu_nr_units; 31697e8a6304SDennis Zhou (Facebook) } 31707e8a6304SDennis Zhou (Facebook) 31717e8a6304SDennis Zhou (Facebook) /* 31721a4d7607STejun Heo * Percpu allocator is initialized early during boot when neither slab or 31731a4d7607STejun Heo * workqueue is available. Plug async management until everything is up 31741a4d7607STejun Heo * and running. 31751a4d7607STejun Heo */ 31761a4d7607STejun Heo static int __init percpu_enable_async(void) 31771a4d7607STejun Heo { 31781a4d7607STejun Heo pcpu_async_enabled = true; 31791a4d7607STejun Heo return 0; 31801a4d7607STejun Heo } 31811a4d7607STejun Heo subsys_initcall(percpu_enable_async); 3182