// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while all unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use the
 * second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between the bytes, bits, and blocks.
 * All hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
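/*
 * For illustration only - a minimal sketch of how a caller obtains a
 * memcg-aware dynamic percpu area as described above, using the public
 * API from <linux/percpu.h>.  "struct foo_stats" and its "nr_events"
 * field are hypothetical; error handling is minimal.
 *
 *	struct foo_stats { unsigned long nr_events; };
 *	struct foo_stats __percpu *stats;
 *
 *	// __GFP_ACCOUNT selects the memcg-aware set of chunks
 *	stats = alloc_percpu_gfp(struct foo_stats, GFP_KERNEL | __GFP_ACCOUNT);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->nr_events);		// local-cpu fast path
 *	free_percpu(stats);
 */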
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/*
 * The slots are sorted by the size of the biggest contiguous free area.
 * 1-31 bytes share the same slot.
 */
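/*
 * For illustration: with PCPU_SLOT_BASE_SHIFT == 5, __pcpu_size_to_slot()
 * below computes max(fls(size) - PCPU_SLOT_BASE_SHIFT + 2, 1), so e.g.
 *
 *	size =   64 bytes: fls(64)   =  7  ->  slot 4
 *	size = 1024 bytes: fls(1024) = 11  ->  slot 8
 *
 * Doubling the largest free area moves a chunk up by one slot.
 */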
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static int pcpu_free_slot __ro_after_init;
int pcpu_sidelined_slot __ro_after_init;
int pcpu_to_depopulate_slot __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;
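/*
 * For illustration: under the default SMP mapping above, translating a
 * unit-0 address to a percpu pointer and back is a round trip through the
 * static area's base (offsets here are made-up values):
 *
 *	addr = pcpu_base_addr + off;		// unit-0 address
 *	ptr  = __addr_to_pcpu_ptr(addr);	// == __per_cpu_start + off
 *	addr == __pcpu_ptr_to_addr(ptr);	// round trip holds
 *
 * Architectures only override these macros when percpu pointers are not
 * plain offsets from __per_cpu_start.
 */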
/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/*
 * The number of empty populated pages, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per a unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}
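/*
 * For illustration (offsets are made-up values): a chunk covering 16
 * pages with start_offset = 0x1000 and end_offset = 0x800 only serves
 *
 *	[base_addr + 0x1000, base_addr + 16 * PAGE_SIZE - 0x800)
 *
 * so pcpu_addr_in_chunk() rejects addresses in the leading/trailing
 * slack that the first chunk's static/reserved layout can leave.
 */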
static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_free_slot;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}
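/*
 * For illustration: a chunk whose largest contiguous free area
 * (chunk_md->contig_hint) is 256 bits represents 256 * PCPU_MIN_ALLOC_SIZE
 * = 1024 free bytes with the usual 4-byte minimum allocation size, so
 * pcpu_chunk_slot() files it under __pcpu_size_to_slot(1024) == 8.
 * Likewise, the address of page 3 of cpu 2's unit is simply
 *
 *	chunk->base_addr + pcpu_unit_offsets[2] + (3 << PAGE_SHIFT)
 */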
/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}

/**
 * pcpu_check_block_hint - check against the contig hint
 * @block: block of interest
 * @bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Check to see if the allocation can fit in the block's contig hint.
 * Note, a chunk uses the same hints as a block so this can also check against
 * the chunk's contig hint.
 */
static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
				  size_t align)
{
	int bit_off = ALIGN(block->contig_hint_start, align) -
		block->contig_hint_start;

	return bit_off + bits <= block->contig_hint;
}
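/*
 * For illustration: with contig_hint_start = 6, contig_hint = 20 and a
 * requested align of 8 bits, ALIGN(6, 8) = 8 costs bit_off = 2 bits of
 * the hint, so pcpu_check_block_hint() accepts requests of up to 18 bits
 * (2 + bits <= 20) and rejects anything larger without scanning.
 */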
/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist?  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint)?  Third, is the allocation request
	 * larger than the scan_hint?
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
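/*
 * For illustration: a block with first_free = 0, scan_hint_start = 100,
 * scan_hint = 8 and contig_hint_start = 300 that is asked for 16 bits
 * skips the 8-bit scan_hint region and starts scanning at bit 108; asked
 * for 4 bits, it starts at first_free since the scan_hint may fit the
 * request.
 */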
/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First, is there a contig_hint
		 * to check?  Second, have we checked this hint before by
		 * comparing the block_off?  Third, is this the same as the
		 * right contig hint?  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}
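/*
 * For illustration: suppose block i ends with right_free = 100 bits,
 * block i+1 is completely free (left_free == PCPU_BITMAP_BLOCK_BITS) and
 * block i+2 begins with left_free = 50.  A single iteration reports one
 * free region of 100 + PCPU_BITMAP_BLOCK_BITS + 50 bits starting at the
 * tail of block i - the cross-block aggregation in the loop above.
 */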
/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
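/*
 * For illustration: a sketch of how a caller walks candidate regions with
 * the iterator above (loosely modeled on the fit search; the populated
 * check is a stand-in, not the actual predicate):
 *
 *	int bit_off, bits;
 *
 *	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
 *		if (<region usable, e.g. backing pages populated>)
 *			break;
 *	}
 *	if (bit_off == pcpu_chunk_map_bits(chunk))
 *		return -1;	// no fit in this chunk
 */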
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		if (move_front)
			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}
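/*
 * For illustration: with 4 KiB pages, a one-page bitmap allocation comes
 * from the slab via kzalloc() while a 16 KiB one is vmalloc-backed; both
 * are freed uniformly with kvfree() in pcpu_mem_free().
 */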
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	/* leave isolated chunks in-place */
	if (chunk->isolated)
		return;

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (!chunk->isolated) {
		chunk->isolated = true;
		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
	}
	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
}

static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (chunk->isolated) {
		chunk->isolated = false;
		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
		pcpu_chunk_relocate(chunk, -1);
	}
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * that an md_block covers a page.  The hint update functions recognize if
 * a block is made full or broken to calculate deltas for keeping track of
 * free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
		pcpu_nr_empty_pop_pages += nr;
}
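/*
 * For illustration: isolating a chunk with nr_empty_pop_pages == 3 drops
 * the global pcpu_nr_empty_pop_pages by 3 so the balance work no longer
 * counts those pages as available; reintegration adds them back and
 * re-files the chunk via pcpu_chunk_relocate().
 */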
/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
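/*
 * For illustration: [4, 10) vs [8, 12) overlaps since 4 < 12 && 8 < 10,
 * while [4, 10) vs [10, 12) does not - with half-open intervals, regions
 * that merely touch at a boundary are not overlapping.
 */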
/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}
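/*
 * For illustration: a block with contig_hint = 10 at bit 0 (and no
 * scan_hint yet) that learns of a 14-bit free area at bit 50 promotes the
 * old hint to the scan_hint (scan_hint_start = 0, scan_hint = 10) and
 * records contig_hint = 14 at bit 50, preserving the invariant
 * scan_hint_start < contig_hint_start.
 */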
/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path, as a refresh there
 * is only triggered when the contig_hint is broken; in that case the
 * scan_hint will be before the contig_hint (or after it if
 * scan_hint == contig_hint).  This cannot be avoided on freeing as we
 * want to find the largest area possibly spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}
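/*
 * For illustration: an 8-bit-aligned allocation placed at bit 8 can leave
 * bits 5-7 free yet unseen by the regular paths.  When the scanned hole
 * starts at s_off = 8 and find_last_bit() reports the last allocated bit
 * below it as bit 4, the backward scan above widens the update to start
 * at bit 5, so the hole is not lost to alignment.
 */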
/**
 * pcpu_block_refresh_hint - scan a block and refresh its metadata
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int start, end;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, start, end);
}
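/*
 * For illustration: with an alloc_map of ...1110000110 (bit 0 free,
 * bits 1-2 allocated, bits 3-6 free, bits 7+ allocated) and the walk
 * starting at bit 0, for_each_clear_bitrange_from() yields [0, 1) and
 * then [3, 7), and each range feeds pcpu_block_update() to rebuild the
 * contig/scan hints.
 */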
/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocation */
	int s_off, e_off;	/* block offsets of the allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	/*
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}
	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	/*
	 * If the allocation is not atomic, some blocks may not be
	 * populated with pages, while we account it here.  The number
	 * of pages will be added back with pcpu_chunk_populated()
	 * when populating pages.
	 */
	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}
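/*
 * For illustration: with PCPU_BITMAP_BLOCK_BITS == 1024 (4 KiB pages and
 * the usual 4-byte minimum allocation size), an allocation of 100 bits at
 * bit_off = 1000 spans two blocks: s_index = 0, e_index = 1, s_off = 1000
 * and e_off = 76 - the inclusive end bit 1099 is bit 75 of block 1, plus
 * one for the [start, end) form.
 */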
/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;
988b185cd0dSDennis Zhou (Facebook) * If it does, then the scan to find the beginning/end of the
989b185cd0dSDennis Zhou (Facebook) * larger free area can be avoided.
990b185cd0dSDennis Zhou (Facebook) *
991b185cd0dSDennis Zhou (Facebook) * start and end refer to the beginning and end of the free area
992b185cd0dSDennis Zhou (Facebook) * within their respective blocks. This is not necessarily
993b185cd0dSDennis Zhou (Facebook) * the entire free area, as the free area may extend past the
994b185cd0dSDennis Zhou (Facebook) * beginning or end of the block.
995b185cd0dSDennis Zhou (Facebook) */
996b185cd0dSDennis Zhou (Facebook) start = s_off;
997b185cd0dSDennis Zhou (Facebook) if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
998b185cd0dSDennis Zhou (Facebook) start = s_block->contig_hint_start;
999b185cd0dSDennis Zhou (Facebook) } else {
1000b185cd0dSDennis Zhou (Facebook) /*
1001b185cd0dSDennis Zhou (Facebook) * Scan backwards to find the extent of the free area.
1002b185cd0dSDennis Zhou (Facebook) * find_last_bit() returns the start bit when no set bit is
1003b185cd0dSDennis Zhou (Facebook) * found, which means there is no allocation below it and
1004b185cd0dSDennis Zhou (Facebook) * the block is free from its beginning.
1005b185cd0dSDennis Zhou (Facebook) */
1006b185cd0dSDennis Zhou (Facebook) int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1007b185cd0dSDennis Zhou (Facebook) start);
1008b185cd0dSDennis Zhou (Facebook) start = (start == l_bit) ? 0 : l_bit + 1;
1009b185cd0dSDennis Zhou (Facebook) }
1010b185cd0dSDennis Zhou (Facebook) 
1011b185cd0dSDennis Zhou (Facebook) end = e_off;
1012b185cd0dSDennis Zhou (Facebook) if (e_off == e_block->contig_hint_start)
1013b185cd0dSDennis Zhou (Facebook) end = e_block->contig_hint_start + e_block->contig_hint;
1014b185cd0dSDennis Zhou (Facebook) else
1015b185cd0dSDennis Zhou (Facebook) end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1016b185cd0dSDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS, end);
1017b185cd0dSDennis Zhou (Facebook) 
1018ca460b3cSDennis Zhou (Facebook) /* update s_block */
1019b185cd0dSDennis Zhou (Facebook) e_off = (s_index == e_index) ?
end : PCPU_BITMAP_BLOCK_BITS;
1020b239f7daSDennis Zhou if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1021b239f7daSDennis Zhou nr_empty_pages++;
1022b185cd0dSDennis Zhou (Facebook) pcpu_block_update(s_block, start, e_off);
1023ca460b3cSDennis Zhou (Facebook) 
1024ca460b3cSDennis Zhou (Facebook) /* the free spans multiple blocks */
1025ca460b3cSDennis Zhou (Facebook) if (s_index != e_index) {
1026ca460b3cSDennis Zhou (Facebook) /* update e_block */
1027b239f7daSDennis Zhou if (end == PCPU_BITMAP_BLOCK_BITS)
1028b239f7daSDennis Zhou nr_empty_pages++;
1029b185cd0dSDennis Zhou (Facebook) pcpu_block_update(e_block, 0, end);
1030ca460b3cSDennis Zhou (Facebook) 
1031ca460b3cSDennis Zhou (Facebook) /* reset md_blocks in the middle */
1032b239f7daSDennis Zhou nr_empty_pages += (e_index - s_index - 1);
1033ca460b3cSDennis Zhou (Facebook) for (block = s_block + 1; block < e_block; block++) {
1034ca460b3cSDennis Zhou (Facebook) block->first_free = 0;
1035382b88e9SDennis Zhou block->scan_hint = 0;
1036ca460b3cSDennis Zhou (Facebook) block->contig_hint_start = 0;
1037ca460b3cSDennis Zhou (Facebook) block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1038ca460b3cSDennis Zhou (Facebook) block->left_free = PCPU_BITMAP_BLOCK_BITS;
1039ca460b3cSDennis Zhou (Facebook) block->right_free = PCPU_BITMAP_BLOCK_BITS;
1040ca460b3cSDennis Zhou (Facebook) }
1041ca460b3cSDennis Zhou (Facebook) }
1042ca460b3cSDennis Zhou (Facebook) 
1043b239f7daSDennis Zhou if (nr_empty_pages)
1044b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, nr_empty_pages);
1045b239f7daSDennis Zhou 
1046b185cd0dSDennis Zhou (Facebook) /*
1047b239f7daSDennis Zhou * Refresh chunk metadata when the free makes a block free or spans
1048b239f7daSDennis Zhou * across blocks. The contig_hint may be off by up to a page, but if
1049b239f7daSDennis Zhou * the contig_hint is contained in a block, it will be accurate with
1050b239f7daSDennis Zhou * the else condition below.
1051b185cd0dSDennis Zhou (Facebook) */
1052b239f7daSDennis Zhou if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1053d33d9f3dSDennis Zhou pcpu_chunk_refresh_hint(chunk, true);
1054b185cd0dSDennis Zhou (Facebook) else
105592c14cabSDennis Zhou pcpu_block_update(&chunk->chunk_md,
105692c14cabSDennis Zhou pcpu_block_off_to_off(s_index, start),
105792c14cabSDennis Zhou end);
1058ca460b3cSDennis Zhou (Facebook) }
1059ca460b3cSDennis Zhou (Facebook) 
1060ca460b3cSDennis Zhou (Facebook) /**
106140064aecSDennis Zhou (Facebook) * pcpu_is_populated - determines if the region is populated
106240064aecSDennis Zhou (Facebook) * @chunk: chunk of interest
106340064aecSDennis Zhou (Facebook) * @bit_off: chunk offset
106440064aecSDennis Zhou (Facebook) * @bits: size of area
106540064aecSDennis Zhou (Facebook) * @next_off: return value for the next offset to start searching
106640064aecSDennis Zhou (Facebook) *
106740064aecSDennis Zhou (Facebook) * For atomic allocations, check if the backing pages are populated.
106840064aecSDennis Zhou (Facebook) *
106940064aecSDennis Zhou (Facebook) * RETURNS:
107040064aecSDennis Zhou (Facebook) * %true if the backing pages are populated, %false otherwise.
107140064aecSDennis Zhou (Facebook) * @next_off is used to skip over unpopulated blocks in pcpu_find_block_fit().
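 *
 * Sketch (assuming 4K pages and a PCPU_MIN_ALLOC_SIZE of 4 bytes, i.e.
 * 1024 map bits per page): a region at bit_off 2048 spanning 512 bits
 * maps to pages [2, 3). If page 2 is unpopulated, this returns %false
 * and *next_off is set to the first map bit of page 3.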
107240064aecSDennis Zhou (Facebook) */
107340064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
107440064aecSDennis Zhou (Facebook) int *next_off)
107540064aecSDennis Zhou (Facebook) {
1076801a5736SYury Norov unsigned int start, end;
107740064aecSDennis Zhou (Facebook) 
1078801a5736SYury Norov start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1079801a5736SYury Norov end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
108040064aecSDennis Zhou (Facebook) 
1081801a5736SYury Norov start = find_next_zero_bit(chunk->populated, end, start);
1082801a5736SYury Norov if (start >= end)
108340064aecSDennis Zhou (Facebook) return true;
108440064aecSDennis Zhou (Facebook) 
1085801a5736SYury Norov end = find_next_bit(chunk->populated, end, start + 1);
1086801a5736SYury Norov 
1087801a5736SYury Norov *next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
108840064aecSDennis Zhou (Facebook) return false;
108940064aecSDennis Zhou (Facebook) }
109040064aecSDennis Zhou (Facebook) 
109140064aecSDennis Zhou (Facebook) /**
109240064aecSDennis Zhou (Facebook) * pcpu_find_block_fit - finds the block index to start searching
109340064aecSDennis Zhou (Facebook) * @chunk: chunk of interest
109440064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units
109540064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE bytes)
109640064aecSDennis Zhou (Facebook) * @pop_only: use populated regions only
109740064aecSDennis Zhou (Facebook) *
1098b4c2116cSDennis Zhou (Facebook) * Given a chunk and an allocation spec, find the offset to begin searching
1099b4c2116cSDennis Zhou (Facebook) * for a free region. This iterates over the bitmap metadata blocks to
1100b4c2116cSDennis Zhou (Facebook) * find an offset that will be guaranteed to fit the requirements. It is
1101b4c2116cSDennis Zhou (Facebook) * not quite first fit: if the allocation does not fit in the contig hint
1102b4c2116cSDennis Zhou (Facebook) * of a block or chunk, that block or chunk is skipped. This errs on the
1103b4c2116cSDennis Zhou (Facebook) * side of caution to prevent excess iteration. Poor alignment can cause
1104b4c2116cSDennis Zhou (Facebook) * the allocator to skip over blocks and chunks that have valid free areas.
1105b4c2116cSDennis Zhou (Facebook) *
110640064aecSDennis Zhou (Facebook) * RETURNS:
110740064aecSDennis Zhou (Facebook) * The offset in the bitmap to begin searching.
110840064aecSDennis Zhou (Facebook) * -1 if no offset is found.
110940064aecSDennis Zhou (Facebook) */
111040064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
111140064aecSDennis Zhou (Facebook) size_t align, bool pop_only)
111240064aecSDennis Zhou (Facebook) {
111392c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1114b4c2116cSDennis Zhou (Facebook) int bit_off, bits, next_off;
111540064aecSDennis Zhou (Facebook) 
111613f96637SDennis Zhou (Facebook) /*
11178ea2e1e3SRoman Gushchin * This optimization avoids scanning: if the allocation cannot fit in
11188ea2e1e3SRoman Gushchin * the chunk-wide contig hint, assume there is memory pressure and a
11198ea2e1e3SRoman Gushchin * new chunk will be created soon.
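	 *
	 * E.g., a request for 64 bits at 16-bit alignment against a chunk
	 * whose contig hint is only 32 bits fails this check immediately,
	 * without touching any per-block metadata.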
112013f96637SDennis Zhou (Facebook) */ 11218ea2e1e3SRoman Gushchin if (!pcpu_check_block_hint(chunk_md, alloc_bits, align)) 112213f96637SDennis Zhou (Facebook) return -1; 112313f96637SDennis Zhou (Facebook) 1124d33d9f3dSDennis Zhou bit_off = pcpu_next_hint(chunk_md, alloc_bits); 1125b4c2116cSDennis Zhou (Facebook) bits = 0; 1126b4c2116cSDennis Zhou (Facebook) pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { 112740064aecSDennis Zhou (Facebook) if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, 1128b4c2116cSDennis Zhou (Facebook) &next_off)) 112940064aecSDennis Zhou (Facebook) break; 113040064aecSDennis Zhou (Facebook) 1131b4c2116cSDennis Zhou (Facebook) bit_off = next_off; 113240064aecSDennis Zhou (Facebook) bits = 0; 113340064aecSDennis Zhou (Facebook) } 113440064aecSDennis Zhou (Facebook) 113540064aecSDennis Zhou (Facebook) if (bit_off == pcpu_chunk_map_bits(chunk)) 113640064aecSDennis Zhou (Facebook) return -1; 113740064aecSDennis Zhou (Facebook) 113840064aecSDennis Zhou (Facebook) return bit_off; 113940064aecSDennis Zhou (Facebook) } 114040064aecSDennis Zhou (Facebook) 1141b89462a9SDennis Zhou /* 1142b89462a9SDennis Zhou * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() 1143b89462a9SDennis Zhou * @map: the address to base the search on 1144b89462a9SDennis Zhou * @size: the bitmap size in bits 1145b89462a9SDennis Zhou * @start: the bitnumber to start searching at 1146b89462a9SDennis Zhou * @nr: the number of zeroed bits we're looking for 1147b89462a9SDennis Zhou * @align_mask: alignment mask for zero area 1148b89462a9SDennis Zhou * @largest_off: offset of the largest area skipped 1149b89462a9SDennis Zhou * @largest_bits: size of the largest area skipped 1150b89462a9SDennis Zhou * 1151b89462a9SDennis Zhou * The @align_mask should be one less than a power of 2. 1152b89462a9SDennis Zhou * 1153b89462a9SDennis Zhou * This is a modified version of bitmap_find_next_zero_area_off() to remember 1154b89462a9SDennis Zhou * the largest area that was skipped. This is imperfect, but in general is 1155b89462a9SDennis Zhou * good enough. The largest remembered region is the largest failed region 1156b89462a9SDennis Zhou * seen. This does not include anything we possibly skipped due to alignment. 1157b89462a9SDennis Zhou * pcpu_block_update_scan() does scan backwards to try and recover what was 1158b89462a9SDennis Zhou * lost to alignment. While this can cause scanning to miss earlier possible 1159b89462a9SDennis Zhou * free areas, smaller allocations will eventually fill those holes. 
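 *
 * A small worked example (hypothetical map): when searching for nr = 8,
 * a 6-bit run of zeros is too small; it is remembered via @largest_off
 * and @largest_bits (ties prefer the better aligned offset) and the
 * search restarts just past the set bit that terminated the run.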
1160b89462a9SDennis Zhou */ 1161b89462a9SDennis Zhou static unsigned long pcpu_find_zero_area(unsigned long *map, 1162b89462a9SDennis Zhou unsigned long size, 1163b89462a9SDennis Zhou unsigned long start, 1164b89462a9SDennis Zhou unsigned long nr, 1165b89462a9SDennis Zhou unsigned long align_mask, 1166b89462a9SDennis Zhou unsigned long *largest_off, 1167b89462a9SDennis Zhou unsigned long *largest_bits) 1168b89462a9SDennis Zhou { 1169b89462a9SDennis Zhou unsigned long index, end, i, area_off, area_bits; 1170b89462a9SDennis Zhou again: 1171b89462a9SDennis Zhou index = find_next_zero_bit(map, size, start); 1172b89462a9SDennis Zhou 1173b89462a9SDennis Zhou /* Align allocation */ 1174b89462a9SDennis Zhou index = __ALIGN_MASK(index, align_mask); 1175b89462a9SDennis Zhou area_off = index; 1176b89462a9SDennis Zhou 1177b89462a9SDennis Zhou end = index + nr; 1178b89462a9SDennis Zhou if (end > size) 1179b89462a9SDennis Zhou return end; 1180b89462a9SDennis Zhou i = find_next_bit(map, end, index); 1181b89462a9SDennis Zhou if (i < end) { 1182b89462a9SDennis Zhou area_bits = i - area_off; 1183b89462a9SDennis Zhou /* remember largest unused area with best alignment */ 1184b89462a9SDennis Zhou if (area_bits > *largest_bits || 1185b89462a9SDennis Zhou (area_bits == *largest_bits && *largest_off && 1186b89462a9SDennis Zhou (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { 1187b89462a9SDennis Zhou *largest_off = area_off; 1188b89462a9SDennis Zhou *largest_bits = area_bits; 1189b89462a9SDennis Zhou } 1190b89462a9SDennis Zhou 1191b89462a9SDennis Zhou start = i + 1; 1192b89462a9SDennis Zhou goto again; 1193b89462a9SDennis Zhou } 1194b89462a9SDennis Zhou return index; 1195b89462a9SDennis Zhou } 1196b89462a9SDennis Zhou 119740064aecSDennis Zhou (Facebook) /** 119840064aecSDennis Zhou (Facebook) * pcpu_alloc_area - allocates an area from a pcpu_chunk 119940064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 120040064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 120140064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE) 120240064aecSDennis Zhou (Facebook) * @start: bit_off to start searching 120340064aecSDennis Zhou (Facebook) * 120440064aecSDennis Zhou (Facebook) * This function takes in a @start offset to begin searching to fit an 1205b4c2116cSDennis Zhou (Facebook) * allocation of @alloc_bits with alignment @align. It needs to scan 1206b4c2116cSDennis Zhou (Facebook) * the allocation map because if it fits within the block's contig hint, 1207b4c2116cSDennis Zhou (Facebook) * @start will be block->first_free. This is an attempt to fill the 1208b4c2116cSDennis Zhou (Facebook) * allocation prior to breaking the contig hint. The allocation and 1209b4c2116cSDennis Zhou (Facebook) * boundary maps are updated accordingly if it confirms a valid 1210b4c2116cSDennis Zhou (Facebook) * free area. 121140064aecSDennis Zhou (Facebook) * 121240064aecSDennis Zhou (Facebook) * RETURNS: 121340064aecSDennis Zhou (Facebook) * Allocated addr offset in @chunk on success. 121440064aecSDennis Zhou (Facebook) * -1 if no matching area is found. 121540064aecSDennis Zhou (Facebook) */ 121640064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, 121740064aecSDennis Zhou (Facebook) size_t align, int start) 121840064aecSDennis Zhou (Facebook) { 121992c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 122040064aecSDennis Zhou (Facebook) size_t align_mask = (align) ? 
(align - 1) : 0; 1221b89462a9SDennis Zhou unsigned long area_off = 0, area_bits = 0; 122240064aecSDennis Zhou (Facebook) int bit_off, end, oslot; 12239f7dcf22STejun Heo 12244f996e23STejun Heo lockdep_assert_held(&pcpu_lock); 12254f996e23STejun Heo 122640064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1227833af842STejun Heo 1228833af842STejun Heo /* 122940064aecSDennis Zhou (Facebook) * Search to find a fit. 1230833af842STejun Heo */ 12318c43004aSDennis Zhou end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, 12328c43004aSDennis Zhou pcpu_chunk_map_bits(chunk)); 1233b89462a9SDennis Zhou bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, 1234b89462a9SDennis Zhou align_mask, &area_off, &area_bits); 123540064aecSDennis Zhou (Facebook) if (bit_off >= end) 1236a16037c8STejun Heo return -1; 1237a16037c8STejun Heo 1238b89462a9SDennis Zhou if (area_bits) 1239b89462a9SDennis Zhou pcpu_block_update_scan(chunk, area_off, area_bits); 1240b89462a9SDennis Zhou 124140064aecSDennis Zhou (Facebook) /* update alloc map */ 124240064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, bit_off, alloc_bits); 1243a16037c8STejun Heo 124440064aecSDennis Zhou (Facebook) /* update boundary map */ 124540064aecSDennis Zhou (Facebook) set_bit(bit_off, chunk->bound_map); 124640064aecSDennis Zhou (Facebook) bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); 124740064aecSDennis Zhou (Facebook) set_bit(bit_off + alloc_bits, chunk->bound_map); 1248a16037c8STejun Heo 124940064aecSDennis Zhou (Facebook) chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; 125040064aecSDennis Zhou (Facebook) 125186b442fbSDennis Zhou (Facebook) /* update first free bit */ 125292c14cabSDennis Zhou if (bit_off == chunk_md->first_free) 125392c14cabSDennis Zhou chunk_md->first_free = find_next_zero_bit( 125486b442fbSDennis Zhou (Facebook) chunk->alloc_map, 125586b442fbSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk), 125686b442fbSDennis Zhou (Facebook) bit_off + alloc_bits); 125786b442fbSDennis Zhou (Facebook) 1258ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); 125940064aecSDennis Zhou (Facebook) 126040064aecSDennis Zhou (Facebook) pcpu_chunk_relocate(chunk, oslot); 126140064aecSDennis Zhou (Facebook) 126240064aecSDennis Zhou (Facebook) return bit_off * PCPU_MIN_ALLOC_SIZE; 1263a16037c8STejun Heo } 1264a16037c8STejun Heo 1265a16037c8STejun Heo /** 126640064aecSDennis Zhou (Facebook) * pcpu_free_area - frees the corresponding offset 1267fbf59bc9STejun Heo * @chunk: chunk of interest 126840064aecSDennis Zhou (Facebook) * @off: addr offset into chunk 1269fbf59bc9STejun Heo * 127040064aecSDennis Zhou (Facebook) * This function determines the size of an allocation to free using 127140064aecSDennis Zhou (Facebook) * the boundary bitmap and clears the allocation map. 12725b32af91SRoman Gushchin * 12735b32af91SRoman Gushchin * RETURNS: 12745b32af91SRoman Gushchin * Number of freed bytes. 
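 *
 * Sketch: for a 3-bit allocation at bit_off 10, pcpu_alloc_area() left
 * bound_map bits 10 and 13 set with bits 11-12 clear, so the size is
 * recovered here as find_next_bit(bound_map, ..., 11) - 10 = 3 bits,
 * i.e. 3 * PCPU_MIN_ALLOC_SIZE freed bytes.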
1275fbf59bc9STejun Heo */ 12765b32af91SRoman Gushchin static int pcpu_free_area(struct pcpu_chunk *chunk, int off) 1277fbf59bc9STejun Heo { 127892c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 12795b32af91SRoman Gushchin int bit_off, bits, end, oslot, freed; 1280fbf59bc9STejun Heo 12815ccd30e4SDennis Zhou lockdep_assert_held(&pcpu_lock); 128230a5b536SDennis Zhou pcpu_stats_area_dealloc(chunk); 12835ccd30e4SDennis Zhou 128440064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1285723ad1d9SAl Viro 128640064aecSDennis Zhou (Facebook) bit_off = off / PCPU_MIN_ALLOC_SIZE; 1287fbf59bc9STejun Heo 128840064aecSDennis Zhou (Facebook) /* find end index */ 128940064aecSDennis Zhou (Facebook) end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), 129040064aecSDennis Zhou (Facebook) bit_off + 1); 129140064aecSDennis Zhou (Facebook) bits = end - bit_off; 129240064aecSDennis Zhou (Facebook) bitmap_clear(chunk->alloc_map, bit_off, bits); 12933d331ad7SAl Viro 12945b32af91SRoman Gushchin freed = bits * PCPU_MIN_ALLOC_SIZE; 12955b32af91SRoman Gushchin 129640064aecSDennis Zhou (Facebook) /* update metadata */ 12975b32af91SRoman Gushchin chunk->free_bytes += freed; 1298fbf59bc9STejun Heo 129986b442fbSDennis Zhou (Facebook) /* update first free bit */ 130092c14cabSDennis Zhou chunk_md->first_free = min(chunk_md->first_free, bit_off); 130186b442fbSDennis Zhou (Facebook) 1302ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_free(chunk, bit_off, bits); 1303b539b87fSTejun Heo 1304fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 13055b32af91SRoman Gushchin 13065b32af91SRoman Gushchin return freed; 1307fbf59bc9STejun Heo } 1308fbf59bc9STejun Heo 1309047924c9SDennis Zhou static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) 1310047924c9SDennis Zhou { 1311047924c9SDennis Zhou block->scan_hint = 0; 1312047924c9SDennis Zhou block->contig_hint = nr_bits; 1313047924c9SDennis Zhou block->left_free = nr_bits; 1314047924c9SDennis Zhou block->right_free = nr_bits; 1315047924c9SDennis Zhou block->first_free = 0; 1316047924c9SDennis Zhou block->nr_bits = nr_bits; 1317047924c9SDennis Zhou } 1318047924c9SDennis Zhou 1319ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) 1320ca460b3cSDennis Zhou (Facebook) { 1321ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *md_block; 1322ca460b3cSDennis Zhou (Facebook) 132392c14cabSDennis Zhou /* init the chunk's block */ 132492c14cabSDennis Zhou pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); 132592c14cabSDennis Zhou 1326ca460b3cSDennis Zhou (Facebook) for (md_block = chunk->md_blocks; 1327ca460b3cSDennis Zhou (Facebook) md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); 1328047924c9SDennis Zhou md_block++) 1329047924c9SDennis Zhou pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); 1330ca460b3cSDennis Zhou (Facebook) } 1331ca460b3cSDennis Zhou (Facebook) 133240064aecSDennis Zhou (Facebook) /** 133340064aecSDennis Zhou (Facebook) * pcpu_alloc_first_chunk - creates chunks that serve the first chunk 133440064aecSDennis Zhou (Facebook) * @tmp_addr: the start of the region served 133540064aecSDennis Zhou (Facebook) * @map_size: size of the region served 133640064aecSDennis Zhou (Facebook) * 133740064aecSDennis Zhou (Facebook) * This is responsible for creating the chunks that serve the first chunk. 
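 * Both the reserved region, when one exists, and the dynamic region of
 * the first chunk are served by chunks created here.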
The
133840064aecSDennis Zhou (Facebook) * base_addr is @tmp_addr rounded down to a page boundary, while the region
133940064aecSDennis Zhou (Facebook) * end is rounded up to a page boundary. Start and end offsets are tracked to
134040064aecSDennis Zhou (Facebook) * determine the region served; this keeps the bitmap allocator free of partial blocks.
134140064aecSDennis Zhou (Facebook) *
134240064aecSDennis Zhou (Facebook) * RETURNS:
134340064aecSDennis Zhou (Facebook) * Chunk serving the region at @tmp_addr of @map_size.
134440064aecSDennis Zhou (Facebook) */
1345c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
134640064aecSDennis Zhou (Facebook) int map_size)
134710edf5b0SDennis Zhou (Facebook) {
134810edf5b0SDennis Zhou (Facebook) struct pcpu_chunk *chunk;
13493289e053SBaoquan He unsigned long aligned_addr;
135040064aecSDennis Zhou (Facebook) int start_offset, offset_bits, region_size, region_bits;
1351f655f405SMike Rapoport size_t alloc_size;
1352c0ebfdc3SDennis Zhou (Facebook) 
1353c0ebfdc3SDennis Zhou (Facebook) /* region calculations */
1354c0ebfdc3SDennis Zhou (Facebook) aligned_addr = tmp_addr & PAGE_MASK;
1355c0ebfdc3SDennis Zhou (Facebook) 
1356c0ebfdc3SDennis Zhou (Facebook) start_offset = tmp_addr - aligned_addr;
13573289e053SBaoquan He region_size = ALIGN(start_offset + map_size, PAGE_SIZE);
135810edf5b0SDennis Zhou (Facebook) 
1359c0ebfdc3SDennis Zhou (Facebook) /* allocate chunk */
136061cf93d3SDennis Zhou alloc_size = struct_size(chunk, populated,
136161cf93d3SDennis Zhou BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1362f655f405SMike Rapoport chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1363f655f405SMike Rapoport if (!chunk)
1364f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
1365f655f405SMike Rapoport alloc_size);
1366c0ebfdc3SDennis Zhou (Facebook) 
136710edf5b0SDennis Zhou (Facebook) INIT_LIST_HEAD(&chunk->list);
1368c0ebfdc3SDennis Zhou (Facebook) 
1369c0ebfdc3SDennis Zhou (Facebook) chunk->base_addr = (void *)aligned_addr;
137010edf5b0SDennis Zhou (Facebook) chunk->start_offset = start_offset;
13716b9d7c8eSDennis Zhou (Facebook) chunk->end_offset = region_size - chunk->start_offset - map_size;
1372c0ebfdc3SDennis Zhou (Facebook) 
13738ab16c43SDennis Zhou (Facebook) chunk->nr_pages = region_size >> PAGE_SHIFT;
137440064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk);
1375c0ebfdc3SDennis Zhou (Facebook) 
1376f655f405SMike Rapoport alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1377f655f405SMike Rapoport chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1378f655f405SMike Rapoport if (!chunk->alloc_map)
1379f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
1380f655f405SMike Rapoport alloc_size);
1381f655f405SMike Rapoport 
1382f655f405SMike Rapoport alloc_size =
1383f655f405SMike Rapoport BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1384f655f405SMike Rapoport chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1385f655f405SMike Rapoport if (!chunk->bound_map)
1386f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
1387f655f405SMike Rapoport alloc_size);
1388f655f405SMike Rapoport 
1389f655f405SMike Rapoport alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1390f655f405SMike Rapoport chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1391f655f405SMike Rapoport if (!chunk->md_blocks)
1392f655f405SMike Rapoport panic("%s: Failed
to allocate %zu bytes\n", __func__, 1393f655f405SMike Rapoport alloc_size); 1394f655f405SMike Rapoport 13953c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 1396faf65ddeSRoman Gushchin /* first chunk is free to use */ 13973c7be18aSRoman Gushchin chunk->obj_cgroups = NULL; 13983c7be18aSRoman Gushchin #endif 1399ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 140010edf5b0SDennis Zhou (Facebook) 140110edf5b0SDennis Zhou (Facebook) /* manage populated page bitmap */ 140210edf5b0SDennis Zhou (Facebook) chunk->immutable = true; 14038ab16c43SDennis Zhou (Facebook) bitmap_fill(chunk->populated, chunk->nr_pages); 14048ab16c43SDennis Zhou (Facebook) chunk->nr_populated = chunk->nr_pages; 1405b239f7daSDennis Zhou chunk->nr_empty_pop_pages = chunk->nr_pages; 140610edf5b0SDennis Zhou (Facebook) 140740064aecSDennis Zhou (Facebook) chunk->free_bytes = map_size; 1408c0ebfdc3SDennis Zhou (Facebook) 1409c0ebfdc3SDennis Zhou (Facebook) if (chunk->start_offset) { 1410c0ebfdc3SDennis Zhou (Facebook) /* hide the beginning of the bitmap */ 141140064aecSDennis Zhou (Facebook) offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; 141240064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 0, offset_bits); 141340064aecSDennis Zhou (Facebook) set_bit(0, chunk->bound_map); 141440064aecSDennis Zhou (Facebook) set_bit(offset_bits, chunk->bound_map); 1415ca460b3cSDennis Zhou (Facebook) 141692c14cabSDennis Zhou chunk->chunk_md.first_free = offset_bits; 141786b442fbSDennis Zhou (Facebook) 1418ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, 0, offset_bits); 1419c0ebfdc3SDennis Zhou (Facebook) } 1420c0ebfdc3SDennis Zhou (Facebook) 14216b9d7c8eSDennis Zhou (Facebook) if (chunk->end_offset) { 14226b9d7c8eSDennis Zhou (Facebook) /* hide the end of the bitmap */ 142340064aecSDennis Zhou (Facebook) offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; 142440064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 142540064aecSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk) - offset_bits, 142640064aecSDennis Zhou (Facebook) offset_bits); 142740064aecSDennis Zhou (Facebook) set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, 142840064aecSDennis Zhou (Facebook) chunk->bound_map); 142940064aecSDennis Zhou (Facebook) set_bit(region_bits, chunk->bound_map); 14306b9d7c8eSDennis Zhou (Facebook) 1431ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) 1432ca460b3cSDennis Zhou (Facebook) - offset_bits, offset_bits); 1433ca460b3cSDennis Zhou (Facebook) } 143440064aecSDennis Zhou (Facebook) 143510edf5b0SDennis Zhou (Facebook) return chunk; 143610edf5b0SDennis Zhou (Facebook) } 143710edf5b0SDennis Zhou (Facebook) 1438faf65ddeSRoman Gushchin static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) 14396081089fSTejun Heo { 14406081089fSTejun Heo struct pcpu_chunk *chunk; 144140064aecSDennis Zhou (Facebook) int region_bits; 14426081089fSTejun Heo 144347504ee0SDennis Zhou chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); 14446081089fSTejun Heo if (!chunk) 14456081089fSTejun Heo return NULL; 14466081089fSTejun Heo 14476081089fSTejun Heo INIT_LIST_HEAD(&chunk->list); 1448c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages; 144940064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 145040064aecSDennis Zhou (Facebook) 145140064aecSDennis Zhou (Facebook) chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * 145247504ee0SDennis Zhou sizeof(chunk->alloc_map[0]), gfp); 145340064aecSDennis Zhou (Facebook) if 
(!chunk->alloc_map) 145440064aecSDennis Zhou (Facebook) goto alloc_map_fail; 145540064aecSDennis Zhou (Facebook) 145640064aecSDennis Zhou (Facebook) chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * 145747504ee0SDennis Zhou sizeof(chunk->bound_map[0]), gfp); 145840064aecSDennis Zhou (Facebook) if (!chunk->bound_map) 145940064aecSDennis Zhou (Facebook) goto bound_map_fail; 146040064aecSDennis Zhou (Facebook) 1461ca460b3cSDennis Zhou (Facebook) chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * 146247504ee0SDennis Zhou sizeof(chunk->md_blocks[0]), gfp); 1463ca460b3cSDennis Zhou (Facebook) if (!chunk->md_blocks) 1464ca460b3cSDennis Zhou (Facebook) goto md_blocks_fail; 1465ca460b3cSDennis Zhou (Facebook) 14663c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 1467faf65ddeSRoman Gushchin if (!mem_cgroup_kmem_disabled()) { 14683c7be18aSRoman Gushchin chunk->obj_cgroups = 14693c7be18aSRoman Gushchin pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * 14703c7be18aSRoman Gushchin sizeof(struct obj_cgroup *), gfp); 14713c7be18aSRoman Gushchin if (!chunk->obj_cgroups) 14723c7be18aSRoman Gushchin goto objcg_fail; 14733c7be18aSRoman Gushchin } 14743c7be18aSRoman Gushchin #endif 14753c7be18aSRoman Gushchin 1476ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 1477ca460b3cSDennis Zhou (Facebook) 147840064aecSDennis Zhou (Facebook) /* init metadata */ 147940064aecSDennis Zhou (Facebook) chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; 1480c0ebfdc3SDennis Zhou (Facebook) 14816081089fSTejun Heo return chunk; 148240064aecSDennis Zhou (Facebook) 14833c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 14843c7be18aSRoman Gushchin objcg_fail: 14853c7be18aSRoman Gushchin pcpu_mem_free(chunk->md_blocks); 14863c7be18aSRoman Gushchin #endif 1487ca460b3cSDennis Zhou (Facebook) md_blocks_fail: 1488ca460b3cSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 148940064aecSDennis Zhou (Facebook) bound_map_fail: 149040064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 149140064aecSDennis Zhou (Facebook) alloc_map_fail: 149240064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk); 149340064aecSDennis Zhou (Facebook) 149440064aecSDennis Zhou (Facebook) return NULL; 14956081089fSTejun Heo } 14966081089fSTejun Heo 14976081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk) 14986081089fSTejun Heo { 14996081089fSTejun Heo if (!chunk) 15006081089fSTejun Heo return; 15013c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 15023c7be18aSRoman Gushchin pcpu_mem_free(chunk->obj_cgroups); 15033c7be18aSRoman Gushchin #endif 15046685b357SMike Rapoport pcpu_mem_free(chunk->md_blocks); 150540064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 150640064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 15071d5cfdb0STetsuo Handa pcpu_mem_free(chunk); 15086081089fSTejun Heo } 15096081089fSTejun Heo 1510b539b87fSTejun Heo /** 1511b539b87fSTejun Heo * pcpu_chunk_populated - post-population bookkeeping 1512b539b87fSTejun Heo * @chunk: pcpu_chunk which got populated 1513b539b87fSTejun Heo * @page_start: the start page 1514b539b87fSTejun Heo * @page_end: the end page 1515b539b87fSTejun Heo * 1516b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been populated to @chunk. Update 1517b539b87fSTejun Heo * the bookkeeping information accordingly. Must be called after each 1518b539b87fSTejun Heo * successful population. 
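 *
 * E.g., populating pages [2, 5) adds 3 to chunk->nr_populated, to the
 * global pcpu_nr_populated, and, via pcpu_update_empty_pages(), to the
 * chunk's count of empty populated pages.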
1519b539b87fSTejun Heo */ 152040064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, 1521b239f7daSDennis Zhou int page_end) 1522b539b87fSTejun Heo { 1523b539b87fSTejun Heo int nr = page_end - page_start; 1524b539b87fSTejun Heo 1525b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 1526b539b87fSTejun Heo 1527b539b87fSTejun Heo bitmap_set(chunk->populated, page_start, nr); 1528b539b87fSTejun Heo chunk->nr_populated += nr; 15297e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += nr; 153040064aecSDennis Zhou (Facebook) 1531b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, nr); 153240064aecSDennis Zhou (Facebook) } 1533b539b87fSTejun Heo 1534b539b87fSTejun Heo /** 1535b539b87fSTejun Heo * pcpu_chunk_depopulated - post-depopulation bookkeeping 1536b539b87fSTejun Heo * @chunk: pcpu_chunk which got depopulated 1537b539b87fSTejun Heo * @page_start: the start page 1538b539b87fSTejun Heo * @page_end: the end page 1539b539b87fSTejun Heo * 1540b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been depopulated from @chunk. 1541b539b87fSTejun Heo * Update the bookkeeping information accordingly. Must be called after 1542b539b87fSTejun Heo * each successful depopulation. 1543b539b87fSTejun Heo */ 1544b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, 1545b539b87fSTejun Heo int page_start, int page_end) 1546b539b87fSTejun Heo { 1547b539b87fSTejun Heo int nr = page_end - page_start; 1548b539b87fSTejun Heo 1549b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 1550b539b87fSTejun Heo 1551b539b87fSTejun Heo bitmap_clear(chunk->populated, page_start, nr); 1552b539b87fSTejun Heo chunk->nr_populated -= nr; 15537e8a6304SDennis Zhou (Facebook) pcpu_nr_populated -= nr; 1554b239f7daSDennis Zhou 1555b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, -nr); 1556b539b87fSTejun Heo } 1557b539b87fSTejun Heo 1558fbf59bc9STejun Heo /* 15599f645532STejun Heo * Chunk management implementation. 1560fbf59bc9STejun Heo * 15619f645532STejun Heo * To allow different implementations, chunk alloc/free and 15629f645532STejun Heo * [de]population are implemented in a separate file which is pulled 15639f645532STejun Heo * into this file and compiled together. The following functions 15649f645532STejun Heo * should be implemented. 
1565ccea34b5STejun Heo *
15669f645532STejun Heo * pcpu_populate_chunk - populate the specified range of a chunk
15679f645532STejun Heo * pcpu_depopulate_chunk - depopulate the specified range of a chunk
156893274f1dSDennis Zhou * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
15699f645532STejun Heo * pcpu_create_chunk - create a new chunk
15709f645532STejun Heo * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
15719f645532STejun Heo * pcpu_addr_to_page - translate an address to its backing page
15729f645532STejun Heo * pcpu_verify_alloc_info - check alloc_info is acceptable during init
1573fbf59bc9STejun Heo */
157415d9f3d1SDennis Zhou static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
157547504ee0SDennis Zhou int page_start, int page_end, gfp_t gfp);
157615d9f3d1SDennis Zhou static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
157715d9f3d1SDennis Zhou int page_start, int page_end);
157893274f1dSDennis Zhou static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
157993274f1dSDennis Zhou int page_start, int page_end);
1580faf65ddeSRoman Gushchin static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
15819f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
15829f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr);
15839f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1584fbf59bc9STejun Heo 
1585b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM
1586b0c9778bSTejun Heo #include "percpu-km.c"
1587b0c9778bSTejun Heo #else
15889f645532STejun Heo #include "percpu-vm.c"
1589b0c9778bSTejun Heo #endif
1590fbf59bc9STejun Heo 
1591fbf59bc9STejun Heo /**
159288999a89STejun Heo * pcpu_chunk_addr_search - determine chunk containing specified address
159388999a89STejun Heo * @addr: address for which the chunk needs to be determined.
159488999a89STejun Heo *
1595c0ebfdc3SDennis Zhou (Facebook) * This is an internal function that handles all but static allocations.
1596c0ebfdc3SDennis Zhou (Facebook) * Static percpu address values should never be passed into the allocator.
1597c0ebfdc3SDennis Zhou (Facebook) *
159888999a89STejun Heo * RETURNS:
159988999a89STejun Heo * The address of the found chunk.
160088999a89STejun Heo */
160188999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
160288999a89STejun Heo {
1603c0ebfdc3SDennis Zhou (Facebook) /* is it in the dynamic region (first chunk)? */
1604560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1605c0ebfdc3SDennis Zhou (Facebook) return pcpu_first_chunk;
1606c0ebfdc3SDennis Zhou (Facebook) 
1607c0ebfdc3SDennis Zhou (Facebook) /* is it in the reserved region? */
1608560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
160988999a89STejun Heo return pcpu_reserved_chunk;
161088999a89STejun Heo 
161188999a89STejun Heo /*
161288999a89STejun Heo * The address is relative to unit0 which might be unused and
161388999a89STejun Heo * thus unmapped. Offset the address to the unit space of the
161488999a89STejun Heo * current processor before looking it up in the vmalloc
161588999a89STejun Heo * space. Note that any possible cpu id can be used here, so
161688999a89STejun Heo * there's no need to worry about preemption or cpu hotplug.
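	 *
	 * E.g., for an address in a dynamically created chunk, the offset
	 * address is vmalloc-mapped, so the backing page can be looked up
	 * and translated back to its owning chunk via
	 * pcpu_get_page_chunk(pcpu_addr_to_page(addr)).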
161788999a89STejun Heo */ 161888999a89STejun Heo addr += pcpu_unit_offsets[raw_smp_processor_id()]; 16199f645532STejun Heo return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 162088999a89STejun Heo } 162188999a89STejun Heo 16223c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM 1623faf65ddeSRoman Gushchin static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, 16243c7be18aSRoman Gushchin struct obj_cgroup **objcgp) 16253c7be18aSRoman Gushchin { 16263c7be18aSRoman Gushchin struct obj_cgroup *objcg; 16273c7be18aSRoman Gushchin 1628f7a449f7SRoman Gushchin if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT)) 1629faf65ddeSRoman Gushchin return true; 16303c7be18aSRoman Gushchin 16313c7be18aSRoman Gushchin objcg = get_obj_cgroup_from_current(); 16323c7be18aSRoman Gushchin if (!objcg) 1633faf65ddeSRoman Gushchin return true; 16343c7be18aSRoman Gushchin 16358c57c077SQi Zheng if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) { 16363c7be18aSRoman Gushchin obj_cgroup_put(objcg); 1637faf65ddeSRoman Gushchin return false; 16383c7be18aSRoman Gushchin } 16393c7be18aSRoman Gushchin 16403c7be18aSRoman Gushchin *objcgp = objcg; 1641faf65ddeSRoman Gushchin return true; 16423c7be18aSRoman Gushchin } 16433c7be18aSRoman Gushchin 16443c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 16453c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off, 16463c7be18aSRoman Gushchin size_t size) 16473c7be18aSRoman Gushchin { 16483c7be18aSRoman Gushchin if (!objcg) 16493c7be18aSRoman Gushchin return; 16503c7be18aSRoman Gushchin 1651faf65ddeSRoman Gushchin if (likely(chunk && chunk->obj_cgroups)) { 16523c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; 1653772616b0SRoman Gushchin 1654772616b0SRoman Gushchin rcu_read_lock(); 1655772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 16568c57c077SQi Zheng pcpu_obj_full_size(size)); 1657772616b0SRoman Gushchin rcu_read_unlock(); 16583c7be18aSRoman Gushchin } else { 16598c57c077SQi Zheng obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size)); 16603c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16613c7be18aSRoman Gushchin } 16623c7be18aSRoman Gushchin } 16633c7be18aSRoman Gushchin 16643c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 16653c7be18aSRoman Gushchin { 16663c7be18aSRoman Gushchin struct obj_cgroup *objcg; 16673c7be18aSRoman Gushchin 1668faf65ddeSRoman Gushchin if (unlikely(!chunk->obj_cgroups)) 16693c7be18aSRoman Gushchin return; 16703c7be18aSRoman Gushchin 16713c7be18aSRoman Gushchin objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; 1672faf65ddeSRoman Gushchin if (!objcg) 1673faf65ddeSRoman Gushchin return; 16743c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; 16753c7be18aSRoman Gushchin 16768c57c077SQi Zheng obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size)); 16773c7be18aSRoman Gushchin 1678772616b0SRoman Gushchin rcu_read_lock(); 1679772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 16808c57c077SQi Zheng -pcpu_obj_full_size(size)); 1681772616b0SRoman Gushchin rcu_read_unlock(); 1682772616b0SRoman Gushchin 16833c7be18aSRoman Gushchin obj_cgroup_put(objcg); 16843c7be18aSRoman Gushchin } 16853c7be18aSRoman Gushchin 16863c7be18aSRoman Gushchin #else /* CONFIG_MEMCG_KMEM */ 1687faf65ddeSRoman Gushchin static bool 16883c7be18aSRoman Gushchin pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) 16893c7be18aSRoman Gushchin 
{ 1690faf65ddeSRoman Gushchin return true; 16913c7be18aSRoman Gushchin } 16923c7be18aSRoman Gushchin 16933c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 16943c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off, 16953c7be18aSRoman Gushchin size_t size) 16963c7be18aSRoman Gushchin { 16973c7be18aSRoman Gushchin } 16983c7be18aSRoman Gushchin 16993c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 17003c7be18aSRoman Gushchin { 17013c7be18aSRoman Gushchin } 17023c7be18aSRoman Gushchin #endif /* CONFIG_MEMCG_KMEM */ 17033c7be18aSRoman Gushchin 170488999a89STejun Heo /** 1705edcb4639STejun Heo * pcpu_alloc - the percpu allocator 1706cae3aeb8STejun Heo * @size: size of area to allocate in bytes 1707fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE) 1708edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available 17095835d96eSTejun Heo * @gfp: allocation flags 1710fbf59bc9STejun Heo * 17115835d96eSTejun Heo * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 17120ea7eeecSDaniel Borkmann * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN 17130ea7eeecSDaniel Borkmann * then no warning will be triggered on invalid or failed allocation 17140ea7eeecSDaniel Borkmann * requests. 1715fbf59bc9STejun Heo * 1716fbf59bc9STejun Heo * RETURNS: 1717fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1718fbf59bc9STejun Heo */ 17195835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 17205835d96eSTejun Heo gfp_t gfp) 1721fbf59bc9STejun Heo { 172228307d93SFilipe Manana gfp_t pcpu_gfp; 172328307d93SFilipe Manana bool is_atomic; 172428307d93SFilipe Manana bool do_warn; 17253c7be18aSRoman Gushchin struct obj_cgroup *objcg = NULL; 1726f2badb0cSTejun Heo static int warn_limit = 10; 17278744d859SDennis Zhou struct pcpu_chunk *chunk, *next; 1728f2badb0cSTejun Heo const char *err; 172940064aecSDennis Zhou (Facebook) int slot, off, cpu, ret; 1730403a91b1SJiri Kosina unsigned long flags; 1731f528f0b8SCatalin Marinas void __percpu *ptr; 173240064aecSDennis Zhou (Facebook) size_t bits, bit_align; 1733fbf59bc9STejun Heo 173428307d93SFilipe Manana gfp = current_gfp_context(gfp); 173528307d93SFilipe Manana /* whitelisted flags that can be passed to the backing allocators */ 173628307d93SFilipe Manana pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 173728307d93SFilipe Manana is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 173828307d93SFilipe Manana do_warn = !(gfp & __GFP_NOWARN); 173928307d93SFilipe Manana 1740723ad1d9SAl Viro /* 174140064aecSDennis Zhou (Facebook) * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, 174240064aecSDennis Zhou (Facebook) * therefore alignment must be a minimum of that many bytes. 174340064aecSDennis Zhou (Facebook) * An allocation may have internal fragmentation from rounding up 174440064aecSDennis Zhou (Facebook) * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. 
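	 *
	 * E.g., assuming PCPU_MIN_ALLOC_SIZE is 4 bytes, a 10-byte request
	 * is rounded up to 12 bytes (bits = 3), and an 8-byte alignment
	 * becomes bit_align = 2.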
1745723ad1d9SAl Viro */ 1746d2f3c384SDennis Zhou (Facebook) if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) 1747d2f3c384SDennis Zhou (Facebook) align = PCPU_MIN_ALLOC_SIZE; 1748723ad1d9SAl Viro 1749d2f3c384SDennis Zhou (Facebook) size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); 175040064aecSDennis Zhou (Facebook) bits = size >> PCPU_MIN_ALLOC_SHIFT; 175140064aecSDennis Zhou (Facebook) bit_align = align >> PCPU_MIN_ALLOC_SHIFT; 17522f69fa82SViro 17533ca45a46Szijun_hu if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 17543ca45a46Szijun_hu !is_power_of_2(align))) { 17550ea7eeecSDaniel Borkmann WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1756756a025fSJoe Perches size, align); 1757fbf59bc9STejun Heo return NULL; 1758fbf59bc9STejun Heo } 1759fbf59bc9STejun Heo 1760faf65ddeSRoman Gushchin if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg))) 17613c7be18aSRoman Gushchin return NULL; 17623c7be18aSRoman Gushchin 1763f52ba1feSKirill Tkhai if (!is_atomic) { 1764f52ba1feSKirill Tkhai /* 1765f52ba1feSKirill Tkhai * pcpu_balance_workfn() allocates memory under this mutex, 1766f52ba1feSKirill Tkhai * and it may wait for memory reclaim. Allow current task 1767f52ba1feSKirill Tkhai * to become OOM victim, in case of memory pressure. 1768f52ba1feSKirill Tkhai */ 17693c7be18aSRoman Gushchin if (gfp & __GFP_NOFAIL) { 17706710e594STejun Heo mutex_lock(&pcpu_alloc_mutex); 17713c7be18aSRoman Gushchin } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { 17723c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 1773f52ba1feSKirill Tkhai return NULL; 1774f52ba1feSKirill Tkhai } 17753c7be18aSRoman Gushchin } 17766710e594STejun Heo 1777403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1778fbf59bc9STejun Heo 1779edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */ 1780edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) { 1781edcb4639STejun Heo chunk = pcpu_reserved_chunk; 1782833af842STejun Heo 178340064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); 178440064aecSDennis Zhou (Facebook) if (off < 0) { 1785833af842STejun Heo err = "alloc from reserved chunk failed"; 1786ccea34b5STejun Heo goto fail_unlock; 1787f2badb0cSTejun Heo } 1788833af842STejun Heo 178940064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1790edcb4639STejun Heo if (off >= 0) 1791edcb4639STejun Heo goto area_found; 1792833af842STejun Heo 1793f2badb0cSTejun Heo err = "alloc from reserved chunk failed"; 1794ccea34b5STejun Heo goto fail_unlock; 1795edcb4639STejun Heo } 1796edcb4639STejun Heo 1797ccea34b5STejun Heo restart: 1798edcb4639STejun Heo /* search through normal chunks */ 1799f1833241SRoman Gushchin for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) { 1800faf65ddeSRoman Gushchin list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot], 1801faf65ddeSRoman Gushchin list) { 180240064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, 180340064aecSDennis Zhou (Facebook) is_atomic); 18048744d859SDennis Zhou if (off < 0) { 18058744d859SDennis Zhou if (slot < PCPU_SLOT_FAIL_THRESHOLD) 18068744d859SDennis Zhou pcpu_chunk_move(chunk, 0); 1807fbf59bc9STejun Heo continue; 18088744d859SDennis Zhou } 1809ccea34b5STejun Heo 181040064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1811f1833241SRoman Gushchin if (off >= 0) { 1812f1833241SRoman Gushchin pcpu_reintegrate_chunk(chunk); 
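				/* the call above un-isolates a chunk that was sidelined for depopulation */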
1813fbf59bc9STejun Heo goto area_found; 1814f1833241SRoman Gushchin } 1815fbf59bc9STejun Heo } 1816fbf59bc9STejun Heo } 1817fbf59bc9STejun Heo 1818403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1819ccea34b5STejun Heo 182011df02bfSDennis Zhou if (is_atomic) { 182111df02bfSDennis Zhou err = "atomic alloc failed, no space left"; 18225835d96eSTejun Heo goto fail; 182311df02bfSDennis Zhou } 18245835d96eSTejun Heo 1825e04cb697SBaoquan He /* No space left. Create a new chunk. */ 1826faf65ddeSRoman Gushchin if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) { 1827faf65ddeSRoman Gushchin chunk = pcpu_create_chunk(pcpu_gfp); 1828f2badb0cSTejun Heo if (!chunk) { 1829f2badb0cSTejun Heo err = "failed to allocate new chunk"; 1830b38d08f3STejun Heo goto fail; 1831f2badb0cSTejun Heo } 1832ccea34b5STejun Heo 1833403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1834fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1835b38d08f3STejun Heo } else { 1836b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1837b38d08f3STejun Heo } 1838b38d08f3STejun Heo 1839ccea34b5STejun Heo goto restart; 1840fbf59bc9STejun Heo 1841fbf59bc9STejun Heo area_found: 184230a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size); 1843403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1844ccea34b5STejun Heo 1845dca49645STejun Heo /* populate if not all pages are already there */ 18465835d96eSTejun Heo if (!is_atomic) { 1847ec288a2cSYury Norov unsigned int page_end, rs, re; 1848e04d3208STejun Heo 1849ec288a2cSYury Norov rs = PFN_DOWN(off); 1850dca49645STejun Heo page_end = PFN_UP(off + size); 1851dca49645STejun Heo 1852ec288a2cSYury Norov for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) { 1853dca49645STejun Heo WARN_ON(chunk->immutable); 1854dca49645STejun Heo 1855554fef1cSDennis Zhou ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); 1856b38d08f3STejun Heo 1857403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1858b38d08f3STejun Heo if (ret) { 185940064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off); 1860f2badb0cSTejun Heo err = "failed to populate"; 1861ccea34b5STejun Heo goto fail_unlock; 1862fbf59bc9STejun Heo } 1863b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, re); 1864b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1865dca49645STejun Heo } 1866dca49645STejun Heo 1867ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1868e04d3208STejun Heo } 1869ccea34b5STejun Heo 1870faf65ddeSRoman Gushchin if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 18711a4d7607STejun Heo pcpu_schedule_balance_work(); 18721a4d7607STejun Heo 1873dca49645STejun Heo /* clear the areas and return address relative to base address */ 1874dca49645STejun Heo for_each_possible_cpu(cpu) 1875dca49645STejun Heo memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1876dca49645STejun Heo 1877f528f0b8SCatalin Marinas ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 18788a8c35faSLarry Finger kmemleak_alloc_percpu(ptr, size, gfp); 1879df95e795SDennis Zhou 1880f67bed13SVasily Averin trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align, 1881f67bed13SVasily Averin chunk->base_addr, off, ptr, 1882f67bed13SVasily Averin pcpu_obj_full_size(size), gfp); 1883df95e795SDennis Zhou 18843c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); 18853c7be18aSRoman Gushchin 1886f528f0b8SCatalin Marinas return ptr; 1887ccea34b5STejun Heo 1888ccea34b5STejun Heo fail_unlock: 1889403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 
1890b38d08f3STejun Heo fail: 1891df95e795SDennis Zhou trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1892df95e795SDennis Zhou 18930ea7eeecSDaniel Borkmann if (!is_atomic && do_warn && warn_limit) { 1894870d4b12SJoe Perches pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 18955835d96eSTejun Heo size, align, is_atomic, err); 1896f2badb0cSTejun Heo dump_stack(); 1897f2badb0cSTejun Heo if (!--warn_limit) 1898870d4b12SJoe Perches pr_info("limit reached, disable warning\n"); 1899f2badb0cSTejun Heo } 19001a4d7607STejun Heo if (is_atomic) { 1901f0953a1bSIngo Molnar /* see the flag handling in pcpu_balance_workfn() */ 19021a4d7607STejun Heo pcpu_atomic_alloc_failed = true; 19031a4d7607STejun Heo pcpu_schedule_balance_work(); 19046710e594STejun Heo } else { 19056710e594STejun Heo mutex_unlock(&pcpu_alloc_mutex); 19061a4d7607STejun Heo } 19073c7be18aSRoman Gushchin 19083c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 19093c7be18aSRoman Gushchin 1910ccea34b5STejun Heo return NULL; 1911fbf59bc9STejun Heo } 1912edcb4639STejun Heo 1913edcb4639STejun Heo /** 19145835d96eSTejun Heo * __alloc_percpu_gfp - allocate dynamic percpu area 1915edcb4639STejun Heo * @size: size of area to allocate in bytes 1916edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 19175835d96eSTejun Heo * @gfp: allocation flags 1918edcb4639STejun Heo * 19195835d96eSTejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align. If 19205835d96eSTejun Heo * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 19210ea7eeecSDaniel Borkmann * be called from any context but is a lot more likely to fail. If @gfp 19220ea7eeecSDaniel Borkmann * has __GFP_NOWARN then no warning will be triggered on invalid or failed 19230ea7eeecSDaniel Borkmann * allocation requests. 1924ccea34b5STejun Heo * 1925edcb4639STejun Heo * RETURNS: 1926edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1927edcb4639STejun Heo */ 19285835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) 19295835d96eSTejun Heo { 19305835d96eSTejun Heo return pcpu_alloc(size, align, false, gfp); 19315835d96eSTejun Heo } 19325835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); 19335835d96eSTejun Heo 19345835d96eSTejun Heo /** 19355835d96eSTejun Heo * __alloc_percpu - allocate dynamic percpu area 19365835d96eSTejun Heo * @size: size of area to allocate in bytes 19375835d96eSTejun Heo * @align: alignment of area (max PAGE_SIZE) 19385835d96eSTejun Heo * 19395835d96eSTejun Heo * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). 19405835d96eSTejun Heo */ 194143cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align) 1942edcb4639STejun Heo { 19435835d96eSTejun Heo return pcpu_alloc(size, align, false, GFP_KERNEL); 1944edcb4639STejun Heo } 1945fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 1946fbf59bc9STejun Heo 1947edcb4639STejun Heo /** 1948edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 1949edcb4639STejun Heo * @size: size of area to allocate in bytes 1950edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1951edcb4639STejun Heo * 19529329ba97STejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align 19539329ba97STejun Heo * from reserved percpu area if arch has set it up; otherwise, 19549329ba97STejun Heo * allocation is served from the same dynamic area. Might sleep. 19559329ba97STejun Heo * Might trigger writeouts. 
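 *
 * A minimal usage sketch (hypothetical caller):
 *
 *	int __percpu *cnt = __alloc_reserved_percpu(sizeof(int),
 *						    __alignof__(int));
 *	if (!cnt)
 *		return -ENOMEM;
 *	...
 *	free_percpu(cnt);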
1956edcb4639STejun Heo * 1957ccea34b5STejun Heo * CONTEXT: 1958ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1959ccea34b5STejun Heo * 1960edcb4639STejun Heo * RETURNS: 1961edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1962edcb4639STejun Heo */ 196343cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1964edcb4639STejun Heo { 19655835d96eSTejun Heo return pcpu_alloc(size, align, true, GFP_KERNEL); 1966edcb4639STejun Heo } 1967edcb4639STejun Heo 1968a56dbddfSTejun Heo /** 196967c2669dSRoman Gushchin * pcpu_balance_free - manage the amount of free chunks 1970f1833241SRoman Gushchin * @empty_only: free chunks only if there are no populated pages 1971a56dbddfSTejun Heo * 1972f1833241SRoman Gushchin * If empty_only is %false, reclaim all fully free chunks regardless of the 1973f1833241SRoman Gushchin * number of populated pages. Otherwise, only reclaim chunks that have no 1974f1833241SRoman Gushchin * populated pages. 1975e4d77700SRoman Gushchin * 1976e4d77700SRoman Gushchin * CONTEXT: 1977e4d77700SRoman Gushchin * pcpu_lock (can be dropped temporarily) 1978a56dbddfSTejun Heo */ 1979faf65ddeSRoman Gushchin static void pcpu_balance_free(bool empty_only) 1980fbf59bc9STejun Heo { 1981fe6bd8c3STejun Heo LIST_HEAD(to_free); 1982faf65ddeSRoman Gushchin struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot]; 1983a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 1984a56dbddfSTejun Heo 1985e4d77700SRoman Gushchin lockdep_assert_held(&pcpu_lock); 1986a56dbddfSTejun Heo 19871a4d7607STejun Heo /* 19881a4d7607STejun Heo * There's no reason to keep around multiple unused chunks and VM 19891a4d7607STejun Heo * areas can be scarce. Destroy all free chunks except for one. 19901a4d7607STejun Heo */ 1991fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, free_head, list) { 19928d408b4bSTejun Heo WARN_ON(chunk->immutable); 1993a56dbddfSTejun Heo 1994a56dbddfSTejun Heo /* spare the first one */ 1995fe6bd8c3STejun Heo if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 1996a56dbddfSTejun Heo continue; 1997a56dbddfSTejun Heo 1998f1833241SRoman Gushchin if (!empty_only || chunk->nr_empty_pop_pages == 0) 1999fe6bd8c3STejun Heo list_move(&chunk->list, &to_free); 2000a56dbddfSTejun Heo } 2001a56dbddfSTejun Heo 2002e4d77700SRoman Gushchin if (list_empty(&to_free)) 2003e4d77700SRoman Gushchin return; 2004a56dbddfSTejun Heo 2005e4d77700SRoman Gushchin spin_unlock_irq(&pcpu_lock); 2006fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, &to_free, list) { 2007e837dfdeSDennis Zhou unsigned int rs, re; 2008dca49645STejun Heo 2009ec288a2cSYury Norov for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) { 2010a93ace48STejun Heo pcpu_depopulate_chunk(chunk, rs, re); 2011b539b87fSTejun Heo spin_lock_irq(&pcpu_lock); 2012b539b87fSTejun Heo pcpu_chunk_depopulated(chunk, rs, re); 2013b539b87fSTejun Heo spin_unlock_irq(&pcpu_lock); 2014a93ace48STejun Heo } 20156081089fSTejun Heo pcpu_destroy_chunk(chunk); 2016accd4f36SEric Dumazet cond_resched(); 2017fbf59bc9STejun Heo } 2018e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock); 201967c2669dSRoman Gushchin } 202067c2669dSRoman Gushchin 202167c2669dSRoman Gushchin /** 202267c2669dSRoman Gushchin * pcpu_balance_populated - manage the amount of populated pages 202367c2669dSRoman Gushchin * 202467c2669dSRoman Gushchin * Maintain a certain amount of populated pages to satisfy atomic allocations. 
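 * The refill target is bounded by PCPU_EMPTY_POP_PAGES_HIGH (see the
 * clamping of nr_to_pop below).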
202567c2669dSRoman Gushchin * It is possible that this is called when physical memory is scarce causing 202667c2669dSRoman Gushchin * OOM killer to be triggered. We should avoid doing so until an actual 202767c2669dSRoman Gushchin * allocation causes the failure as it is possible that requests can be 202867c2669dSRoman Gushchin * serviced from already backed regions. 2029e4d77700SRoman Gushchin * 2030e4d77700SRoman Gushchin * CONTEXT: 2031e4d77700SRoman Gushchin * pcpu_lock (can be dropped temporarily) 203267c2669dSRoman Gushchin */ 2033faf65ddeSRoman Gushchin static void pcpu_balance_populated(void) 203467c2669dSRoman Gushchin { 203567c2669dSRoman Gushchin /* gfp flags passed to underlying allocators */ 203667c2669dSRoman Gushchin const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 203767c2669dSRoman Gushchin struct pcpu_chunk *chunk; 203867c2669dSRoman Gushchin int slot, nr_to_pop, ret; 2039971f3918STejun Heo 2040e4d77700SRoman Gushchin lockdep_assert_held(&pcpu_lock); 2041971f3918STejun Heo 20421a4d7607STejun Heo /* 20431a4d7607STejun Heo * Ensure there are certain number of free populated pages for 20441a4d7607STejun Heo * atomic allocs. Fill up from the most packed so that atomic 20451a4d7607STejun Heo * allocs don't increase fragmentation. If atomic allocation 20461a4d7607STejun Heo * failed previously, always populate the maximum amount. This 20471a4d7607STejun Heo * should prevent atomic allocs larger than PAGE_SIZE from keeping 20481a4d7607STejun Heo * failing indefinitely; however, large atomic allocs are not 20491a4d7607STejun Heo * something we support properly and can be highly unreliable and 20501a4d7607STejun Heo * inefficient. 20511a4d7607STejun Heo */ 20521a4d7607STejun Heo retry_pop: 20531a4d7607STejun Heo if (pcpu_atomic_alloc_failed) { 20541a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 20551a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */ 20561a4d7607STejun Heo pcpu_atomic_alloc_failed = false; 20571a4d7607STejun Heo } else { 20581a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 2059faf65ddeSRoman Gushchin pcpu_nr_empty_pop_pages, 20601a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH); 20611a4d7607STejun Heo } 20621a4d7607STejun Heo 20631c29a3ceSDennis Zhou for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) { 2064e837dfdeSDennis Zhou unsigned int nr_unpop = 0, rs, re; 20651a4d7607STejun Heo 20661a4d7607STejun Heo if (!nr_to_pop) 20671a4d7607STejun Heo break; 20681a4d7607STejun Heo 2069faf65ddeSRoman Gushchin list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) { 20708ab16c43SDennis Zhou (Facebook) nr_unpop = chunk->nr_pages - chunk->nr_populated; 20711a4d7607STejun Heo if (nr_unpop) 20721a4d7607STejun Heo break; 20731a4d7607STejun Heo } 20741a4d7607STejun Heo 20751a4d7607STejun Heo if (!nr_unpop) 20761a4d7607STejun Heo continue; 20771a4d7607STejun Heo 20781a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */ 2079ec288a2cSYury Norov for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) { 2080e837dfdeSDennis Zhou int nr = min_t(int, re - rs, nr_to_pop); 20811a4d7607STejun Heo 2082e4d77700SRoman Gushchin spin_unlock_irq(&pcpu_lock); 208347504ee0SDennis Zhou ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); 2084e4d77700SRoman Gushchin cond_resched(); 2085e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock); 20861a4d7607STejun Heo if (!ret) { 20871a4d7607STejun Heo nr_to_pop -= nr; 2088b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, rs + nr); 
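				/* nr pages are now backed and accounted in the chunk and global hints */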
20891a4d7607STejun Heo 			} else {
20901a4d7607STejun Heo 				nr_to_pop = 0;
20911a4d7607STejun Heo 			}
20921a4d7607STejun Heo 
20931a4d7607STejun Heo 			if (!nr_to_pop)
20941a4d7607STejun Heo 				break;
20951a4d7607STejun Heo 		}
20961a4d7607STejun Heo 	}
20971a4d7607STejun Heo 
20981a4d7607STejun Heo 	if (nr_to_pop) {
20991a4d7607STejun Heo 		/* ran out of chunks to populate, create a new one and retry */
21001a4d7607STejun Heo 		spin_unlock_irq(&pcpu_lock);
2101e4d77700SRoman Gushchin 		chunk = pcpu_create_chunk(gfp);
2102e4d77700SRoman Gushchin 		cond_resched();
2103e4d77700SRoman Gushchin 		spin_lock_irq(&pcpu_lock);
2104e4d77700SRoman Gushchin 		if (chunk) {
2105e4d77700SRoman Gushchin 			pcpu_chunk_relocate(chunk, -1);
21061a4d7607STejun Heo 			goto retry_pop;
21071a4d7607STejun Heo 		}
21081a4d7607STejun Heo 	}
2109a56dbddfSTejun Heo }
2110fbf59bc9STejun Heo 
2111fbf59bc9STejun Heo /**
2112f1833241SRoman Gushchin  * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
2113f1833241SRoman Gushchin  *
2114f1833241SRoman Gushchin  * Scan over chunks in the depopulate list and try to release unused populated
2115f1833241SRoman Gushchin  * pages back to the system. Depopulated chunks are sidelined to prevent
2116f1833241SRoman Gushchin  * repopulating these pages unless required. Fully free chunks are reintegrated
2117f1833241SRoman Gushchin  * and freed accordingly (one is kept around). If we drop below the empty
2118f1833241SRoman Gushchin  * populated pages threshold, reintegrate the chunk if it has empty free pages.
2119f1833241SRoman Gushchin  * Each chunk is scanned in the reverse order to keep populated pages close to
2120f1833241SRoman Gushchin  * the beginning of the chunk.
2121e4d77700SRoman Gushchin  *
2122e4d77700SRoman Gushchin  * CONTEXT:
2123e4d77700SRoman Gushchin  * pcpu_lock (can be dropped temporarily)
2124e4d77700SRoman Gushchin  *
2125f1833241SRoman Gushchin  */
2126faf65ddeSRoman Gushchin static void pcpu_reclaim_populated(void)
2127f1833241SRoman Gushchin {
2128f1833241SRoman Gushchin 	struct pcpu_chunk *chunk;
2129f1833241SRoman Gushchin 	struct pcpu_block_md *block;
213093274f1dSDennis Zhou 	int freed_page_start, freed_page_end;
2131f1833241SRoman Gushchin 	int i, end;
213293274f1dSDennis Zhou 	bool reintegrate;
2133f1833241SRoman Gushchin 
2134e4d77700SRoman Gushchin 	lockdep_assert_held(&pcpu_lock);
2135f1833241SRoman Gushchin 
2136f1833241SRoman Gushchin 	/*
2137f1833241SRoman Gushchin 	 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2138f1833241SRoman Gushchin 	 * longer discoverable to allocations which may populate pages. The only
2139f1833241SRoman Gushchin 	 * other accessor is the free path, which only returns the area back to
2140f1833241SRoman Gushchin 	 * the allocator without touching the populated bitmap.
2141f1833241SRoman Gushchin 	 */
2142c1f6688dSBaoquan He 	while ((chunk = list_first_entry_or_null(
2143c1f6688dSBaoquan He 			&pcpu_chunk_lists[pcpu_to_depopulate_slot],
2144c1f6688dSBaoquan He 			struct pcpu_chunk, list))) {
2145f1833241SRoman Gushchin 		WARN_ON(chunk->immutable);
2146f1833241SRoman Gushchin 
2147f1833241SRoman Gushchin 		/*
2148f1833241SRoman Gushchin 		 * Scan chunk's pages in the reverse order to keep populated
2149f1833241SRoman Gushchin 		 * pages close to the beginning of the chunk.
2150f1833241SRoman Gushchin */ 215193274f1dSDennis Zhou freed_page_start = chunk->nr_pages; 215293274f1dSDennis Zhou freed_page_end = 0; 215393274f1dSDennis Zhou reintegrate = false; 2154f1833241SRoman Gushchin for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) { 2155f1833241SRoman Gushchin /* no more work to do */ 2156f1833241SRoman Gushchin if (chunk->nr_empty_pop_pages == 0) 2157f1833241SRoman Gushchin break; 2158f1833241SRoman Gushchin 2159f1833241SRoman Gushchin /* reintegrate chunk to prevent atomic alloc failures */ 2160faf65ddeSRoman Gushchin if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) { 216193274f1dSDennis Zhou reintegrate = true; 216283d261fcSBaoquan He break; 2163f1833241SRoman Gushchin } 2164f1833241SRoman Gushchin 2165f1833241SRoman Gushchin /* 2166f1833241SRoman Gushchin * If the page is empty and populated, start or 2167f1833241SRoman Gushchin * extend the (i, end) range. If i == 0, decrease 2168f1833241SRoman Gushchin * i and perform the depopulation to cover the last 2169f1833241SRoman Gushchin * (first) page in the chunk. 2170f1833241SRoman Gushchin */ 2171f1833241SRoman Gushchin block = chunk->md_blocks + i; 2172f1833241SRoman Gushchin if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS && 2173f1833241SRoman Gushchin test_bit(i, chunk->populated)) { 2174f1833241SRoman Gushchin if (end == -1) 2175f1833241SRoman Gushchin end = i; 2176f1833241SRoman Gushchin if (i > 0) 2177f1833241SRoman Gushchin continue; 2178f1833241SRoman Gushchin i--; 2179f1833241SRoman Gushchin } 2180f1833241SRoman Gushchin 2181f1833241SRoman Gushchin /* depopulate if there is an active range */ 2182f1833241SRoman Gushchin if (end == -1) 2183f1833241SRoman Gushchin continue; 2184f1833241SRoman Gushchin 2185f1833241SRoman Gushchin spin_unlock_irq(&pcpu_lock); 2186f1833241SRoman Gushchin pcpu_depopulate_chunk(chunk, i + 1, end + 1); 2187f1833241SRoman Gushchin cond_resched(); 2188f1833241SRoman Gushchin spin_lock_irq(&pcpu_lock); 2189f1833241SRoman Gushchin 2190f1833241SRoman Gushchin pcpu_chunk_depopulated(chunk, i + 1, end + 1); 219193274f1dSDennis Zhou freed_page_start = min(freed_page_start, i + 1); 219293274f1dSDennis Zhou freed_page_end = max(freed_page_end, end + 1); 2193f1833241SRoman Gushchin 2194f1833241SRoman Gushchin /* reset the range and continue */ 2195f1833241SRoman Gushchin end = -1; 2196f1833241SRoman Gushchin } 2197f1833241SRoman Gushchin 219893274f1dSDennis Zhou /* batch tlb flush per chunk to amortize cost */ 219993274f1dSDennis Zhou if (freed_page_start < freed_page_end) { 220093274f1dSDennis Zhou spin_unlock_irq(&pcpu_lock); 220193274f1dSDennis Zhou pcpu_post_unmap_tlb_flush(chunk, 220293274f1dSDennis Zhou freed_page_start, 220393274f1dSDennis Zhou freed_page_end); 220493274f1dSDennis Zhou cond_resched(); 220593274f1dSDennis Zhou spin_lock_irq(&pcpu_lock); 220693274f1dSDennis Zhou } 220793274f1dSDennis Zhou 220893274f1dSDennis Zhou if (reintegrate || chunk->free_bytes == pcpu_unit_size) 2209f1833241SRoman Gushchin pcpu_reintegrate_chunk(chunk); 2210f1833241SRoman Gushchin else 221193274f1dSDennis Zhou list_move_tail(&chunk->list, 2212faf65ddeSRoman Gushchin &pcpu_chunk_lists[pcpu_sidelined_slot]); 2213f1833241SRoman Gushchin } 2214fbf59bc9STejun Heo } 2215fbf59bc9STejun Heo 2216fbf59bc9STejun Heo /** 22173c7be18aSRoman Gushchin * pcpu_balance_workfn - manage the amount of free chunks and populated pages 22183c7be18aSRoman Gushchin * @work: unused 22193c7be18aSRoman Gushchin * 2220f1833241SRoman Gushchin * For each chunk type, manage the number of fully free chunks and 
the number of
2221f1833241SRoman Gushchin  * populated pages. An important thing to consider is when pages are freed and
2222f1833241SRoman Gushchin  * how they contribute to the global counts.
22233c7be18aSRoman Gushchin  */
22243c7be18aSRoman Gushchin static void pcpu_balance_workfn(struct work_struct *work)
22253c7be18aSRoman Gushchin {
2226f1833241SRoman Gushchin 	/*
2227f1833241SRoman Gushchin 	 * pcpu_balance_free() is called twice because the first time we may
2228f1833241SRoman Gushchin 	 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2229f1833241SRoman Gushchin 	 * to grow other chunks. This then gives pcpu_reclaim_populated() time
2230f1833241SRoman Gushchin 	 * to move fully free chunks to the active list to be freed if
2231f1833241SRoman Gushchin 	 * appropriate.
2232f1833241SRoman Gushchin 	 */
223367c2669dSRoman Gushchin 	mutex_lock(&pcpu_alloc_mutex);
2234e4d77700SRoman Gushchin 	spin_lock_irq(&pcpu_lock);
22353c7be18aSRoman Gushchin 
2236faf65ddeSRoman Gushchin 	pcpu_balance_free(false);
2237faf65ddeSRoman Gushchin 	pcpu_reclaim_populated();
2238faf65ddeSRoman Gushchin 	pcpu_balance_populated();
2239faf65ddeSRoman Gushchin 	pcpu_balance_free(true);
2240e4d77700SRoman Gushchin 
2241e4d77700SRoman Gushchin 	spin_unlock_irq(&pcpu_lock);
224267c2669dSRoman Gushchin 	mutex_unlock(&pcpu_alloc_mutex);
22433c7be18aSRoman Gushchin }
22443c7be18aSRoman Gushchin 
22453c7be18aSRoman Gushchin /**
2246fbf59bc9STejun Heo  * free_percpu - free percpu area
2247fbf59bc9STejun Heo  * @ptr: pointer to area to free
2248fbf59bc9STejun Heo  *
2249ccea34b5STejun Heo  * Free percpu area @ptr.
2250ccea34b5STejun Heo  *
2251ccea34b5STejun Heo  * CONTEXT:
2252ccea34b5STejun Heo  * Can be called from atomic context.
2253fbf59bc9STejun Heo  */
225443cf38ebSTejun Heo void free_percpu(void __percpu *ptr)
2255fbf59bc9STejun Heo {
2256129182e5SAndrew Morton 	void *addr;
2257fbf59bc9STejun Heo 	struct pcpu_chunk *chunk;
2258ccea34b5STejun Heo 	unsigned long flags;
22593c7be18aSRoman Gushchin 	int size, off;
2260198790d9SJohn Sperbeck 	bool need_balance = false;
2261fbf59bc9STejun Heo 
2262fbf59bc9STejun Heo 	if (!ptr)
2263fbf59bc9STejun Heo 		return;
2264fbf59bc9STejun Heo 
2265f528f0b8SCatalin Marinas 	kmemleak_free_percpu(ptr);
2266f528f0b8SCatalin Marinas 
2267129182e5SAndrew Morton 	addr = __pcpu_ptr_to_addr(ptr);
2268129182e5SAndrew Morton 
2269ccea34b5STejun Heo 	spin_lock_irqsave(&pcpu_lock, flags);
2270fbf59bc9STejun Heo 
2271fbf59bc9STejun Heo 	chunk = pcpu_chunk_addr_search(addr);
2272bba174f5STejun Heo 	off = addr - chunk->base_addr;
2273fbf59bc9STejun Heo 
22743c7be18aSRoman Gushchin 	size = pcpu_free_area(chunk, off);
22753c7be18aSRoman Gushchin 
22763c7be18aSRoman Gushchin 	pcpu_memcg_free_hook(chunk, off, size);
2277fbf59bc9STejun Heo 
2278f1833241SRoman Gushchin 	/*
2279f1833241SRoman Gushchin 	 * If there is more than one fully free chunk, wake up the grim reaper.
2280f1833241SRoman Gushchin 	 * If the chunk is isolated, it may be in the process of being
2281f1833241SRoman Gushchin 	 * reclaimed. Let reclaim manage cleaning up of that chunk.
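	 * The "grim reaper" here is the balance work item scheduled below,
	 * which does the actual freeing via pcpu_balance_free().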
2282f1833241SRoman Gushchin */ 2283f1833241SRoman Gushchin if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) { 2284fbf59bc9STejun Heo struct pcpu_chunk *pos; 2285fbf59bc9STejun Heo 2286faf65ddeSRoman Gushchin list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list) 2287fbf59bc9STejun Heo if (pos != chunk) { 2288198790d9SJohn Sperbeck need_balance = true; 2289fbf59bc9STejun Heo break; 2290fbf59bc9STejun Heo } 2291f1833241SRoman Gushchin } else if (pcpu_should_reclaim_chunk(chunk)) { 2292f1833241SRoman Gushchin pcpu_isolate_chunk(chunk); 2293f1833241SRoman Gushchin need_balance = true; 2294fbf59bc9STejun Heo } 2295fbf59bc9STejun Heo 2296df95e795SDennis Zhou trace_percpu_free_percpu(chunk->base_addr, off, ptr); 2297df95e795SDennis Zhou 2298ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 2299198790d9SJohn Sperbeck 2300198790d9SJohn Sperbeck if (need_balance) 2301198790d9SJohn Sperbeck pcpu_schedule_balance_work(); 2302fbf59bc9STejun Heo } 2303fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 2304fbf59bc9STejun Heo 2305383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) 2306383776faSThomas Gleixner { 2307383776faSThomas Gleixner #ifdef CONFIG_SMP 2308383776faSThomas Gleixner const size_t static_size = __per_cpu_end - __per_cpu_start; 2309383776faSThomas Gleixner void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 2310383776faSThomas Gleixner unsigned int cpu; 2311383776faSThomas Gleixner 2312383776faSThomas Gleixner for_each_possible_cpu(cpu) { 2313383776faSThomas Gleixner void *start = per_cpu_ptr(base, cpu); 2314383776faSThomas Gleixner void *va = (void *)addr; 2315383776faSThomas Gleixner 2316383776faSThomas Gleixner if (va >= start && va < start + static_size) { 23178ce371f9SPeter Zijlstra if (can_addr) { 2318383776faSThomas Gleixner *can_addr = (unsigned long) (va - start); 23198ce371f9SPeter Zijlstra *can_addr += (unsigned long) 23208ce371f9SPeter Zijlstra per_cpu_ptr(base, get_boot_cpu_id()); 23218ce371f9SPeter Zijlstra } 2322383776faSThomas Gleixner return true; 2323383776faSThomas Gleixner } 2324383776faSThomas Gleixner } 2325383776faSThomas Gleixner #endif 2326383776faSThomas Gleixner /* on UP, can't distinguish from other static vars, always false */ 2327383776faSThomas Gleixner return false; 2328383776faSThomas Gleixner } 2329383776faSThomas Gleixner 23303b034b0dSVivek Goyal /** 233110fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area 233210fad5e4STejun Heo * @addr: address to test 233310fad5e4STejun Heo * 233410fad5e4STejun Heo * Test whether @addr belongs to in-kernel static percpu area. Module 233510fad5e4STejun Heo * static percpu areas are not considered. For those, use 233610fad5e4STejun Heo * is_module_percpu_address(). 233710fad5e4STejun Heo * 233810fad5e4STejun Heo * RETURNS: 233910fad5e4STejun Heo * %true if @addr is from in-kernel static percpu area, %false otherwise. 
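 *
 * As an illustrative sketch (hypothetical variable):
 *
 *	static DEFINE_PER_CPU(int, foo);
 *	...
 *	is_kernel_percpu_address((unsigned long)per_cpu_ptr(&foo, cpu));
 *
 * would return %true, while a slab or vmalloc address would not.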
234010fad5e4STejun Heo  */
234110fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr)
234210fad5e4STejun Heo {
2343383776faSThomas Gleixner 	return __is_kernel_percpu_address(addr, NULL);
234410fad5e4STejun Heo }
234510fad5e4STejun Heo 
234610fad5e4STejun Heo /**
23473b034b0dSVivek Goyal  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
23483b034b0dSVivek Goyal  * @addr: the address to be converted to physical address
23493b034b0dSVivek Goyal  *
23503b034b0dSVivek Goyal  * Given @addr, which is a dereferenceable address obtained via one of the
23513b034b0dSVivek Goyal  * percpu access macros, this function translates it into its physical
23523b034b0dSVivek Goyal  * address. The caller is responsible for ensuring @addr stays valid
23533b034b0dSVivek Goyal  * until this function finishes.
23543b034b0dSVivek Goyal  *
235567589c71SDave Young  * The percpu allocator has special setup for the first chunk, which currently
235667589c71SDave Young  * supports either embedding in linear address space or vmalloc mapping,
235767589c71SDave Young  * and, from the second one, the backing allocator (currently either vm or
235867589c71SDave Young  * km) provides translation.
235967589c71SDave Young  *
2360bffc4375SYannick Guerrini  * @addr could be translated without checking whether it falls into the
236167589c71SDave Young  * first chunk. But the current code reflects better how the percpu
236267589c71SDave Young  * allocator actually works, and the verification can discover both bugs
236367589c71SDave Young  * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
236467589c71SDave Young  * So we keep the current code.
236567589c71SDave Young  *
23663b034b0dSVivek Goyal  * RETURNS:
23673b034b0dSVivek Goyal  * The physical address for @addr.
23683b034b0dSVivek Goyal  */
23693b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr)
23703b034b0dSVivek Goyal {
23719983b6f0STejun Heo 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
23729983b6f0STejun Heo 	bool in_first_chunk = false;
2373a855b84cSTejun Heo 	unsigned long first_low, first_high;
23749983b6f0STejun Heo 	unsigned int cpu;
23759983b6f0STejun Heo 
23769983b6f0STejun Heo 	/*
2377a855b84cSTejun Heo 	 * The following test on unit_low/high isn't strictly
23789983b6f0STejun Heo 	 * necessary but will speed up lookups of addresses which
23799983b6f0STejun Heo 	 * aren't in the first chunk.
2380c0ebfdc3SDennis Zhou (Facebook) 	 *
2381c0ebfdc3SDennis Zhou (Facebook) 	 * The address check is against full chunk sizes. pcpu_base_addr
2382c0ebfdc3SDennis Zhou (Facebook) 	 * points to the beginning of the first chunk including the
2383c0ebfdc3SDennis Zhou (Facebook) 	 * static region. Assumes good intent as the first chunk may
2384c0ebfdc3SDennis Zhou (Facebook) 	 * not be full (ie. < pcpu_unit_pages in size).
23859983b6f0STejun Heo */ 2386c0ebfdc3SDennis Zhou (Facebook) first_low = (unsigned long)pcpu_base_addr + 2387c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 2388c0ebfdc3SDennis Zhou (Facebook) first_high = (unsigned long)pcpu_base_addr + 2389c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 2390a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 2391a855b84cSTejun Heo (unsigned long)addr < first_high) { 23929983b6f0STejun Heo for_each_possible_cpu(cpu) { 23939983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 23949983b6f0STejun Heo 23959983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 23969983b6f0STejun Heo in_first_chunk = true; 23979983b6f0STejun Heo break; 23989983b6f0STejun Heo } 23999983b6f0STejun Heo } 24009983b6f0STejun Heo } 24019983b6f0STejun Heo 24029983b6f0STejun Heo if (in_first_chunk) { 2403eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 24043b034b0dSVivek Goyal return __pa(addr); 24053b034b0dSVivek Goyal else 24069f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 24079f57bd4dSEugene Surovegin offset_in_page(addr); 2408020ec653STejun Heo } else 24099f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 24109f57bd4dSEugene Surovegin offset_in_page(addr); 24113b034b0dSVivek Goyal } 24123b034b0dSVivek Goyal 2413fbf59bc9STejun Heo /** 2414fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 2415fd1e8a1fSTejun Heo * @nr_groups: the number of groups 2416fd1e8a1fSTejun Heo * @nr_units: the number of units 2417033e48fbSTejun Heo * 2418fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 2419fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 2420fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 2421fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 2422fd1e8a1fSTejun Heo * pointer of other groups. 2423033e48fbSTejun Heo * 2424033e48fbSTejun Heo * RETURNS: 2425fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 2426fd1e8a1fSTejun Heo * failure. 2427033e48fbSTejun Heo */ 2428fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 2429fd1e8a1fSTejun Heo int nr_units) 2430fd1e8a1fSTejun Heo { 2431fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 2432fd1e8a1fSTejun Heo size_t base_size, ai_size; 2433fd1e8a1fSTejun Heo void *ptr; 2434fd1e8a1fSTejun Heo int unit; 2435fd1e8a1fSTejun Heo 243614d37612SGustavo A. R. 
Silva base_size = ALIGN(struct_size(ai, groups, nr_groups), 2437fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 2438fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 2439fd1e8a1fSTejun Heo 244026fb3daeSMike Rapoport ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); 2441fd1e8a1fSTejun Heo if (!ptr) 2442fd1e8a1fSTejun Heo return NULL; 2443fd1e8a1fSTejun Heo ai = ptr; 2444fd1e8a1fSTejun Heo ptr += base_size; 2445fd1e8a1fSTejun Heo 2446fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 2447fd1e8a1fSTejun Heo 2448fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 2449fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 2450fd1e8a1fSTejun Heo 2451fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 2452fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 2453fd1e8a1fSTejun Heo 2454fd1e8a1fSTejun Heo return ai; 2455fd1e8a1fSTejun Heo } 2456fd1e8a1fSTejun Heo 2457fd1e8a1fSTejun Heo /** 2458fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 2459fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 2460fd1e8a1fSTejun Heo * 2461fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 2462fd1e8a1fSTejun Heo */ 2463fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 2464fd1e8a1fSTejun Heo { 24654421cca0SMike Rapoport memblock_free(ai, ai->__ai_size); 2466fd1e8a1fSTejun Heo } 2467fd1e8a1fSTejun Heo 2468fd1e8a1fSTejun Heo /** 2469fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 2470fd1e8a1fSTejun Heo * @lvl: loglevel 2471fd1e8a1fSTejun Heo * @ai: allocation info to dump 2472fd1e8a1fSTejun Heo * 2473fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 2474fd1e8a1fSTejun Heo */ 2475fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl, 2476fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai) 2477033e48fbSTejun Heo { 2478fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width; 2479033e48fbSTejun Heo char empty_str[] = "--------"; 2480fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0; 2481fd1e8a1fSTejun Heo int group, v; 2482fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */ 2483033e48fbSTejun Heo 2484fd1e8a1fSTejun Heo v = ai->nr_groups; 2485033e48fbSTejun Heo while (v /= 10) 2486fd1e8a1fSTejun Heo group_width++; 2487033e48fbSTejun Heo 2488fd1e8a1fSTejun Heo v = num_possible_cpus(); 2489fd1e8a1fSTejun Heo while (v /= 10) 2490fd1e8a1fSTejun Heo cpu_width++; 2491fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 2492033e48fbSTejun Heo 2493fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size; 2494fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3; 2495fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1)); 2496033e48fbSTejun Heo 2497fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 2498fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 2499fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 2500fd1e8a1fSTejun Heo 2501fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) { 2502fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 2503fd1e8a1fSTejun Heo int unit = 0, unit_end = 0; 2504fd1e8a1fSTejun Heo 2505fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa); 2506fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa; 2507fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) { 2508fd1e8a1fSTejun Heo if (!(alloc % apl)) { 25091170532bSJoe 
Perches 				pr_cont("\n");
2510fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl);
2511033e48fbSTejun Heo 			}
25121170532bSJoe Perches 			pr_cont("[%0*d] ", group_width, group);
2513fd1e8a1fSTejun Heo 
2514fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++)
2515fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS)
25161170532bSJoe Perches 					pr_cont("%0*d ",
25171170532bSJoe Perches 						cpu_width, gi->cpu_map[unit]);
2518033e48fbSTejun Heo 				else
25191170532bSJoe Perches 					pr_cont("%s ", empty_str);
2520033e48fbSTejun Heo 		}
2521fd1e8a1fSTejun Heo 	}
25221170532bSJoe Perches 	pr_cont("\n");
2523033e48fbSTejun Heo }
2524033e48fbSTejun Heo 
2525fbf59bc9STejun Heo /**
25268d408b4bSTejun Heo  * pcpu_setup_first_chunk - initialize the first percpu chunk
2527fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info describing how the percpu area is shaped
252838a6be52STejun Heo  * @base_addr: mapped address
2529fbf59bc9STejun Heo  *
25308d408b4bSTejun Heo  * Initialize the first percpu chunk which contains the kernel static
253169ab285bSChristophe JAILLET  * percpu area. This function is to be called from arch percpu area
253238a6be52STejun Heo  * setup path.
25338d408b4bSTejun Heo  *
2534fd1e8a1fSTejun Heo  * @ai contains all information necessary to initialize the first
2535fd1e8a1fSTejun Heo  * chunk and prime the dynamic percpu allocator.
25368d408b4bSTejun Heo  *
2537fd1e8a1fSTejun Heo  * @ai->static_size is the size of the static percpu area.
2538fd1e8a1fSTejun Heo  *
2539fd1e8a1fSTejun Heo  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2540edcb4639STejun Heo  * reserve after the static area in the first chunk. This reserves
2541edcb4639STejun Heo  * the first chunk such that it's available only through reserved
2542edcb4639STejun Heo  * percpu allocation. This is primarily used to serve module percpu
2543edcb4639STejun Heo  * static areas on architectures where the addressing model has
2544edcb4639STejun Heo  * limited offset range for symbol relocations to guarantee module
2545edcb4639STejun Heo  * percpu symbols fall inside the relocatable range.
2546edcb4639STejun Heo  *
2547fd1e8a1fSTejun Heo  * @ai->dyn_size determines the number of bytes available for dynamic
2548fd1e8a1fSTejun Heo  * allocation in the first chunk. The area between @ai->static_size +
2549fd1e8a1fSTejun Heo  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
25506074d5b0STejun Heo  *
2551fd1e8a1fSTejun Heo  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2552fd1e8a1fSTejun Heo  * and equal to or larger than @ai->static_size + @ai->reserved_size +
2553fd1e8a1fSTejun Heo  * @ai->dyn_size.
25548d408b4bSTejun Heo  *
2555fd1e8a1fSTejun Heo  * @ai->atom_size is the allocation atom size and is used as the alignment
2556fd1e8a1fSTejun Heo  * for vm areas.
25578d408b4bSTejun Heo  *
2558fd1e8a1fSTejun Heo  * @ai->alloc_size is the allocation size and is always a multiple of
2559fd1e8a1fSTejun Heo  * @ai->atom_size. This is larger than @ai->atom_size if
2560fd1e8a1fSTejun Heo  * @ai->unit_size is larger than @ai->atom_size.
2561fd1e8a1fSTejun Heo  *
2562fd1e8a1fSTejun Heo  * @ai->nr_groups and @ai->groups describe virtual memory layout of
2563fd1e8a1fSTejun Heo  * percpu areas. Units which should be colocated are put into the
2564fd1e8a1fSTejun Heo  * same group. Dynamic VM areas will be allocated according to these
2565fd1e8a1fSTejun Heo  * groupings. If @ai->nr_groups is zero, a single group containing
2566fd1e8a1fSTejun Heo  * all units is assumed.
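 *
 * As an illustrative example (hypothetical values), a UMA machine with
 * four possible CPUs could pass a single group:
 *
 *	ai->nr_groups = 1;
 *	ai->groups[0].nr_units = 4;
 *	ai->groups[0].cpu_map[] = { 0, 1, 2, 3 };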
25678d408b4bSTejun Heo * 256838a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 256938a6be52STejun Heo * copied static data to each unit. 2570fbf59bc9STejun Heo * 2571c0ebfdc3SDennis Zhou (Facebook) * The first chunk will always contain a static and a dynamic region. 2572c0ebfdc3SDennis Zhou (Facebook) * However, the static region is not managed by any chunk. If the first 2573c0ebfdc3SDennis Zhou (Facebook) * chunk also contains a reserved region, it is served by two chunks - 2574c0ebfdc3SDennis Zhou (Facebook) * one for the reserved region and one for the dynamic region. They 2575c0ebfdc3SDennis Zhou (Facebook) * share the same vm, but use offset regions in the area allocation map. 2576c0ebfdc3SDennis Zhou (Facebook) * The chunk serving the dynamic region is circulated in the chunk slots 2577c0ebfdc3SDennis Zhou (Facebook) * and available for dynamic allocation like any other chunk. 2578fbf59bc9STejun Heo */ 2579163fa234SKefeng Wang void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 2580fd1e8a1fSTejun Heo void *base_addr) 2581fbf59bc9STejun Heo { 2582b9c39442SDennis Zhou (Facebook) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2583d2f3c384SDennis Zhou (Facebook) size_t static_size, dyn_size; 25840c4169c3SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 25856563297cSTejun Heo unsigned long *group_offsets; 25866563297cSTejun Heo size_t *group_sizes; 2587fb435d52STejun Heo unsigned long *unit_off; 2588fbf59bc9STejun Heo unsigned int cpu; 2589fd1e8a1fSTejun Heo int *unit_map; 2590fd1e8a1fSTejun Heo int group, unit, i; 2591c0ebfdc3SDennis Zhou (Facebook) int map_size; 2592c0ebfdc3SDennis Zhou (Facebook) unsigned long tmp_addr; 2593f655f405SMike Rapoport size_t alloc_size; 2594fbf59bc9STejun Heo 2595635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 2596635b75fcSTejun Heo if (unlikely(cond)) { \ 2597870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \ 2598870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \ 2599807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \ 2600635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 2601635b75fcSTejun Heo BUG(); \ 2602635b75fcSTejun Heo } \ 2603635b75fcSTejun Heo } while (0) 2604635b75fcSTejun Heo 26052f39e637STejun Heo /* sanity checks */ 2606635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 2607bbddff05STejun Heo #ifdef CONFIG_SMP 2608635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 2609f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 2610bbddff05STejun Heo #endif 2611635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 2612f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 2613635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 2614f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 2615635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 2616ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); 2617099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 2618fb29a2ccSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!ai->dyn_size); 2619d2f3c384SDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); 2620ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || 2621ca460b3cSDennis Zhou (Facebook) IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); 26229f645532STejun Heo 
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 26238d408b4bSTejun Heo 26246563297cSTejun Heo /* process group information and build config tables accordingly */ 2625f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_offsets[0]); 2626f655f405SMike Rapoport group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2627f655f405SMike Rapoport if (!group_offsets) 2628f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2629f655f405SMike Rapoport alloc_size); 2630f655f405SMike Rapoport 2631f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_sizes[0]); 2632f655f405SMike Rapoport group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2633f655f405SMike Rapoport if (!group_sizes) 2634f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2635f655f405SMike Rapoport alloc_size); 2636f655f405SMike Rapoport 2637f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_map[0]); 2638f655f405SMike Rapoport unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2639f655f405SMike Rapoport if (!unit_map) 2640f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2641f655f405SMike Rapoport alloc_size); 2642f655f405SMike Rapoport 2643f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_off[0]); 2644f655f405SMike Rapoport unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2645f655f405SMike Rapoport if (!unit_off) 2646f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2647f655f405SMike Rapoport alloc_size); 26482f39e637STejun Heo 2649fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2650ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 2651a855b84cSTejun Heo 2652a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 2653a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 26542f39e637STejun Heo 2655fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 2656fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 26572f39e637STejun Heo 26586563297cSTejun Heo group_offsets[group] = gi->base_offset; 26596563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 26606563297cSTejun Heo 2661fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 2662fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 2663fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 2664fd1e8a1fSTejun Heo continue; 2665fd1e8a1fSTejun Heo 26669f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 2667635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 2668635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 2669fd1e8a1fSTejun Heo 2670fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 2671fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 2672fb435d52STejun Heo 2673a855b84cSTejun Heo /* determine low/high unit_cpu */ 2674a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 2675a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 2676a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 2677a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 2678a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 2679a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 26800fc0531eSLinus Torvalds } 26810fc0531eSLinus Torvalds } 2682fd1e8a1fSTejun Heo pcpu_nr_units = unit; 26832f39e637STejun Heo 26842f39e637STejun Heo for_each_possible_cpu(cpu) 2685635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 2686635b75fcSTejun Heo 2687635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 
2688635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 2689bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 26902f39e637STejun Heo 26916563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 26926563297cSTejun Heo pcpu_group_offsets = group_offsets; 26936563297cSTejun Heo pcpu_group_sizes = group_sizes; 2694fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 2695fb435d52STejun Heo pcpu_unit_offsets = unit_off; 26962f39e637STejun Heo 26972f39e637STejun Heo /* determine basic parameters */ 2698fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2699d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 27006563297cSTejun Heo pcpu_atom_size = ai->atom_size; 270161cf93d3SDennis Zhou pcpu_chunk_struct_size = struct_size(chunk, populated, 270261cf93d3SDennis Zhou BITS_TO_LONGS(pcpu_unit_pages)); 2703cafe8816STejun Heo 270430a5b536SDennis Zhou pcpu_stats_save_ai(ai); 270530a5b536SDennis Zhou 2706d9b55eebSTejun Heo /* 2707f1833241SRoman Gushchin * Allocate chunk slots. The slots after the active slots are: 2708f1833241SRoman Gushchin * sidelined_slot - isolated, depopulated chunks 2709f1833241SRoman Gushchin * free_slot - fully free chunks 2710f1833241SRoman Gushchin * to_depopulate_slot - isolated, chunks to depopulate 2711d9b55eebSTejun Heo */ 2712f1833241SRoman Gushchin pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1; 2713f1833241SRoman Gushchin pcpu_free_slot = pcpu_sidelined_slot + 1; 2714f1833241SRoman Gushchin pcpu_to_depopulate_slot = pcpu_free_slot + 1; 2715f1833241SRoman Gushchin pcpu_nr_slots = pcpu_to_depopulate_slot + 1; 27163c7be18aSRoman Gushchin pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * 2717faf65ddeSRoman Gushchin sizeof(pcpu_chunk_lists[0]), 27187e1c4e27SMike Rapoport SMP_CACHE_BYTES); 27193c7be18aSRoman Gushchin if (!pcpu_chunk_lists) 2720f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2721faf65ddeSRoman Gushchin pcpu_nr_slots * sizeof(pcpu_chunk_lists[0])); 27223c7be18aSRoman Gushchin 2723fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 2724faf65ddeSRoman Gushchin INIT_LIST_HEAD(&pcpu_chunk_lists[i]); 2725fbf59bc9STejun Heo 2726edcb4639STejun Heo /* 2727d2f3c384SDennis Zhou (Facebook) * The end of the static region needs to be aligned with the 2728d2f3c384SDennis Zhou (Facebook) * minimum allocation size as this offsets the reserved and 2729d2f3c384SDennis Zhou (Facebook) * dynamic region. The first chunk ends page aligned by 2730d2f3c384SDennis Zhou (Facebook) * expanding the dynamic region, therefore the dynamic region 2731d2f3c384SDennis Zhou (Facebook) * can be shrunk to compensate while still staying above the 2732d2f3c384SDennis Zhou (Facebook) * configured sizes. 2733d2f3c384SDennis Zhou (Facebook) */ 2734d2f3c384SDennis Zhou (Facebook) static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); 2735d2f3c384SDennis Zhou (Facebook) dyn_size = ai->dyn_size - (static_size - ai->static_size); 2736d2f3c384SDennis Zhou (Facebook) 2737d2f3c384SDennis Zhou (Facebook) /* 2738c0ebfdc3SDennis Zhou (Facebook) * Initialize first chunk. 2739c0ebfdc3SDennis Zhou (Facebook) * If the reserved_size is non-zero, this initializes the reserved 2740c0ebfdc3SDennis Zhou (Facebook) * chunk. If the reserved_size is zero, the reserved chunk is NULL 2741c0ebfdc3SDennis Zhou (Facebook) * and the dynamic region is initialized here. The first chunk, 2742c0ebfdc3SDennis Zhou (Facebook) * pcpu_first_chunk, will always point to the chunk that serves 2743c0ebfdc3SDennis Zhou (Facebook) * the dynamic region. 
2744edcb4639STejun Heo 	 */
2745d2f3c384SDennis Zhou (Facebook) 	tmp_addr = (unsigned long)base_addr + static_size;
2746d2f3c384SDennis Zhou (Facebook) 	map_size = ai->reserved_size ?: dyn_size;
274740064aecSDennis Zhou (Facebook) 	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
274861ace7faSTejun Heo 
2749edcb4639STejun Heo 	/* init dynamic chunk if necessary */
2750b9c39442SDennis Zhou (Facebook) 	if (ai->reserved_size) {
27510c4169c3SDennis Zhou (Facebook) 		pcpu_reserved_chunk = chunk;
2752b9c39442SDennis Zhou (Facebook) 
2753d2f3c384SDennis Zhou (Facebook) 		tmp_addr = (unsigned long)base_addr + static_size +
2754c0ebfdc3SDennis Zhou (Facebook) 			   ai->reserved_size;
2755d2f3c384SDennis Zhou (Facebook) 		map_size = dyn_size;
275640064aecSDennis Zhou (Facebook) 		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2757edcb4639STejun Heo 	}
2758edcb4639STejun Heo 
27592441d15cSTejun Heo 	/* link the first chunk in */
27600c4169c3SDennis Zhou (Facebook) 	pcpu_first_chunk = chunk;
2761faf65ddeSRoman Gushchin 	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2762ae9e6bc9STejun Heo 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2763fbf59bc9STejun Heo 
27647e8a6304SDennis Zhou (Facebook) 	/* include all regions of the first chunk */
27657e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated += PFN_DOWN(size_sum);
27667e8a6304SDennis Zhou (Facebook) 
276730a5b536SDennis Zhou 	pcpu_stats_chunk_alloc();
2768df95e795SDennis Zhou 	trace_percpu_create_chunk(base_addr);
276930a5b536SDennis Zhou 
2770fbf59bc9STejun Heo 	/* we're done */
2771bba174f5STejun Heo 	pcpu_base_addr = base_addr;
2772fbf59bc9STejun Heo }
277366c3a757STejun Heo 
2774bbddff05STejun Heo #ifdef CONFIG_SMP
2775bbddff05STejun Heo 
277617f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2777f58dc01bSTejun Heo 	[PCPU_FC_AUTO] = "auto",
2778f58dc01bSTejun Heo 	[PCPU_FC_EMBED] = "embed",
2779f58dc01bSTejun Heo 	[PCPU_FC_PAGE] = "page",
2780f58dc01bSTejun Heo };
278166c3a757STejun Heo 
2782f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2783f58dc01bSTejun Heo 
2784f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str)
278566c3a757STejun Heo {
27865479c78aSCyrill Gorcunov 	if (!str)
27875479c78aSCyrill Gorcunov 		return -EINVAL;
27885479c78aSCyrill Gorcunov 
2789f58dc01bSTejun Heo 	if (0)
2790f58dc01bSTejun Heo 		/* nada */;
2791f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2792f58dc01bSTejun Heo 	else if (!strcmp(str, "embed"))
2793f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_EMBED;
2794f58dc01bSTejun Heo #endif
2795f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2796f58dc01bSTejun Heo 	else if (!strcmp(str, "page"))
2797f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_PAGE;
2798f58dc01bSTejun Heo #endif
2799f58dc01bSTejun Heo 	else
2800870d4b12SJoe Perches 		pr_warn("unknown allocator %s specified\n", str);
280166c3a757STejun Heo 
2802f58dc01bSTejun Heo 	return 0;
280366c3a757STejun Heo }
2804f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup);
280566c3a757STejun Heo 
28063c9a024fSTejun Heo /*
28073c9a024fSTejun Heo  * pcpu_embed_first_chunk() is used by the generic percpu setup.
28083c9a024fSTejun Heo  * Build it if it is needed by the arch config or if the generic setup
28093c9a024fSTejun Heo  * is going to be used.
28103c9a024fSTejun Heo */ 281108fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 281208fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 28133c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 28143c9a024fSTejun Heo #endif 28153c9a024fSTejun Heo 28163c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 28173c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 28183c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 28193c9a024fSTejun Heo #endif 28203c9a024fSTejun Heo 28213c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 28223c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 28233c9a024fSTejun Heo /** 2824fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 2825fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2826fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2827fbf59bc9STejun Heo * @atom_size: allocation atom size 2828fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2829fbf59bc9STejun Heo * 2830fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 2831fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 2832fbf59bc9STejun Heo * atom size and distances between CPUs. 2833fbf59bc9STejun Heo * 2834bffc4375SYannick Guerrini * Groups are always multiples of atom size and CPUs which are of 2835fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 2836fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 2837fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage 2838fbf59bc9STejun Heo * of allocated virtual address space. 2839fbf59bc9STejun Heo * 2840fbf59bc9STejun Heo * RETURNS: 2841fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On 2842fbf59bc9STejun Heo * failure, ERR_PTR value is returned. 
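 *
 * As an illustrative example (hypothetical topology), CPUs 0-1 on node 0
 * and CPUs 2-3 on node 1 with a node-local @cpu_distance_fn would yield
 * two groups of two units each, keeping each node's units in their own
 * contiguous allocation.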
2843fbf59bc9STejun Heo */ 2844258e0815SDennis Zhou static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info( 2845fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 2846fbf59bc9STejun Heo size_t atom_size, 2847fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 2848fbf59bc9STejun Heo { 2849fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 2850fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 2851d7d29ac7SWonhyuk Yang static struct cpumask mask __initdata; 2852fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 2853fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 2854fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 28553f649ab7SKees Cook int upa, max_upa, best_upa; /* units_per_alloc */ 2856fbf59bc9STejun Heo int last_allocs, group, unit; 2857fbf59bc9STejun Heo unsigned int cpu, tcpu; 2858fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 2859fbf59bc9STejun Heo unsigned int *cpu_map; 2860fbf59bc9STejun Heo 2861fbf59bc9STejun Heo /* this function may be called multiple times */ 2862fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 2863fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 2864d7d29ac7SWonhyuk Yang cpumask_clear(&mask); 2865fbf59bc9STejun Heo 2866fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 2867fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 2868fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 2869fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 2870fbf59bc9STejun Heo 2871fbf59bc9STejun Heo /* 2872fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 2873fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 287425985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 2875fbf59bc9STejun Heo * or larger than min_unit_size. 
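	 *
	 * Illustrative numbers: if size_sum rounds min_unit_size up to 64k
	 * and atom_size is 2M, then alloc_size = 2M and upa starts at 32;
	 * 2M % 32 == 0 and 2M / 32 = 64k is page aligned, so max_upa = 32.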
2876fbf59bc9STejun Heo */ 2877fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 2878fbf59bc9STejun Heo 28799c015162SDennis Zhou (Facebook) /* determine the maximum # of units that can fit in an allocation */ 2880fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 2881fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 2882f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2883fbf59bc9STejun Heo upa--; 2884fbf59bc9STejun Heo max_upa = upa; 2885fbf59bc9STejun Heo 2886d7d29ac7SWonhyuk Yang cpumask_copy(&mask, cpu_possible_mask); 2887d7d29ac7SWonhyuk Yang 2888fbf59bc9STejun Heo /* group cpus according to their proximity */ 2889d7d29ac7SWonhyuk Yang for (group = 0; !cpumask_empty(&mask); group++) { 2890d7d29ac7SWonhyuk Yang /* pop the group's first cpu */ 2891d7d29ac7SWonhyuk Yang cpu = cpumask_first(&mask); 2892fbf59bc9STejun Heo group_map[cpu] = group; 2893fbf59bc9STejun Heo group_cnt[group]++; 2894d7d29ac7SWonhyuk Yang cpumask_clear_cpu(cpu, &mask); 2895d7d29ac7SWonhyuk Yang 2896d7d29ac7SWonhyuk Yang for_each_cpu(tcpu, &mask) { 2897d7d29ac7SWonhyuk Yang if (!cpu_distance_fn || 2898d7d29ac7SWonhyuk Yang (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE && 2899d7d29ac7SWonhyuk Yang cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) { 2900d7d29ac7SWonhyuk Yang group_map[tcpu] = group; 2901d7d29ac7SWonhyuk Yang group_cnt[group]++; 2902d7d29ac7SWonhyuk Yang cpumask_clear_cpu(tcpu, &mask); 2903fbf59bc9STejun Heo } 2904d7d29ac7SWonhyuk Yang } 2905d7d29ac7SWonhyuk Yang } 2906d7d29ac7SWonhyuk Yang nr_groups = group; 2907fbf59bc9STejun Heo 2908fbf59bc9STejun Heo /* 29099c015162SDennis Zhou (Facebook) * Wasted space is caused by a ratio imbalance of upa to group_cnt. 29109c015162SDennis Zhou (Facebook) * Expand the unit_size until we use >= 75% of the units allocated. 29119c015162SDennis Zhou (Facebook) * Related to atom_size, which could be much larger than the unit_size. 2912fbf59bc9STejun Heo */ 2913fbf59bc9STejun Heo last_allocs = INT_MAX; 29144829c791SDennis Zhou best_upa = 0; 2915fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 2916fbf59bc9STejun Heo int allocs = 0, wasted = 0; 2917fbf59bc9STejun Heo 2918f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2919fbf59bc9STejun Heo continue; 2920fbf59bc9STejun Heo 2921fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2922fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 2923fbf59bc9STejun Heo allocs += this_allocs; 2924fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 2925fbf59bc9STejun Heo } 2926fbf59bc9STejun Heo 2927fbf59bc9STejun Heo /* 2928fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 2929fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 2930fbf59bc9STejun Heo * passes the following check. 
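		 *
		 * E.g. with 4 possible CPUs the limit is 4 / 3 = 1, so any
		 * upa that would leave more than one unit unused is skipped.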
2931fbf59bc9STejun Heo */ 2932fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 2933fbf59bc9STejun Heo continue; 2934fbf59bc9STejun Heo 2935fbf59bc9STejun Heo /* and then don't consume more memory */ 2936fbf59bc9STejun Heo if (allocs > last_allocs) 2937fbf59bc9STejun Heo break; 2938fbf59bc9STejun Heo last_allocs = allocs; 2939fbf59bc9STejun Heo best_upa = upa; 2940fbf59bc9STejun Heo } 29414829c791SDennis Zhou BUG_ON(!best_upa); 2942fbf59bc9STejun Heo upa = best_upa; 2943fbf59bc9STejun Heo 2944fbf59bc9STejun Heo /* allocate and fill alloc_info */ 2945fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 2946fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 2947fbf59bc9STejun Heo 2948fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 2949fbf59bc9STejun Heo if (!ai) 2950fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 2951fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 2952fbf59bc9STejun Heo 2953fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2954fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 2955fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 2956fbf59bc9STejun Heo } 2957fbf59bc9STejun Heo 2958fbf59bc9STejun Heo ai->static_size = static_size; 2959fbf59bc9STejun Heo ai->reserved_size = reserved_size; 2960fbf59bc9STejun Heo ai->dyn_size = dyn_size; 2961fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 2962fbf59bc9STejun Heo ai->atom_size = atom_size; 2963fbf59bc9STejun Heo ai->alloc_size = alloc_size; 2964fbf59bc9STejun Heo 29652de7852fSPeng Fan for (group = 0, unit = 0; group < nr_groups; group++) { 2966fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2967fbf59bc9STejun Heo 2968fbf59bc9STejun Heo /* 2969fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 2970fbf59bc9STejun Heo * back-to-back. The caller should update this to 2971fbf59bc9STejun Heo * reflect actual allocation. 
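		 *
		 * E.g. (illustrative) with a 64k unit_size and groups of
		 * 2, 2 and 4 units, the back-to-back base_offsets are 0,
		 * 128k and 256k.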
2972fbf59bc9STejun Heo 		 */
2973fbf59bc9STejun Heo 		gi->base_offset = unit * ai->unit_size;
2974fbf59bc9STejun Heo 
2975fbf59bc9STejun Heo 		for_each_possible_cpu(cpu)
2976fbf59bc9STejun Heo 			if (group_map[cpu] == group)
2977fbf59bc9STejun Heo 				gi->cpu_map[gi->nr_units++] = cpu;
2978fbf59bc9STejun Heo 		gi->nr_units = roundup(gi->nr_units, upa);
2979fbf59bc9STejun Heo 		unit += gi->nr_units;
2980fbf59bc9STejun Heo 	}
2981fbf59bc9STejun Heo 	BUG_ON(unit != nr_units);
2982fbf59bc9STejun Heo 
2983fbf59bc9STejun Heo 	return ai;
2984fbf59bc9STejun Heo }
298523f91716SKefeng Wang 
298623f91716SKefeng Wang static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
298723f91716SKefeng Wang 				   pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
298823f91716SKefeng Wang {
298923f91716SKefeng Wang 	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
299023f91716SKefeng Wang #ifdef CONFIG_NUMA
299123f91716SKefeng Wang 	int node = NUMA_NO_NODE;
299223f91716SKefeng Wang 	void *ptr;
299323f91716SKefeng Wang 
299423f91716SKefeng Wang 	if (cpu_to_nd_fn)
299523f91716SKefeng Wang 		node = cpu_to_nd_fn(cpu);
299623f91716SKefeng Wang 
299723f91716SKefeng Wang 	if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
299823f91716SKefeng Wang 		ptr = memblock_alloc_from(size, align, goal);
299923f91716SKefeng Wang 		pr_info("cpu %d has no node %d or node-local memory\n",
300023f91716SKefeng Wang 			cpu, node);
300123f91716SKefeng Wang 		pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
300223f91716SKefeng Wang 			 cpu, size, (u64)__pa(ptr));
300323f91716SKefeng Wang 	} else {
300423f91716SKefeng Wang 		ptr = memblock_alloc_try_nid(size, align, goal,
300523f91716SKefeng Wang 					     MEMBLOCK_ALLOC_ACCESSIBLE,
300623f91716SKefeng Wang 					     node);
300723f91716SKefeng Wang 
300823f91716SKefeng Wang 		pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
300923f91716SKefeng Wang 			 cpu, size, node, (u64)__pa(ptr));
301023f91716SKefeng Wang 	}
301123f91716SKefeng Wang 	return ptr;
301223f91716SKefeng Wang #else
301323f91716SKefeng Wang 	return memblock_alloc_from(size, align, goal);
301423f91716SKefeng Wang #endif
301523f91716SKefeng Wang }
301623f91716SKefeng Wang 
301723f91716SKefeng Wang static void __init pcpu_fc_free(void *ptr, size_t size)
301823f91716SKefeng Wang {
301923f91716SKefeng Wang 	memblock_free(ptr, size);
302023f91716SKefeng Wang }
30213c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
3022fbf59bc9STejun Heo 
30233c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK)
302466c3a757STejun Heo /**
302566c3a757STejun Heo  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
302666c3a757STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
30274ba6ce25STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
3028c8826dd5STejun Heo  * @atom_size: allocation atom size
3029c8826dd5STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
30301ca3fb3aSKefeng Wang  * @cpu_to_nd_fn: callback to convert cpu to its node, optional
303166c3a757STejun Heo  *
303266c3a757STejun Heo  * This is a helper to ease setting up an embedded first percpu chunk and
303366c3a757STejun Heo  * can be called where pcpu_setup_first_chunk() is expected.
303466c3a757STejun Heo  *
303566c3a757STejun Heo  * If this function is used to set up the first chunk, it is allocated
303623f91716SKefeng Wang  * by calling pcpu_fc_alloc and used as-is without being mapped into
3037c8826dd5STejun Heo  * vmalloc area.
Allocations are always whole multiples of @atom_size 3038c8826dd5STejun Heo * aligned to @atom_size. 3039c8826dd5STejun Heo * 3040c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 3041c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 3042c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 3043c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 3044c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 3045c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 304666c3a757STejun Heo * 30474ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 304866c3a757STejun Heo * 304966c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 305023f91716SKefeng Wang * size, the leftover is returned using pcpu_fc_free. 305166c3a757STejun Heo * 305266c3a757STejun Heo * RETURNS: 3053fb435d52STejun Heo * 0 on success, -errno on failure. 305466c3a757STejun Heo */ 30554ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 3056c8826dd5STejun Heo size_t atom_size, 3057c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 305823f91716SKefeng Wang pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn) 305966c3a757STejun Heo { 3060c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 3061c8826dd5STejun Heo void **areas = NULL; 3062fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 306393c76b6bSzijun_hu size_t size_sum, areas_size; 306493c76b6bSzijun_hu unsigned long max_distance; 3065163fa234SKefeng Wang int group, i, highest_group, rc = 0; 306666c3a757STejun Heo 3067c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 3068c8826dd5STejun Heo cpu_distance_fn); 3069fd1e8a1fSTejun Heo if (IS_ERR(ai)) 3070fd1e8a1fSTejun Heo return PTR_ERR(ai); 307166c3a757STejun Heo 3072fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 3073c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 307466c3a757STejun Heo 307526fb3daeSMike Rapoport areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); 3076c8826dd5STejun Heo if (!areas) { 3077fb435d52STejun Heo rc = -ENOMEM; 3078c8826dd5STejun Heo goto out_free; 3079fa8a7094STejun Heo } 308066c3a757STejun Heo 30819b739662Szijun_hu /* allocate, copy and determine base address & max_distance */ 30829b739662Szijun_hu highest_group = 0; 3083c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 3084c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 3085c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 3086c8826dd5STejun Heo void *ptr; 308766c3a757STejun Heo 3088c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 3089c8826dd5STejun Heo cpu = gi->cpu_map[i]; 3090c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 3091c8826dd5STejun Heo 3092c8826dd5STejun Heo /* allocate space for the whole group */ 309323f91716SKefeng Wang ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn); 3094c8826dd5STejun Heo if (!ptr) { 3095c8826dd5STejun Heo rc = -ENOMEM; 3096c8826dd5STejun Heo goto out_free_areas; 3097c8826dd5STejun Heo } 3098f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 3099a317ebccSPatrick Wang kmemleak_ignore_phys(__pa(ptr)); 3100c8826dd5STejun Heo areas[group] = ptr; 3101c8826dd5STejun Heo 3102c8826dd5STejun Heo base = min(ptr, base); 31039b739662Szijun_hu if (ptr > 
31069b739662Szijun_hu 	max_distance = areas[highest_group] - base;
31079b739662Szijun_hu 	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
31089b739662Szijun_hu 
31099b739662Szijun_hu 	/* warn if maximum distance is further than 75% of vmalloc space */
31109b739662Szijun_hu 	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
31119b739662Szijun_hu 		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
31129b739662Szijun_hu 			max_distance, VMALLOC_TOTAL);
31139b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
31149b739662Szijun_hu 		/* and fail if we have fallback */
31159b739662Szijun_hu 		rc = -EINVAL;
31169b739662Szijun_hu 		goto out_free_areas;
31179b739662Szijun_hu #endif
311842b64281STejun Heo 	}
311942b64281STejun Heo 
312042b64281STejun Heo 	/*
312142b64281STejun Heo 	 * Copy data and free unused parts.  This should happen after all
312242b64281STejun Heo 	 * allocations are complete; otherwise, we may end up with
312342b64281STejun Heo 	 * overlapping groups.
312442b64281STejun Heo 	 */
312542b64281STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
312642b64281STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
312742b64281STejun Heo 		void *ptr = areas[group];
3128c8826dd5STejun Heo 
3129c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3130c8826dd5STejun Heo 			if (gi->cpu_map[i] == NR_CPUS) {
3131c8826dd5STejun Heo 				/* unused unit, free whole */
313223f91716SKefeng Wang 				pcpu_fc_free(ptr, ai->unit_size);
3133c8826dd5STejun Heo 				continue;
3134c8826dd5STejun Heo 			}
3135c8826dd5STejun Heo 			/* copy and return the unused part */
3136fd1e8a1fSTejun Heo 			memcpy(ptr, __per_cpu_load, ai->static_size);
313723f91716SKefeng Wang 			pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
3138c8826dd5STejun Heo 		}
313966c3a757STejun Heo 	}
314066c3a757STejun Heo 
3141c8826dd5STejun Heo 	/* base address is now known, determine group base offsets */
31426ea529a2STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
3143c8826dd5STejun Heo 		ai->groups[group].base_offset = areas[group] - base;
31446ea529a2STejun Heo 	}
3145c8826dd5STejun Heo 
314600206a69SMatteo Croce 	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
314700206a69SMatteo Croce 		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3148fd1e8a1fSTejun Heo 		ai->dyn_size, ai->unit_size);
314966c3a757STejun Heo 
3150163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, base);
3151c8826dd5STejun Heo 	goto out_free;
3152c8826dd5STejun Heo 
3153c8826dd5STejun Heo out_free_areas:
3154c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++)
3155f851c8d8SMichael Holzheu 		if (areas[group])
315623f91716SKefeng Wang 			pcpu_fc_free(areas[group],
3157c8826dd5STejun Heo 				     ai->groups[group].nr_units * ai->unit_size);
3158c8826dd5STejun Heo out_free:
3159fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
3160c8826dd5STejun Heo 	if (areas)
31614421cca0SMike Rapoport 		memblock_free(areas, areas_size);
3162fb435d52STejun Heo 	return rc;
3163d4b95f80STejun Heo }
31643c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */
3165d4b95f80STejun Heo 
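/*
 * For illustration: a hedged sketch of an arch-side caller of
 * pcpu_embed_first_chunk().  The helper names pcpu_cpu_distance() and
 * pcpu_cpu_to_node() are assumptions for the sketch, not part of this
 * file; real callers live in arch code (x86's setup_per_cpu_areas() is
 * structured roughly like this):
 *
 *	static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 *	{
 *		return early_cpu_to_node(from) == early_cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    pcpu_cpu_distance, pcpu_cpu_to_node);
 *
 * On failure the arch would typically fall back to the page-mapped
 * first chunk; see the sketch after pcpu_page_first_chunk() below.
 */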
31663c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK
316720c03576SKefeng Wang #include <asm/pgalloc.h>
316820c03576SKefeng Wang 
316920c03576SKefeng Wang #ifndef P4D_TABLE_SIZE
317020c03576SKefeng Wang #define P4D_TABLE_SIZE PAGE_SIZE
317120c03576SKefeng Wang #endif
317220c03576SKefeng Wang 
317320c03576SKefeng Wang #ifndef PUD_TABLE_SIZE
317420c03576SKefeng Wang #define PUD_TABLE_SIZE PAGE_SIZE
317520c03576SKefeng Wang #endif
317620c03576SKefeng Wang 
317720c03576SKefeng Wang #ifndef PMD_TABLE_SIZE
317820c03576SKefeng Wang #define PMD_TABLE_SIZE PAGE_SIZE
317920c03576SKefeng Wang #endif
318020c03576SKefeng Wang 
318120c03576SKefeng Wang #ifndef PTE_TABLE_SIZE
318220c03576SKefeng Wang #define PTE_TABLE_SIZE PAGE_SIZE
318320c03576SKefeng Wang #endif
318420c03576SKefeng Wang void __init __weak pcpu_populate_pte(unsigned long addr)
318520c03576SKefeng Wang {
318620c03576SKefeng Wang 	pgd_t *pgd = pgd_offset_k(addr);
318720c03576SKefeng Wang 	p4d_t *p4d;
318820c03576SKefeng Wang 	pud_t *pud;
318920c03576SKefeng Wang 	pmd_t *pmd;
319020c03576SKefeng Wang 
319120c03576SKefeng Wang 	if (pgd_none(*pgd)) {
3192*41fd59b7SBibo Mao 		p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
3193*41fd59b7SBibo Mao 		if (!p4d)
319420c03576SKefeng Wang 			goto err_alloc;
3195*41fd59b7SBibo Mao 		pgd_populate(&init_mm, pgd, p4d);
319620c03576SKefeng Wang 	}
319720c03576SKefeng Wang 
319820c03576SKefeng Wang 	p4d = p4d_offset(pgd, addr);
319920c03576SKefeng Wang 	if (p4d_none(*p4d)) {
3200*41fd59b7SBibo Mao 		pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
3201*41fd59b7SBibo Mao 		if (!pud)
320220c03576SKefeng Wang 			goto err_alloc;
3203*41fd59b7SBibo Mao 		p4d_populate(&init_mm, p4d, pud);
320420c03576SKefeng Wang 	}
320520c03576SKefeng Wang 
320620c03576SKefeng Wang 	pud = pud_offset(p4d, addr);
320720c03576SKefeng Wang 	if (pud_none(*pud)) {
3208*41fd59b7SBibo Mao 		pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
3209*41fd59b7SBibo Mao 		if (!pmd)
321020c03576SKefeng Wang 			goto err_alloc;
3211*41fd59b7SBibo Mao 		pud_populate(&init_mm, pud, pmd);
321220c03576SKefeng Wang 	}
321320c03576SKefeng Wang 
321420c03576SKefeng Wang 	pmd = pmd_offset(pud, addr);
321520c03576SKefeng Wang 	if (!pmd_present(*pmd)) {
321620c03576SKefeng Wang 		pte_t *new;
321720c03576SKefeng Wang 
321820c03576SKefeng Wang 		new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
321920c03576SKefeng Wang 		if (!new)
322020c03576SKefeng Wang 			goto err_alloc;
322120c03576SKefeng Wang 		pmd_populate_kernel(&init_mm, pmd, new);
322220c03576SKefeng Wang 	}
322320c03576SKefeng Wang 
322420c03576SKefeng Wang 	return;
322520c03576SKefeng Wang 
322620c03576SKefeng Wang err_alloc:
322720c03576SKefeng Wang 	panic("%s: Failed to allocate memory\n", __func__);
322820c03576SKefeng Wang }
322920c03576SKefeng Wang 
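/*
 * pcpu_populate_pte() above is __weak, so an architecture whose early
 * page-table population differs can supply its own definition.  A
 * minimal hypothetical override (arch_populate_kernel_pte() is an
 * assumed arch helper, used here only to show the contract: a pte page
 * must be wired up for @addr before __pcpu_map_pages() runs):
 *
 *	void __init pcpu_populate_pte(unsigned long addr)
 *	{
 *		if (arch_populate_kernel_pte(addr))	// assumed helper
 *			panic("%s: cannot populate pte for 0x%lx\n",
 *			      __func__, addr);
 *	}
 */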
3230d4b95f80STejun Heo /**
323100ae4064STejun Heo  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3232d4b95f80STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
32331ca3fb3aSKefeng Wang  * @cpu_to_nd_fn: callback to convert cpu to its node, optional
3234d4b95f80STejun Heo  *
323500ae4064STejun Heo  * This is a helper to ease setting up a page-remapped first percpu
323600ae4064STejun Heo  * chunk and can be called where pcpu_setup_first_chunk() is expected.
3237d4b95f80STejun Heo  *
3238d4b95f80STejun Heo  * This is the basic allocator.  The static percpu area is allocated
3239d4b95f80STejun Heo  * page-by-page into the vmalloc area.
3240d4b95f80STejun Heo  *
 * An illustrative fallback sequence is sketched after this function.
 *
3241d4b95f80STejun Heo  * RETURNS:
3242fb435d52STejun Heo  * 0 on success, -errno on failure.
3243d4b95f80STejun Heo  */
324420c03576SKefeng Wang int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3245d4b95f80STejun Heo {
32468f05a6a6STejun Heo 	static struct vm_struct vm;
3247fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
324800ae4064STejun Heo 	char psize_str[16];
3249ce3141a2STejun Heo 	int unit_pages;
3250d4b95f80STejun Heo 	size_t pages_size;
3251ce3141a2STejun Heo 	struct page **pages;
3252163fa234SKefeng Wang 	int unit, i, j, rc = 0;
32538f606604Szijun_hu 	int upa;
32548f606604Szijun_hu 	int nr_g0_units;
3255d4b95f80STejun Heo 
325600ae4064STejun Heo 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
325700ae4064STejun Heo 
32584ba6ce25STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3259fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
3260fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
3261fd1e8a1fSTejun Heo 	BUG_ON(ai->nr_groups != 1);
32628f606604Szijun_hu 	upa = ai->alloc_size / ai->unit_size;
32638f606604Szijun_hu 	nr_g0_units = roundup(num_possible_cpus(), upa);
32640b59c25fSIgor Stoppa 	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
32658f606604Szijun_hu 		pcpu_free_alloc_info(ai);
32668f606604Szijun_hu 		return -EINVAL;
32678f606604Szijun_hu 	}
3268fd1e8a1fSTejun Heo 
3269fd1e8a1fSTejun Heo 	unit_pages = ai->unit_size >> PAGE_SHIFT;
3270d4b95f80STejun Heo 
3271d4b95f80STejun Heo 	/* unaligned allocations can't be freed, round up to page size */
3272fd1e8a1fSTejun Heo 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3273fd1e8a1fSTejun Heo 			       sizeof(pages[0]));
32747e1c4e27SMike Rapoport 	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3275f655f405SMike Rapoport 	if (!pages)
3276f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
3277f655f405SMike Rapoport 		      pages_size);
3278d4b95f80STejun Heo 
32798f05a6a6STejun Heo 	/* allocate pages */
3280d4b95f80STejun Heo 	j = 0;
32818f606604Szijun_hu 	for (unit = 0; unit < num_possible_cpus(); unit++) {
3282fd1e8a1fSTejun Heo 		unsigned int cpu = ai->groups[0].cpu_map[unit];
32838f606604Szijun_hu 		for (i = 0; i < unit_pages; i++) {
3284d4b95f80STejun Heo 			void *ptr;
3285d4b95f80STejun Heo 
328623f91716SKefeng Wang 			ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
3287d4b95f80STejun Heo 			if (!ptr) {
3288870d4b12SJoe Perches 				pr_warn("failed to allocate %s page for cpu%u\n",
3289598d8091SJoe Perches 					psize_str, cpu);
3290d4b95f80STejun Heo 				goto enomem;
3291d4b95f80STejun Heo 			}
3292f528f0b8SCatalin Marinas 			/* kmemleak tracks the percpu allocations separately */
3293a317ebccSPatrick Wang 			kmemleak_ignore_phys(__pa(ptr));
3294ce3141a2STejun Heo 			pages[j++] = virt_to_page(ptr);
3295d4b95f80STejun Heo 		}
32968f606604Szijun_hu 	}
3297d4b95f80STejun Heo 
32988f05a6a6STejun Heo 	/* allocate vm area, map the pages and copy static data */
32998f05a6a6STejun Heo 	vm.flags = VM_ALLOC;
3300fd1e8a1fSTejun Heo 	vm.size = num_possible_cpus() * ai->unit_size;
33018f05a6a6STejun Heo 	vm_area_register_early(&vm, PAGE_SIZE);
33028f05a6a6STejun Heo 
3303fd1e8a1fSTejun Heo 	for (unit = 0; unit < num_possible_cpus(); unit++) {
33041d9d3257STejun Heo 		unsigned long unit_addr =
3305fd1e8a1fSTejun Heo 			(unsigned long)vm.addr + unit * ai->unit_size;
33068f05a6a6STejun Heo 
3307ce3141a2STejun Heo 		for (i = 0; i < unit_pages; i++)
330820c03576SKefeng Wang 			pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));
33098f05a6a6STejun Heo 
33108f05a6a6STejun Heo 		/* pte already populated, the following shouldn't fail */
3311fb435d52STejun Heo 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3312ce3141a2STejun Heo 				      unit_pages);
3313fb435d52STejun Heo 		if (rc < 0)
3314fb435d52STejun Heo 			panic("failed to map percpu area, err=%d\n", rc);
33158f05a6a6STejun Heo 
33168f05a6a6STejun Heo 		/*
33178f05a6a6STejun Heo 		 * FIXME: Archs with virtual cache should flush local
33188f05a6a6STejun Heo 		 * cache for the linear mapping here - something
33198f05a6a6STejun Heo 		 * equivalent to flush_cache_vmap() on the local cpu.
33208f05a6a6STejun Heo 		 * flush_cache_vmap() can't be used as most supporting
33218f05a6a6STejun Heo 		 * data structures are not set up yet.
33228f05a6a6STejun Heo 		 */
33238f05a6a6STejun Heo 
33248f05a6a6STejun Heo 		/* copy static data */
3325fd1e8a1fSTejun Heo 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
332666c3a757STejun Heo 	}
332766c3a757STejun Heo 
332866c3a757STejun Heo 	/* we're ready, commit */
332900206a69SMatteo Croce 	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
333000206a69SMatteo Croce 		unit_pages, psize_str, ai->static_size,
3331fd1e8a1fSTejun Heo 		ai->reserved_size, ai->dyn_size);
333266c3a757STejun Heo 
3333163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, vm.addr);
3334d4b95f80STejun Heo 	goto out_free_ar;
3335d4b95f80STejun Heo 
3336d4b95f80STejun Heo enomem:
3337d4b95f80STejun Heo 	while (--j >= 0)
333823f91716SKefeng Wang 		pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
3339fb435d52STejun Heo 	rc = -ENOMEM;
3340d4b95f80STejun Heo out_free_ar:
33414421cca0SMike Rapoport 	memblock_free(pages, pages_size);
3342fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
3343fb435d52STejun Heo 	return rc;
334466c3a757STejun Heo }
33453c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */
3346d4b95f80STejun Heo 
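/*
 * Like the embed helper, pcpu_page_first_chunk() is invoked from arch
 * code, typically as the fallback when embedding is unsuitable
 * (CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK).  A hedged sketch only; the
 * exact flow and the pcpu_cpu_to_node() helper are assumptions, not
 * part of this file:
 *
 *	rc = -EINVAL;
 *	if (pcpu_chosen_fc != PCPU_FC_PAGE)
 *		rc = pcpu_embed_first_chunk(...);	// try embedding first
 *	if (rc < 0)
 *		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *					   pcpu_cpu_to_node);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */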
3347bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
33488c4bfc6eSTejun Heo /*
3349bbddff05STejun Heo  * Generic SMP percpu area setup.
3350e74e3962STejun Heo  *
3351e74e3962STejun Heo  * The embedding helper is used because its behavior closely resembles
3352e74e3962STejun Heo  * the original non-dynamic generic percpu area setup.  This is
3353e74e3962STejun Heo  * important because many archs have addressing restrictions and might
3354e74e3962STejun Heo  * fail if the percpu area is located far away from the previous
3355e74e3962STejun Heo  * location.  As an added bonus, in non-NUMA cases, embedding is
3356e74e3962STejun Heo  * generally a good idea TLB-wise because the percpu area can piggyback
3357e74e3962STejun Heo  * on the physical linear memory mapping, which uses large page
3358e74e3962STejun Heo  * mappings on applicable archs.
3359e74e3962STejun Heo  */
3360e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3361e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset);
3362e74e3962STejun Heo 
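/*
 * The per-cpu offsets recorded below are what the generic accessors
 * consume.  Roughly, from include/asm-generic/percpu.h:
 *
 *	#define per_cpu_offset(x) (__per_cpu_offset[x])
 *
 * and per_cpu_ptr(ptr, cpu) then amounts to shifting the pointer:
 *
 *	(typeof(ptr))((unsigned long)(ptr) + per_cpu_offset(cpu))
 *
 * so a static percpu variable at link-time address V is accessed at
 * V + __per_cpu_offset[cpu] on each cpu.
 */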
3363e74e3962STejun Heo void __init setup_per_cpu_areas(void)
3364e74e3962STejun Heo {
3365e74e3962STejun Heo 	unsigned long delta;
3366e74e3962STejun Heo 	unsigned int cpu;
3367fb435d52STejun Heo 	int rc;
3368e74e3962STejun Heo 
3369e74e3962STejun Heo 	/*
3370e74e3962STejun Heo 	 * Always reserve area for module percpu variables.  That's
3371e74e3962STejun Heo 	 * what the legacy allocator did.
3372e74e3962STejun Heo 	 */
337323f91716SKefeng Wang 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
337423f91716SKefeng Wang 				    PAGE_SIZE, NULL, NULL);
3375fb435d52STejun Heo 	if (rc < 0)
3376bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
3377e74e3962STejun Heo 
3378e74e3962STejun Heo 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3379e74e3962STejun Heo 	for_each_possible_cpu(cpu)
3380fb435d52STejun Heo 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3381e74e3962STejun Heo }
3382e74e3962STejun Heo #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3383099a19d9STejun Heo 
3384bbddff05STejun Heo #else	/* CONFIG_SMP */
3385bbddff05STejun Heo 
3386bbddff05STejun Heo /*
3387bbddff05STejun Heo  * UP percpu area setup.
3388bbddff05STejun Heo  *
3389bbddff05STejun Heo  * UP always uses the km-based percpu allocator with identity mapping.
3390bbddff05STejun Heo  * Static percpu variables are indistinguishable from the usual static
3391bbddff05STejun Heo  * variables and don't require any special preparation.
3392bbddff05STejun Heo  */
3393bbddff05STejun Heo void __init setup_per_cpu_areas(void)
3394bbddff05STejun Heo {
3395bbddff05STejun Heo 	const size_t unit_size =
3396bbddff05STejun Heo 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3397bbddff05STejun Heo 					 PERCPU_DYNAMIC_RESERVE));
3398bbddff05STejun Heo 	struct pcpu_alloc_info *ai;
3399bbddff05STejun Heo 	void *fc;
3400bbddff05STejun Heo 
3401bbddff05STejun Heo 	ai = pcpu_alloc_alloc_info(1, 1);
340226fb3daeSMike Rapoport 	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3403bbddff05STejun Heo 	if (!ai || !fc)
3404bbddff05STejun Heo 		panic("Failed to allocate memory for percpu areas.");
3405100d13c3SCatalin Marinas 	/* kmemleak tracks the percpu allocations separately */
3406a317ebccSPatrick Wang 	kmemleak_ignore_phys(__pa(fc));
3407bbddff05STejun Heo 
3408bbddff05STejun Heo 	ai->dyn_size = unit_size;
3409bbddff05STejun Heo 	ai->unit_size = unit_size;
3410bbddff05STejun Heo 	ai->atom_size = unit_size;
3411bbddff05STejun Heo 	ai->alloc_size = unit_size;
3412bbddff05STejun Heo 	ai->groups[0].nr_units = 1;
3413bbddff05STejun Heo 	ai->groups[0].cpu_map[0] = 0;
3414bbddff05STejun Heo 
3415163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, fc);
3416438a5061SNicolas Pitre 	pcpu_free_alloc_info(ai);
3417bbddff05STejun Heo }
3418bbddff05STejun Heo 
3419bbddff05STejun Heo #endif	/* CONFIG_SMP */
3420bbddff05STejun Heo 
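/*
 * A worked example for the UP unit_size computation above, using
 * illustrative values rather than the real constants: if
 * PCPU_MIN_UNIT_SIZE were 32K and PERCPU_DYNAMIC_RESERVE 28K, then
 *
 *	unit_size = roundup_pow_of_two(max(32K, 28K)) = 32K
 *
 * giving a single power-of-two block that serves as unit, atom and
 * allocation size for the one and only chunk.
 */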
3421099a19d9STejun Heo /*
34227e8a6304SDennis Zhou (Facebook)  * pcpu_nr_pages - calculate total number of populated backing pages
34237e8a6304SDennis Zhou (Facebook)  *
34247e8a6304SDennis Zhou (Facebook)  * This reflects the number of pages populated to back chunks.  Metadata is
34257e8a6304SDennis Zhou (Facebook)  * excluded from the number exposed in meminfo because the number of backing
34267e8a6304SDennis Zhou (Facebook)  * pages scales with the number of cpus and can quickly outweigh the memory
34277e8a6304SDennis Zhou (Facebook)  * used for metadata.  Excluding it also keeps this calculation nice and simple.
34287e8a6304SDennis Zhou (Facebook)  *
34297e8a6304SDennis Zhou (Facebook)  * RETURNS:
34307e8a6304SDennis Zhou (Facebook)  * Total number of populated backing pages in use by the allocator.
34317e8a6304SDennis Zhou (Facebook)  */
34327e8a6304SDennis Zhou (Facebook) unsigned long pcpu_nr_pages(void)
34337e8a6304SDennis Zhou (Facebook) {
34347e8a6304SDennis Zhou (Facebook) 	return pcpu_nr_populated * pcpu_nr_units;
34357e8a6304SDennis Zhou (Facebook) }
34367e8a6304SDennis Zhou (Facebook) 
34377e8a6304SDennis Zhou (Facebook) /*
34381a4d7607STejun Heo  * The percpu allocator is initialized early during boot, when neither slab
34391a4d7607STejun Heo  * nor workqueue is available.  Plug async management until everything is up
34401a4d7607STejun Heo  * and running.
34411a4d7607STejun Heo  */
34421a4d7607STejun Heo static int __init percpu_enable_async(void)
34431a4d7607STejun Heo {
34441a4d7607STejun Heo 	pcpu_async_enabled = true;
34451a4d7607STejun Heo 	return 0;
34461a4d7607STejun Heo }
34471a4d7607STejun Heo subsys_initcall(percpu_enable_async);
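/*
 * For reference, pcpu_nr_pages() above is what backs the "Percpu:" line
 * in /proc/meminfo; fs/proc/meminfo.c reports it roughly as:
 *
 *	show_val_kb(m, "Percpu:         ", pcpu_nr_pages());
 *
 * Converting the result to bytes is simply pcpu_nr_pages() << PAGE_SHIFT.
 */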