/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 * consists of a boot-time determined number of units and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each entry is the byte offset at which an area
 * starts; the lowest bit of an entry is set while the area is
 * allocated and clear while it is free.  Allocation inside a chunk is
 * done by scanning this map sequentially and serving the first
 * matching entry.  This is mostly copied from the percpu_modalloc()
 * allocator.  Chunks can be determined from the address using the
 * index field in the page struct.  The index field contains a
 * pointer to the chunk.
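 *
 * As an illustration (an editorial addition, not part of the original
 * comment, using the encoding described above): the map
 * { 0|1, 512, 4096|1, 8192|1 } describes an allocated area at
 * [0, 512), a free area at [512, 4096) and an allocated area at
 * [4096, 8192), where the last entry is the end-of-chunk sentinel and
 * each area ends where the next one begins.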
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +			\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
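/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): the macros above simply rebase addresses so that dynamic
 * allocations and the kernel's static percpu symbols (which start at
 * __per_cpu_start) share one pointer space:
 *
 *	void __percpu *p = __addr_to_pcpu_ptr(chunk->base_addr + off);
 *	void *addr = __pcpu_ptr_to_addr(p);	(back to base_addr + off)
 *
 * This is exactly the round trip performed by pcpu_alloc() and
 * free_percpu() below.
 */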
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
static int pcpu_reserved_chunk_limit __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init;		/* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
static int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
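/*
 * Worked example (an editorial addition, not part of the original
 * source): for a chunk whose free_size is 512 bytes, fls(512) == 10,
 * so the chunk is kept on slot max(10 - 5 + 2, 1) == 7; doubling the
 * free size to 1024 bumps it to slot 8.  Larger free areas therefore
 * land on higher slots, which is what lets pcpu_alloc() start its
 * search at pcpu_size_to_slot(size) and skip chunks that cannot
 * possibly fit the request.
 */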
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
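/*
 * Usage sketch (an editorial addition, not part of the original
 * source): with a populated bitmap of 1 1 0 0 1 over five pages,
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, 5)
 *
 * yields a single region with rs == 2 and re == 4, while
 * pcpu_for_each_pop_region() yields [0, 2) and then [4, 5).  The
 * allocation path below uses the former to populate only the holes
 * inside a newly allocated area.
 */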
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to a page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
	int off = chunk->map[i] & ~1;
	int end = chunk->map[i + 1] & ~1;

	if (!PAGE_ALIGNED(off) && i > 0) {
		int prev = chunk->map[i - 1];

		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
			off = round_down(off, PAGE_SIZE);
	}

	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
		int next = chunk->map[i + 1];
		int nend = chunk->map[i + 2] & ~1;

		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
			end = round_up(end, PAGE_SIZE);
	}

	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}
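/*
 * Worked example (an editorial addition, not part of the original
 * source): with 4K pages, an area spanning [4100, 12288) only fully
 * covers page 2, so PFN_DOWN(12288) - PFN_UP(4100) == 3 - 2 == 1.  If
 * the area preceding it is free and reaches back past 4096, @off is
 * rounded down to 4096 and the straddled page 1 is counted as well,
 * giving 2.
 */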
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the amount left
 * is low.  If !@is_atomic, it aims for more empty space.  Combined, this
 * ensures that the map is likely to have enough available space to
 * accommodate atomic allocations which can't extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
	int margin, new_alloc;

	lockdep_assert_held(&pcpu_lock);

	if (is_atomic) {
		margin = 3;

		if (chunk->map_alloc <
		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
			if (list_empty(&chunk->map_extend_list)) {
				list_add_tail(&chunk->map_extend_list,
					      &pcpu_map_extend_chunks);
				pcpu_schedule_balance_work();
			}
		}
	} else {
		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
	}

	if (chunk->map_alloc >= chunk->map_used + margin)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + margin)
		new_alloc *= 2;

	return new_alloc;
}
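/*
 * Worked example (an editorial addition, not part of the original
 * source): for a non-atomic allocation on a chunk with map_used ==
 * 100, the margin is PCPU_ATOMIC_MAP_MARGIN_HIGH == 64, so the target
 * is at least 164 entries; starting from PCPU_DFL_MAP_ALLOC == 16 and
 * doubling gives a new_alloc of 256.  Growing in powers of two keeps
 * the number of reallocations logarithmic in the number of map
 * entries.
 */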
/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	lockdep_assert_held(&pcpu_alloc_mutex);

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old);
	pcpu_mem_free(new);

	return 0;
}

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}
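/*
 * Worked example (an editorial addition, not part of the original
 * source): a 256-byte request with @align == 64 against a free area at
 * @off == 100 needs head == ALIGN(100, 64) - 100 == 28 bytes of
 * padding, so the candidate area must be at least 284 bytes for the
 * fit to succeed.
 */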
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only, int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used;
	     i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
			   int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);
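	/*
	 * Illustrative note (an editorial addition, not part of the
	 * original source): since allocated areas store offset|1,
	 * freeing the area at offset 4096 binary-searches the map for
	 * the value 4097; finding only the even (free) value 4096 means
	 * the area was never allocated or was already freed, and the
	 * BUG_ON above catches it.
	 */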
	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1,
				 chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;
	chunk->has_reserved = false;

	INIT_LIST_HEAD(&chunk->list);
	INIT_LIST_HEAD(&chunk->map_extend_list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}
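/*
 * Illustrative note (an editorial addition, not part of the original
 * source): a fresh chunk starts out with map[] == { 0, pcpu_unit_size|1 }
 * and map_used == 1, i.e. one free area covering [0, pcpu_unit_size).
 * The low bit of the end sentinel is set so that pcpu_free_area()
 * never tries to merge a freed area past the end of the chunk.
 */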
static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the corresponding struct page
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	int occ_pages = 0;
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 16bit alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	if (!is_atomic)
		mutex_lock(&pcpu_alloc_mutex);

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (is_atomic ||
			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, is_atomic,
				      &occ_pages);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
			if (new_alloc) {
				if (is_atomic)
					continue;
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
area map"; 935b38d08f3STejun Heo goto fail; 936833af842STejun Heo } 937833af842STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 938833af842STejun Heo /* 939833af842STejun Heo * pcpu_lock has been dropped, need to 940833af842STejun Heo * restart cpu_slot list walking. 941833af842STejun Heo */ 942833af842STejun Heo goto restart; 943ccea34b5STejun Heo } 944ccea34b5STejun Heo 945b539b87fSTejun Heo off = pcpu_alloc_area(chunk, size, align, is_atomic, 946b539b87fSTejun Heo &occ_pages); 947fbf59bc9STejun Heo if (off >= 0) 948fbf59bc9STejun Heo goto area_found; 949fbf59bc9STejun Heo } 950fbf59bc9STejun Heo } 951fbf59bc9STejun Heo 952403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 953ccea34b5STejun Heo 954b38d08f3STejun Heo /* 955b38d08f3STejun Heo * No space left. Create a new chunk. We don't want multiple 956b38d08f3STejun Heo * tasks to create chunks simultaneously. Serialize and create iff 957b38d08f3STejun Heo * there's still no empty chunk after grabbing the mutex. 958b38d08f3STejun Heo */ 959*11df02bfSDennis Zhou if (is_atomic) { 960*11df02bfSDennis Zhou err = "atomic alloc failed, no space left"; 9615835d96eSTejun Heo goto fail; 962*11df02bfSDennis Zhou } 9635835d96eSTejun Heo 964b38d08f3STejun Heo if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { 9656081089fSTejun Heo chunk = pcpu_create_chunk(); 966f2badb0cSTejun Heo if (!chunk) { 967f2badb0cSTejun Heo err = "failed to allocate new chunk"; 968b38d08f3STejun Heo goto fail; 969f2badb0cSTejun Heo } 970ccea34b5STejun Heo 971403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 972fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 973b38d08f3STejun Heo } else { 974b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 975b38d08f3STejun Heo } 976b38d08f3STejun Heo 977ccea34b5STejun Heo goto restart; 978fbf59bc9STejun Heo 979fbf59bc9STejun Heo area_found: 98030a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size); 981403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 982ccea34b5STejun Heo 983dca49645STejun Heo /* populate if not all pages are already there */ 9845835d96eSTejun Heo if (!is_atomic) { 985e04d3208STejun Heo int page_start, page_end, rs, re; 986e04d3208STejun Heo 987dca49645STejun Heo page_start = PFN_DOWN(off); 988dca49645STejun Heo page_end = PFN_UP(off + size); 989dca49645STejun Heo 990a93ace48STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 991dca49645STejun Heo WARN_ON(chunk->immutable); 992dca49645STejun Heo 993b38d08f3STejun Heo ret = pcpu_populate_chunk(chunk, rs, re); 994b38d08f3STejun Heo 995403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 996b38d08f3STejun Heo if (ret) { 997b539b87fSTejun Heo pcpu_free_area(chunk, off, &occ_pages); 998f2badb0cSTejun Heo err = "failed to populate"; 999ccea34b5STejun Heo goto fail_unlock; 1000fbf59bc9STejun Heo } 1001b539b87fSTejun Heo pcpu_chunk_populated(chunk, rs, re); 1002b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1003dca49645STejun Heo } 1004dca49645STejun Heo 1005ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1006e04d3208STejun Heo } 1007ccea34b5STejun Heo 1008320661b0STahsin Erdogan if (chunk != pcpu_reserved_chunk) { 1009320661b0STahsin Erdogan spin_lock_irqsave(&pcpu_lock, flags); 1010b539b87fSTejun Heo pcpu_nr_empty_pop_pages -= occ_pages; 1011320661b0STahsin Erdogan spin_unlock_irqrestore(&pcpu_lock, flags); 1012320661b0STahsin Erdogan } 1013b539b87fSTejun Heo 10141a4d7607STejun Heo if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 10151a4d7607STejun Heo 
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
				  chunk->base_addr, off, ptr);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
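/*
 * Usage sketch (an editorial addition, not part of the original
 * source): a typical caller pairs these allocators with the this_cpu
 * accessors, e.g.
 *
 *	int __percpu *cnt = alloc_percpu(int);
 *	if (cnt)
 *		this_cpu_inc(*cnt);
 *	...
 *	free_percpu(cnt);
 *
 * where alloc_percpu() is the type-safe wrapper around
 * __alloc_percpu() provided by the percpu headers.
 */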
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	LIST_HEAD(to_free);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;
	int slot, nr_to_pop, ret;

	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce.  Destroy all free chunks except for one.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_del_init(&chunk->map_extend_list);
		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}

	/* service chunks which requested async area map extension */
	do {
		int new_alloc = 0;

		spin_lock_irq(&pcpu_lock);

		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
					struct pcpu_chunk, map_extend_list);
		if (chunk) {
			list_del_init(&chunk->map_extend_list);
			new_alloc = pcpu_need_to_extend(chunk, false);
		}

		spin_unlock_irq(&pcpu_lock);

		if (new_alloc)
			pcpu_extend_area_map(chunk, new_alloc);
	} while (chunk);

	/*
	 * Ensure there are a certain number of free populated pages for
	 * atomic allocs.  Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation.  If atomic allocation
	 * failed previously, always populate the maximum amount.  This
	 * should prevent atomic allocs larger than PAGE_SIZE from failing
	 * indefinitely; however, large atomic allocs are not something we
	 * support properly and can be highly unreliable and inefficient.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}
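	/*
	 * Worked example (an editorial addition, not part of the
	 * original source): with PCPU_EMPTY_POP_PAGES_HIGH == 4 and one
	 * empty populated page currently available, the clamp above
	 * yields nr_to_pop == 3; after a failed atomic allocation the
	 * full 4 pages are targeted regardless of the current count.
	 */
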
11771a4d7607STejun Heo */ 11781a4d7607STejun Heo retry_pop: 11791a4d7607STejun Heo if (pcpu_atomic_alloc_failed) { 11801a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 11811a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */ 11821a4d7607STejun Heo pcpu_atomic_alloc_failed = false; 11831a4d7607STejun Heo } else { 11841a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 11851a4d7607STejun Heo pcpu_nr_empty_pop_pages, 11861a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH); 11871a4d7607STejun Heo } 11881a4d7607STejun Heo 11891a4d7607STejun Heo for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { 11901a4d7607STejun Heo int nr_unpop = 0, rs, re; 11911a4d7607STejun Heo 11921a4d7607STejun Heo if (!nr_to_pop) 11931a4d7607STejun Heo break; 11941a4d7607STejun Heo 11951a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 11961a4d7607STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 11971a4d7607STejun Heo nr_unpop = pcpu_unit_pages - chunk->nr_populated; 11981a4d7607STejun Heo if (nr_unpop) 11991a4d7607STejun Heo break; 12001a4d7607STejun Heo } 12011a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 12021a4d7607STejun Heo 12031a4d7607STejun Heo if (!nr_unpop) 12041a4d7607STejun Heo continue; 12051a4d7607STejun Heo 12061a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */ 12071a4d7607STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) { 12081a4d7607STejun Heo int nr = min(re - rs, nr_to_pop); 12091a4d7607STejun Heo 12101a4d7607STejun Heo ret = pcpu_populate_chunk(chunk, rs, rs + nr); 12111a4d7607STejun Heo if (!ret) { 12121a4d7607STejun Heo nr_to_pop -= nr; 12131a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 12141a4d7607STejun Heo pcpu_chunk_populated(chunk, rs, rs + nr); 12151a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 12161a4d7607STejun Heo } else { 12171a4d7607STejun Heo nr_to_pop = 0; 12181a4d7607STejun Heo } 12191a4d7607STejun Heo 12201a4d7607STejun Heo if (!nr_to_pop) 12211a4d7607STejun Heo break; 12221a4d7607STejun Heo } 12231a4d7607STejun Heo } 12241a4d7607STejun Heo 12251a4d7607STejun Heo if (nr_to_pop) { 12261a4d7607STejun Heo /* ran out of chunks to populate, create a new one and retry */ 12271a4d7607STejun Heo chunk = pcpu_create_chunk(); 12281a4d7607STejun Heo if (chunk) { 12291a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 12301a4d7607STejun Heo pcpu_chunk_relocate(chunk, -1); 12311a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 12321a4d7607STejun Heo goto retry_pop; 12331a4d7607STejun Heo } 12341a4d7607STejun Heo } 12351a4d7607STejun Heo 1236971f3918STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1237a56dbddfSTejun Heo } 1238fbf59bc9STejun Heo 1239fbf59bc9STejun Heo /** 1240fbf59bc9STejun Heo * free_percpu - free percpu area 1241fbf59bc9STejun Heo * @ptr: pointer to area to free 1242fbf59bc9STejun Heo * 1243ccea34b5STejun Heo * Free percpu area @ptr. 1244ccea34b5STejun Heo * 1245ccea34b5STejun Heo * CONTEXT: 1246ccea34b5STejun Heo * Can be called from atomic context. 
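 *
 * A minimal caller-side sketch (hypothetical counter, not part of this
 * file) showing the usual pairing with the allocation side:
 *
 *	int __percpu *cnt = alloc_percpu(int);
 *	int cpu, sum = 0;
 *
 *	if (cnt) {
 *		this_cpu_inc(*cnt);
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(cnt, cpu);
 *		free_percpu(cnt);
 *	}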
1247fbf59bc9STejun Heo  */
124843cf38ebSTejun Heo void free_percpu(void __percpu *ptr)
1249fbf59bc9STejun Heo {
1250129182e5SAndrew Morton 	void *addr;
1251fbf59bc9STejun Heo 	struct pcpu_chunk *chunk;
1252ccea34b5STejun Heo 	unsigned long flags;
1253b539b87fSTejun Heo 	int off, occ_pages;
1254fbf59bc9STejun Heo 
1255fbf59bc9STejun Heo 	if (!ptr)
1256fbf59bc9STejun Heo 		return;
1257fbf59bc9STejun Heo 
1258f528f0b8SCatalin Marinas 	kmemleak_free_percpu(ptr);
1259f528f0b8SCatalin Marinas 
1260129182e5SAndrew Morton 	addr = __pcpu_ptr_to_addr(ptr);
1261129182e5SAndrew Morton 
1262ccea34b5STejun Heo 	spin_lock_irqsave(&pcpu_lock, flags);
1263fbf59bc9STejun Heo 
1264fbf59bc9STejun Heo 	chunk = pcpu_chunk_addr_search(addr);
1265bba174f5STejun Heo 	off = addr - chunk->base_addr;
1266fbf59bc9STejun Heo 
1267b539b87fSTejun Heo 	pcpu_free_area(chunk, off, &occ_pages);
1268b539b87fSTejun Heo 
1269b539b87fSTejun Heo 	if (chunk != pcpu_reserved_chunk)
1270b539b87fSTejun Heo 		pcpu_nr_empty_pop_pages += occ_pages;
1271fbf59bc9STejun Heo 
1272a56dbddfSTejun Heo 	/* if there is more than one fully free chunk, wake up the grim reaper */
1273fbf59bc9STejun Heo 	if (chunk->free_size == pcpu_unit_size) {
1274fbf59bc9STejun Heo 		struct pcpu_chunk *pos;
1275fbf59bc9STejun Heo 
1276a56dbddfSTejun Heo 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1277fbf59bc9STejun Heo 			if (pos != chunk) {
12781a4d7607STejun Heo 				pcpu_schedule_balance_work();
1279fbf59bc9STejun Heo 				break;
1280fbf59bc9STejun Heo 			}
1281fbf59bc9STejun Heo 	}
1282fbf59bc9STejun Heo 
1283df95e795SDennis Zhou 	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
1284df95e795SDennis Zhou 
1285ccea34b5STejun Heo 	spin_unlock_irqrestore(&pcpu_lock, flags);
1286fbf59bc9STejun Heo }
1287fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu);
1288fbf59bc9STejun Heo 
1289383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1290383776faSThomas Gleixner {
1291383776faSThomas Gleixner #ifdef CONFIG_SMP
1292383776faSThomas Gleixner 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1293383776faSThomas Gleixner 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1294383776faSThomas Gleixner 	unsigned int cpu;
1295383776faSThomas Gleixner 
1296383776faSThomas Gleixner 	for_each_possible_cpu(cpu) {
1297383776faSThomas Gleixner 		void *start = per_cpu_ptr(base, cpu);
1298383776faSThomas Gleixner 		void *va = (void *)addr;
1299383776faSThomas Gleixner 
1300383776faSThomas Gleixner 		if (va >= start && va < start + static_size) {
13018ce371f9SPeter Zijlstra 			if (can_addr) {
1302383776faSThomas Gleixner 				*can_addr = (unsigned long) (va - start);
13038ce371f9SPeter Zijlstra 				*can_addr += (unsigned long)
13048ce371f9SPeter Zijlstra 					per_cpu_ptr(base, get_boot_cpu_id());
13058ce371f9SPeter Zijlstra 			}
1306383776faSThomas Gleixner 			return true;
1307383776faSThomas Gleixner 		}
1308383776faSThomas Gleixner 	}
1309383776faSThomas Gleixner #endif
1310383776faSThomas Gleixner 	/* on UP, can't distinguish from other static vars, always false */
1311383776faSThomas Gleixner 	return false;
1312383776faSThomas Gleixner }
1313383776faSThomas Gleixner 
13143b034b0dSVivek Goyal /**
131510fad5e4STejun Heo  * is_kernel_percpu_address - test whether address is from static percpu area
131610fad5e4STejun Heo  * @addr: address to test
131710fad5e4STejun Heo  *
131810fad5e4STejun Heo  * Test whether @addr belongs to the in-kernel static percpu area.  Module
131910fad5e4STejun Heo  * static percpu areas are not considered.  For those, use
132010fad5e4STejun Heo  * is_module_percpu_address().
132110fad5e4STejun Heo  *
132210fad5e4STejun Heo  * RETURNS:
132310fad5e4STejun Heo  * %true if @addr is from in-kernel static percpu area, %false otherwise.
132410fad5e4STejun Heo  */
132510fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr)
132610fad5e4STejun Heo {
1327383776faSThomas Gleixner 	return __is_kernel_percpu_address(addr, NULL);
132810fad5e4STejun Heo }
132910fad5e4STejun Heo 
133010fad5e4STejun Heo /**
13313b034b0dSVivek Goyal  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
13323b034b0dSVivek Goyal  * @addr: the address to be converted to physical address
13333b034b0dSVivek Goyal  *
13343b034b0dSVivek Goyal  * Given @addr, a dereferenceable address obtained via one of the
13353b034b0dSVivek Goyal  * percpu access macros, this function translates it into its physical
13363b034b0dSVivek Goyal  * address.  The caller is responsible for ensuring @addr stays valid
13373b034b0dSVivek Goyal  * until this function finishes.
13383b034b0dSVivek Goyal  *
133967589c71SDave Young  * The percpu allocator has special setup for the first chunk, which
134067589c71SDave Young  * currently supports either embedding in the linear address space or
134167589c71SDave Young  * vmalloc mapping, and, from the second chunk on, the backing allocator
134267589c71SDave Young  * (currently either vm or km) provides translation.
134367589c71SDave Young  *
1344bffc4375SYannick Guerrini  * @addr could be translated without checking whether it falls into the
134567589c71SDave Young  * first chunk, but the current code better reflects how the percpu
134667589c71SDave Young  * allocator actually works, and the verification can discover bugs both
134767589c71SDave Young  * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
134867589c71SDave Young  * So we keep the current code.
134967589c71SDave Young  *
13503b034b0dSVivek Goyal  * RETURNS:
13513b034b0dSVivek Goyal  * The physical address for @addr.
13523b034b0dSVivek Goyal  */
13533b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr)
13543b034b0dSVivek Goyal {
13559983b6f0STejun Heo 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
13569983b6f0STejun Heo 	bool in_first_chunk = false;
1357a855b84cSTejun Heo 	unsigned long first_low, first_high;
13589983b6f0STejun Heo 	unsigned int cpu;
13599983b6f0STejun Heo 
13609983b6f0STejun Heo 	/*
1361a855b84cSTejun Heo 	 * The following test on unit_low/high isn't strictly
13629983b6f0STejun Heo 	 * necessary but will speed up lookups of addresses which
13639983b6f0STejun Heo 	 * aren't in the first chunk.
13649983b6f0STejun Heo */ 1365a855b84cSTejun Heo first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0); 1366a855b84cSTejun Heo first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu, 13679983b6f0STejun Heo pcpu_unit_pages); 1368a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 1369a855b84cSTejun Heo (unsigned long)addr < first_high) { 13709983b6f0STejun Heo for_each_possible_cpu(cpu) { 13719983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 13729983b6f0STejun Heo 13739983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 13749983b6f0STejun Heo in_first_chunk = true; 13759983b6f0STejun Heo break; 13769983b6f0STejun Heo } 13779983b6f0STejun Heo } 13789983b6f0STejun Heo } 13799983b6f0STejun Heo 13809983b6f0STejun Heo if (in_first_chunk) { 1381eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 13823b034b0dSVivek Goyal return __pa(addr); 13833b034b0dSVivek Goyal else 13849f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 13859f57bd4dSEugene Surovegin offset_in_page(addr); 1386020ec653STejun Heo } else 13879f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 13889f57bd4dSEugene Surovegin offset_in_page(addr); 13893b034b0dSVivek Goyal } 13903b034b0dSVivek Goyal 1391fbf59bc9STejun Heo /** 1392fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 1393fd1e8a1fSTejun Heo * @nr_groups: the number of groups 1394fd1e8a1fSTejun Heo * @nr_units: the number of units 1395033e48fbSTejun Heo * 1396fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 1397fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 1398fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 1399fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 1400fd1e8a1fSTejun Heo * pointer of other groups. 1401033e48fbSTejun Heo * 1402033e48fbSTejun Heo * RETURNS: 1403fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 1404fd1e8a1fSTejun Heo * failure. 
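 *
 * A minimal sketch of the expected caller pattern (hypothetical
 * two-group split of four units; error handling elided):
 *
 *	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(2, 4);
 *
 *	if (ai) {
 *		ai->groups[0].nr_units = 2;
 *		ai->groups[1].cpu_map = ai->groups[0].cpu_map + 2;
 *		ai->groups[1].nr_units = 2;
 *	}
 *
 * after which each gi->cpu_map[] entry and the size fields still need
 * to be filled in before @ai can be handed to pcpu_setup_first_chunk().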
1405033e48fbSTejun Heo */ 1406fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 1407fd1e8a1fSTejun Heo int nr_units) 1408fd1e8a1fSTejun Heo { 1409fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 1410fd1e8a1fSTejun Heo size_t base_size, ai_size; 1411fd1e8a1fSTejun Heo void *ptr; 1412fd1e8a1fSTejun Heo int unit; 1413fd1e8a1fSTejun Heo 1414fd1e8a1fSTejun Heo base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), 1415fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 1416fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 1417fd1e8a1fSTejun Heo 1418999c17e3SSantosh Shilimkar ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); 1419fd1e8a1fSTejun Heo if (!ptr) 1420fd1e8a1fSTejun Heo return NULL; 1421fd1e8a1fSTejun Heo ai = ptr; 1422fd1e8a1fSTejun Heo ptr += base_size; 1423fd1e8a1fSTejun Heo 1424fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 1425fd1e8a1fSTejun Heo 1426fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 1427fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 1428fd1e8a1fSTejun Heo 1429fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 1430fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 1431fd1e8a1fSTejun Heo 1432fd1e8a1fSTejun Heo return ai; 1433fd1e8a1fSTejun Heo } 1434fd1e8a1fSTejun Heo 1435fd1e8a1fSTejun Heo /** 1436fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 1437fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 1438fd1e8a1fSTejun Heo * 1439fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 1440fd1e8a1fSTejun Heo */ 1441fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 1442fd1e8a1fSTejun Heo { 1443999c17e3SSantosh Shilimkar memblock_free_early(__pa(ai), ai->__ai_size); 1444fd1e8a1fSTejun Heo } 1445fd1e8a1fSTejun Heo 1446fd1e8a1fSTejun Heo /** 1447fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1448fd1e8a1fSTejun Heo * @lvl: loglevel 1449fd1e8a1fSTejun Heo * @ai: allocation info to dump 1450fd1e8a1fSTejun Heo * 1451fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 
1452fd1e8a1fSTejun Heo  */
1453fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
1454fd1e8a1fSTejun Heo 				 const struct pcpu_alloc_info *ai)
1455033e48fbSTejun Heo {
1456fd1e8a1fSTejun Heo 	int group_width = 1, cpu_width = 1, width;
1457033e48fbSTejun Heo 	char empty_str[] = "--------";
1458fd1e8a1fSTejun Heo 	int alloc = 0, alloc_end = 0;
1459fd1e8a1fSTejun Heo 	int group, v;
1460fd1e8a1fSTejun Heo 	int upa, apl;	/* units per alloc, allocs per line */
1461033e48fbSTejun Heo 
1462fd1e8a1fSTejun Heo 	v = ai->nr_groups;
1463033e48fbSTejun Heo 	while (v /= 10)
1464fd1e8a1fSTejun Heo 		group_width++;
1465033e48fbSTejun Heo 
1466fd1e8a1fSTejun Heo 	v = num_possible_cpus();
1467fd1e8a1fSTejun Heo 	while (v /= 10)
1468fd1e8a1fSTejun Heo 		cpu_width++;
1469fd1e8a1fSTejun Heo 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1470033e48fbSTejun Heo 
1471fd1e8a1fSTejun Heo 	upa = ai->alloc_size / ai->unit_size;
1472fd1e8a1fSTejun Heo 	width = upa * (cpu_width + 1) + group_width + 3;
1473fd1e8a1fSTejun Heo 	apl = rounddown_pow_of_two(max(60 / width, 1));
1474033e48fbSTejun Heo 
1475fd1e8a1fSTejun Heo 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1476fd1e8a1fSTejun Heo 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1477fd1e8a1fSTejun Heo 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1478fd1e8a1fSTejun Heo 
1479fd1e8a1fSTejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
1480fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
1481fd1e8a1fSTejun Heo 		int unit = 0, unit_end = 0;
1482fd1e8a1fSTejun Heo 
1483fd1e8a1fSTejun Heo 		BUG_ON(gi->nr_units % upa);
1484fd1e8a1fSTejun Heo 		for (alloc_end += gi->nr_units / upa;
1485fd1e8a1fSTejun Heo 		     alloc < alloc_end; alloc++) {
1486fd1e8a1fSTejun Heo 			if (!(alloc % apl)) {
14871170532bSJoe Perches 				pr_cont("\n");
1488fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl);
1489033e48fbSTejun Heo 			}
14901170532bSJoe Perches 			pr_cont("[%0*d] ", group_width, group);
1491fd1e8a1fSTejun Heo 
1492fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++)
1493fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS)
14941170532bSJoe Perches 					pr_cont("%0*d ",
14951170532bSJoe Perches 						cpu_width, gi->cpu_map[unit]);
1496033e48fbSTejun Heo 				else
14971170532bSJoe Perches 					pr_cont("%s ", empty_str);
1498033e48fbSTejun Heo 		}
1499fd1e8a1fSTejun Heo 	}
15001170532bSJoe Perches 	pr_cont("\n");
1501033e48fbSTejun Heo }
1502033e48fbSTejun Heo 
1503fbf59bc9STejun Heo /**
15048d408b4bSTejun Heo  * pcpu_setup_first_chunk - initialize the first percpu chunk
1505fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info describing how the percpu area is shaped
150638a6be52STejun Heo  * @base_addr: mapped address
1507fbf59bc9STejun Heo  *
15088d408b4bSTejun Heo  * Initialize the first percpu chunk which contains the kernel static
15098d408b4bSTejun Heo  * percpu area.  This function is to be called from the arch percpu area
151038a6be52STejun Heo  * setup path.
15118d408b4bSTejun Heo  *
1512fd1e8a1fSTejun Heo  * @ai contains all information necessary to initialize the first
1513fd1e8a1fSTejun Heo  * chunk and prime the dynamic percpu allocator.
15148d408b4bSTejun Heo  *
1515fd1e8a1fSTejun Heo  * @ai->static_size is the size of the static percpu area.
1516fd1e8a1fSTejun Heo  *
1517fd1e8a1fSTejun Heo  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1518edcb4639STejun Heo  * reserve after the static area in the first chunk.  This reserves
1519edcb4639STejun Heo  * the first chunk such that it's available only through reserved
1520edcb4639STejun Heo  * percpu allocation.  This is primarily used to serve module percpu
1521edcb4639STejun Heo  * static areas on architectures where the addressing model has
1522edcb4639STejun Heo  * limited offset range for symbol relocations to guarantee module
1523edcb4639STejun Heo  * percpu symbols fall inside the relocatable range.
1524edcb4639STejun Heo  *
1525fd1e8a1fSTejun Heo  * @ai->dyn_size determines the number of bytes available for dynamic
1526fd1e8a1fSTejun Heo  * allocation in the first chunk.  The area between @ai->static_size +
1527fd1e8a1fSTejun Heo  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
15286074d5b0STejun Heo  *
1529fd1e8a1fSTejun Heo  * @ai->unit_size specifies the unit size and must be aligned to PAGE_SIZE
1530fd1e8a1fSTejun Heo  * and equal to or larger than @ai->static_size + @ai->reserved_size +
1531fd1e8a1fSTejun Heo  * @ai->dyn_size.
15328d408b4bSTejun Heo  *
1533fd1e8a1fSTejun Heo  * @ai->atom_size is the allocation atom size and is used as alignment
1534fd1e8a1fSTejun Heo  * for vm areas.
15358d408b4bSTejun Heo  *
1536fd1e8a1fSTejun Heo  * @ai->alloc_size is the allocation size and always a multiple of
1537fd1e8a1fSTejun Heo  * @ai->atom_size.  This is larger than @ai->atom_size if
1538fd1e8a1fSTejun Heo  * @ai->unit_size is larger than @ai->atom_size.
1539fd1e8a1fSTejun Heo  *
1540fd1e8a1fSTejun Heo  * @ai->nr_groups and @ai->groups describe the virtual memory layout of
1541fd1e8a1fSTejun Heo  * percpu areas.  Units which should be colocated are put into the
1542fd1e8a1fSTejun Heo  * same group.  Dynamic VM areas will be allocated according to these
1543fd1e8a1fSTejun Heo  * groupings.  If @ai->nr_groups is zero, a single group containing
1544fd1e8a1fSTejun Heo  * all units is assumed.
15458d408b4bSTejun Heo  *
154638a6be52STejun Heo  * The caller should have mapped the first chunk at @base_addr and
154738a6be52STejun Heo  * copied static data to each unit.
1548fbf59bc9STejun Heo  *
1549edcb4639STejun Heo  * If the first chunk ends up with both reserved and dynamic areas, it
1550edcb4639STejun Heo  * is served by two chunks - one to serve the core static and reserved
1551edcb4639STejun Heo  * areas and the other for the dynamic area.  They share the same vm
1552edcb4639STejun Heo  * and page map but use different area allocation maps to stay away
1553edcb4639STejun Heo  * from each other.  The latter chunk is circulated in the chunk slots
1554edcb4639STejun Heo  * and available for dynamic allocation like any other chunk.
1555edcb4639STejun Heo  *
1556fbf59bc9STejun Heo  * RETURNS:
1557fb435d52STejun Heo  * 0 on success, -errno on failure.
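 *
 * Putting the size fields together, one unit of the first chunk looks
 * like this (derived from the constraints above, not to scale):
 *
 *	<----------------------- ai->unit_size ----------------------->
 *	+-------------+---------------+----------+--------------------+
 *	| static_size | reserved_size | dyn_size |       unused       |
 *	+-------------+---------------+----------+--------------------+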
1558fbf59bc9STejun Heo */ 1559fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1560fd1e8a1fSTejun Heo void *base_addr) 1561fbf59bc9STejun Heo { 1562099a19d9STejun Heo static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1563099a19d9STejun Heo static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1564fd1e8a1fSTejun Heo size_t dyn_size = ai->dyn_size; 1565fd1e8a1fSTejun Heo size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; 1566edcb4639STejun Heo struct pcpu_chunk *schunk, *dchunk = NULL; 15676563297cSTejun Heo unsigned long *group_offsets; 15686563297cSTejun Heo size_t *group_sizes; 1569fb435d52STejun Heo unsigned long *unit_off; 1570fbf59bc9STejun Heo unsigned int cpu; 1571fd1e8a1fSTejun Heo int *unit_map; 1572fd1e8a1fSTejun Heo int group, unit, i; 1573fbf59bc9STejun Heo 1574635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 1575635b75fcSTejun Heo if (unlikely(cond)) { \ 1576870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \ 1577870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \ 1578807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \ 1579635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 1580635b75fcSTejun Heo BUG(); \ 1581635b75fcSTejun Heo } \ 1582635b75fcSTejun Heo } while (0) 1583635b75fcSTejun Heo 15842f39e637STejun Heo /* sanity checks */ 1585635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1586bbddff05STejun Heo #ifdef CONFIG_SMP 1587635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 1588f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 1589bbddff05STejun Heo #endif 1590635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 1591f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 1592635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1593f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 1594635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 1595099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 15969f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 15978d408b4bSTejun Heo 15986563297cSTejun Heo /* process group information and build config tables accordingly */ 1599999c17e3SSantosh Shilimkar group_offsets = memblock_virt_alloc(ai->nr_groups * 1600999c17e3SSantosh Shilimkar sizeof(group_offsets[0]), 0); 1601999c17e3SSantosh Shilimkar group_sizes = memblock_virt_alloc(ai->nr_groups * 1602999c17e3SSantosh Shilimkar sizeof(group_sizes[0]), 0); 1603999c17e3SSantosh Shilimkar unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); 1604999c17e3SSantosh Shilimkar unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); 16052f39e637STejun Heo 1606fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1607ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 1608a855b84cSTejun Heo 1609a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 1610a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 16112f39e637STejun Heo 1612fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1613fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 16142f39e637STejun Heo 16156563297cSTejun Heo group_offsets[group] = gi->base_offset; 16166563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 16176563297cSTejun Heo 1618fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 1619fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 1620fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 
1621fd1e8a1fSTejun Heo continue; 1622fd1e8a1fSTejun Heo 16239f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 1624635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1625635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1626fd1e8a1fSTejun Heo 1627fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 1628fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1629fb435d52STejun Heo 1630a855b84cSTejun Heo /* determine low/high unit_cpu */ 1631a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 1632a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 1633a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 1634a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 1635a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 1636a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 16370fc0531eSLinus Torvalds } 16380fc0531eSLinus Torvalds } 1639fd1e8a1fSTejun Heo pcpu_nr_units = unit; 16402f39e637STejun Heo 16412f39e637STejun Heo for_each_possible_cpu(cpu) 1642635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 1643635b75fcSTejun Heo 1644635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 1645635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 1646bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 16472f39e637STejun Heo 16486563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 16496563297cSTejun Heo pcpu_group_offsets = group_offsets; 16506563297cSTejun Heo pcpu_group_sizes = group_sizes; 1651fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 1652fb435d52STejun Heo pcpu_unit_offsets = unit_off; 16532f39e637STejun Heo 16542f39e637STejun Heo /* determine basic parameters */ 1655fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 1656d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 16576563297cSTejun Heo pcpu_atom_size = ai->atom_size; 1658ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1659ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1660cafe8816STejun Heo 166130a5b536SDennis Zhou pcpu_stats_save_ai(ai); 166230a5b536SDennis Zhou 1663d9b55eebSTejun Heo /* 1664d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 1665d9b55eebSTejun Heo * empty chunks. 1666d9b55eebSTejun Heo */ 1667d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1668999c17e3SSantosh Shilimkar pcpu_slot = memblock_virt_alloc( 1669999c17e3SSantosh Shilimkar pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); 1670fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 1671fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 1672fbf59bc9STejun Heo 1673edcb4639STejun Heo /* 1674edcb4639STejun Heo * Initialize static chunk. If reserved_size is zero, the 1675edcb4639STejun Heo * static chunk covers static area + dynamic allocation area 1676edcb4639STejun Heo * in the first chunk. If reserved_size is not zero, it 1677edcb4639STejun Heo * covers static area + reserved area (mostly used for module 1678edcb4639STejun Heo * static percpu allocation). 
1679edcb4639STejun Heo */ 1680999c17e3SSantosh Shilimkar schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 16812441d15cSTejun Heo INIT_LIST_HEAD(&schunk->list); 16824f996e23STejun Heo INIT_LIST_HEAD(&schunk->map_extend_list); 1683bba174f5STejun Heo schunk->base_addr = base_addr; 168461ace7faSTejun Heo schunk->map = smap; 168561ace7faSTejun Heo schunk->map_alloc = ARRAY_SIZE(smap); 168638a6be52STejun Heo schunk->immutable = true; 1687ce3141a2STejun Heo bitmap_fill(schunk->populated, pcpu_unit_pages); 1688b539b87fSTejun Heo schunk->nr_populated = pcpu_unit_pages; 1689edcb4639STejun Heo 1690fd1e8a1fSTejun Heo if (ai->reserved_size) { 1691fd1e8a1fSTejun Heo schunk->free_size = ai->reserved_size; 1692ae9e6bc9STejun Heo pcpu_reserved_chunk = schunk; 1693fd1e8a1fSTejun Heo pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; 1694edcb4639STejun Heo } else { 16952441d15cSTejun Heo schunk->free_size = dyn_size; 1696edcb4639STejun Heo dyn_size = 0; /* dynamic area covered */ 1697edcb4639STejun Heo } 16982441d15cSTejun Heo schunk->contig_hint = schunk->free_size; 1699fbf59bc9STejun Heo 1700723ad1d9SAl Viro schunk->map[0] = 1; 1701723ad1d9SAl Viro schunk->map[1] = ai->static_size; 1702723ad1d9SAl Viro schunk->map_used = 1; 170361ace7faSTejun Heo if (schunk->free_size) 1704292c24a0SBaoquan He schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size; 1705292c24a0SBaoquan He schunk->map[schunk->map_used] |= 1; 170630a5b536SDennis Zhou schunk->has_reserved = true; 170761ace7faSTejun Heo 1708edcb4639STejun Heo /* init dynamic chunk if necessary */ 1709edcb4639STejun Heo if (dyn_size) { 1710999c17e3SSantosh Shilimkar dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 1711edcb4639STejun Heo INIT_LIST_HEAD(&dchunk->list); 17124f996e23STejun Heo INIT_LIST_HEAD(&dchunk->map_extend_list); 1713bba174f5STejun Heo dchunk->base_addr = base_addr; 1714edcb4639STejun Heo dchunk->map = dmap; 1715edcb4639STejun Heo dchunk->map_alloc = ARRAY_SIZE(dmap); 171638a6be52STejun Heo dchunk->immutable = true; 1717ce3141a2STejun Heo bitmap_fill(dchunk->populated, pcpu_unit_pages); 1718b539b87fSTejun Heo dchunk->nr_populated = pcpu_unit_pages; 1719edcb4639STejun Heo 1720edcb4639STejun Heo dchunk->contig_hint = dchunk->free_size = dyn_size; 1721723ad1d9SAl Viro dchunk->map[0] = 1; 1722723ad1d9SAl Viro dchunk->map[1] = pcpu_reserved_chunk_limit; 1723723ad1d9SAl Viro dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1; 1724723ad1d9SAl Viro dchunk->map_used = 2; 172530a5b536SDennis Zhou dchunk->has_reserved = true; 1726edcb4639STejun Heo } 1727edcb4639STejun Heo 17282441d15cSTejun Heo /* link the first chunk in */ 1729ae9e6bc9STejun Heo pcpu_first_chunk = dchunk ?: schunk; 1730b539b87fSTejun Heo pcpu_nr_empty_pop_pages += 1731b539b87fSTejun Heo pcpu_count_occupied_pages(pcpu_first_chunk, 1); 1732ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 1733fbf59bc9STejun Heo 173430a5b536SDennis Zhou pcpu_stats_chunk_alloc(); 1735df95e795SDennis Zhou trace_percpu_create_chunk(base_addr); 173630a5b536SDennis Zhou 1737fbf59bc9STejun Heo /* we're done */ 1738bba174f5STejun Heo pcpu_base_addr = base_addr; 1739fb435d52STejun Heo return 0; 1740fbf59bc9STejun Heo } 174166c3a757STejun Heo 1742bbddff05STejun Heo #ifdef CONFIG_SMP 1743bbddff05STejun Heo 174417f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 1745f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 1746f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 1747f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 
1748f58dc01bSTejun Heo }; 174966c3a757STejun Heo 1750f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 1751f58dc01bSTejun Heo 1752f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 175366c3a757STejun Heo { 17545479c78aSCyrill Gorcunov if (!str) 17555479c78aSCyrill Gorcunov return -EINVAL; 17565479c78aSCyrill Gorcunov 1757f58dc01bSTejun Heo if (0) 1758f58dc01bSTejun Heo /* nada */; 1759f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 1760f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 1761f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 1762f58dc01bSTejun Heo #endif 1763f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1764f58dc01bSTejun Heo else if (!strcmp(str, "page")) 1765f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 1766f58dc01bSTejun Heo #endif 1767f58dc01bSTejun Heo else 1768870d4b12SJoe Perches pr_warn("unknown allocator %s specified\n", str); 176966c3a757STejun Heo 1770f58dc01bSTejun Heo return 0; 177166c3a757STejun Heo } 1772f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup); 177366c3a757STejun Heo 17743c9a024fSTejun Heo /* 17753c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup. 17763c9a024fSTejun Heo * Build it if needed by the arch config or the generic setup is going 17773c9a024fSTejun Heo * to be used. 17783c9a024fSTejun Heo */ 177908fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 178008fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 17813c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 17823c9a024fSTejun Heo #endif 17833c9a024fSTejun Heo 17843c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 17853c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 17863c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 17873c9a024fSTejun Heo #endif 17883c9a024fSTejun Heo 17893c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 17903c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 17913c9a024fSTejun Heo /** 1792fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 1793fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1794fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 1795fbf59bc9STejun Heo * @atom_size: allocation atom size 1796fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1797fbf59bc9STejun Heo * 1798fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 1799fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 1800fbf59bc9STejun Heo * atom size and distances between CPUs. 1801fbf59bc9STejun Heo * 1802bffc4375SYannick Guerrini * Groups are always multiples of atom size and CPUs which are of 1803fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 1804fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 1805fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage 1806fbf59bc9STejun Heo * of allocated virtual address space. 1807fbf59bc9STejun Heo * 1808fbf59bc9STejun Heo * RETURNS: 1809fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On 1810fbf59bc9STejun Heo * failure, ERR_PTR value is returned. 
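 *
 * As a worked example (hypothetical topology): with cpus 0-1 on node 0,
 * cpus 2-3 on node 1 and @cpu_distance_fn returning LOCAL_DISTANCE only
 * within a node, the grouping pass below ends up with group_map =
 * {0, 0, 1, 1} and group_cnt = {2, 2}, i.e. two groups whose units are
 * allocated node-locally.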
1811fbf59bc9STejun Heo */ 1812fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 1813fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 1814fbf59bc9STejun Heo size_t atom_size, 1815fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1816fbf59bc9STejun Heo { 1817fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 1818fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 1819fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 1820fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 1821fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 1822fbf59bc9STejun Heo int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 1823fbf59bc9STejun Heo int last_allocs, group, unit; 1824fbf59bc9STejun Heo unsigned int cpu, tcpu; 1825fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 1826fbf59bc9STejun Heo unsigned int *cpu_map; 1827fbf59bc9STejun Heo 1828fbf59bc9STejun Heo /* this function may be called multiple times */ 1829fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 1830fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 1831fbf59bc9STejun Heo 1832fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 1833fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 1834fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 1835fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 1836fbf59bc9STejun Heo 1837fbf59bc9STejun Heo /* 1838fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 1839fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 184025985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 1841fbf59bc9STejun Heo * or larger than min_unit_size. 1842fbf59bc9STejun Heo */ 1843fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1844fbf59bc9STejun Heo 1845fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 1846fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 1847f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 1848fbf59bc9STejun Heo upa--; 1849fbf59bc9STejun Heo max_upa = upa; 1850fbf59bc9STejun Heo 1851fbf59bc9STejun Heo /* group cpus according to their proximity */ 1852fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 1853fbf59bc9STejun Heo group = 0; 1854fbf59bc9STejun Heo next_group: 1855fbf59bc9STejun Heo for_each_possible_cpu(tcpu) { 1856fbf59bc9STejun Heo if (cpu == tcpu) 1857fbf59bc9STejun Heo break; 1858fbf59bc9STejun Heo if (group_map[tcpu] == group && cpu_distance_fn && 1859fbf59bc9STejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 1860fbf59bc9STejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 1861fbf59bc9STejun Heo group++; 1862fbf59bc9STejun Heo nr_groups = max(nr_groups, group + 1); 1863fbf59bc9STejun Heo goto next_group; 1864fbf59bc9STejun Heo } 1865fbf59bc9STejun Heo } 1866fbf59bc9STejun Heo group_map[cpu] = group; 1867fbf59bc9STejun Heo group_cnt[group]++; 1868fbf59bc9STejun Heo } 1869fbf59bc9STejun Heo 1870fbf59bc9STejun Heo /* 1871fbf59bc9STejun Heo * Expand unit size until address space usage goes over 75% 1872fbf59bc9STejun Heo * and then as much as possible without using more address 1873fbf59bc9STejun Heo * space. 
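	 *
	 * A worked example (hypothetical numbers, ignoring the
	 * alloc_size divisibility checks): one group of 5 possible
	 * CPUs, max_upa = 4.  upa = 4 needs DIV_ROUND_UP(5, 4) = 2
	 * allocs but wastes 3 units (3 > 5/3, rejected); upa = 3 also
	 * needs 2 allocs and wastes only 1 (accepted, best_upa = 3);
	 * upa = 2 would need 3 allocs, i.e. more address space, so the
	 * scan stops with best_upa = 3.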
1874fbf59bc9STejun Heo */ 1875fbf59bc9STejun Heo last_allocs = INT_MAX; 1876fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 1877fbf59bc9STejun Heo int allocs = 0, wasted = 0; 1878fbf59bc9STejun Heo 1879f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 1880fbf59bc9STejun Heo continue; 1881fbf59bc9STejun Heo 1882fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 1883fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1884fbf59bc9STejun Heo allocs += this_allocs; 1885fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 1886fbf59bc9STejun Heo } 1887fbf59bc9STejun Heo 1888fbf59bc9STejun Heo /* 1889fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 1890fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 1891fbf59bc9STejun Heo * passes the following check. 1892fbf59bc9STejun Heo */ 1893fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 1894fbf59bc9STejun Heo continue; 1895fbf59bc9STejun Heo 1896fbf59bc9STejun Heo /* and then don't consume more memory */ 1897fbf59bc9STejun Heo if (allocs > last_allocs) 1898fbf59bc9STejun Heo break; 1899fbf59bc9STejun Heo last_allocs = allocs; 1900fbf59bc9STejun Heo best_upa = upa; 1901fbf59bc9STejun Heo } 1902fbf59bc9STejun Heo upa = best_upa; 1903fbf59bc9STejun Heo 1904fbf59bc9STejun Heo /* allocate and fill alloc_info */ 1905fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 1906fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 1907fbf59bc9STejun Heo 1908fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 1909fbf59bc9STejun Heo if (!ai) 1910fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 1911fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 1912fbf59bc9STejun Heo 1913fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 1914fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 1915fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 1916fbf59bc9STejun Heo } 1917fbf59bc9STejun Heo 1918fbf59bc9STejun Heo ai->static_size = static_size; 1919fbf59bc9STejun Heo ai->reserved_size = reserved_size; 1920fbf59bc9STejun Heo ai->dyn_size = dyn_size; 1921fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 1922fbf59bc9STejun Heo ai->atom_size = atom_size; 1923fbf59bc9STejun Heo ai->alloc_size = alloc_size; 1924fbf59bc9STejun Heo 1925fbf59bc9STejun Heo for (group = 0, unit = 0; group_cnt[group]; group++) { 1926fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1927fbf59bc9STejun Heo 1928fbf59bc9STejun Heo /* 1929fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 1930fbf59bc9STejun Heo * back-to-back. The caller should update this to 1931fbf59bc9STejun Heo * reflect actual allocation. 
1932fbf59bc9STejun Heo */ 1933fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 1934fbf59bc9STejun Heo 1935fbf59bc9STejun Heo for_each_possible_cpu(cpu) 1936fbf59bc9STejun Heo if (group_map[cpu] == group) 1937fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 1938fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 1939fbf59bc9STejun Heo unit += gi->nr_units; 1940fbf59bc9STejun Heo } 1941fbf59bc9STejun Heo BUG_ON(unit != nr_units); 1942fbf59bc9STejun Heo 1943fbf59bc9STejun Heo return ai; 1944fbf59bc9STejun Heo } 19453c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 1946fbf59bc9STejun Heo 19473c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 194866c3a757STejun Heo /** 194966c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 195066c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 19514ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 1952c8826dd5STejun Heo * @atom_size: allocation atom size 1953c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1954c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 195525985edcSLucas De Marchi * @free_fn: function to free percpu page 195666c3a757STejun Heo * 195766c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 195866c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 195966c3a757STejun Heo * 196066c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 1961c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 1962c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 1963c8826dd5STejun Heo * aligned to @atom_size. 1964c8826dd5STejun Heo * 1965c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 1966c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 1967c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 1968c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 1969c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 1970c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 197166c3a757STejun Heo * 19724ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 197366c3a757STejun Heo * 197466c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 1975c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 197666c3a757STejun Heo * 197766c3a757STejun Heo * RETURNS: 1978fb435d52STejun Heo * 0 on success, -errno on failure. 
197966c3a757STejun Heo */ 19804ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 1981c8826dd5STejun Heo size_t atom_size, 1982c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 1983c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1984c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 198566c3a757STejun Heo { 1986c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 1987c8826dd5STejun Heo void **areas = NULL; 1988fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 198993c76b6bSzijun_hu size_t size_sum, areas_size; 199093c76b6bSzijun_hu unsigned long max_distance; 19919b739662Szijun_hu int group, i, highest_group, rc; 199266c3a757STejun Heo 1993c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 1994c8826dd5STejun Heo cpu_distance_fn); 1995fd1e8a1fSTejun Heo if (IS_ERR(ai)) 1996fd1e8a1fSTejun Heo return PTR_ERR(ai); 199766c3a757STejun Heo 1998fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 1999c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 200066c3a757STejun Heo 2001999c17e3SSantosh Shilimkar areas = memblock_virt_alloc_nopanic(areas_size, 0); 2002c8826dd5STejun Heo if (!areas) { 2003fb435d52STejun Heo rc = -ENOMEM; 2004c8826dd5STejun Heo goto out_free; 2005fa8a7094STejun Heo } 200666c3a757STejun Heo 20079b739662Szijun_hu /* allocate, copy and determine base address & max_distance */ 20089b739662Szijun_hu highest_group = 0; 2009c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2010c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2011c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 2012c8826dd5STejun Heo void *ptr; 201366c3a757STejun Heo 2014c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 2015c8826dd5STejun Heo cpu = gi->cpu_map[i]; 2016c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 2017c8826dd5STejun Heo 2018c8826dd5STejun Heo /* allocate space for the whole group */ 2019c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 2020c8826dd5STejun Heo if (!ptr) { 2021c8826dd5STejun Heo rc = -ENOMEM; 2022c8826dd5STejun Heo goto out_free_areas; 2023c8826dd5STejun Heo } 2024f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2025f528f0b8SCatalin Marinas kmemleak_free(ptr); 2026c8826dd5STejun Heo areas[group] = ptr; 2027c8826dd5STejun Heo 2028c8826dd5STejun Heo base = min(ptr, base); 20299b739662Szijun_hu if (ptr > areas[highest_group]) 20309b739662Szijun_hu highest_group = group; 20319b739662Szijun_hu } 20329b739662Szijun_hu max_distance = areas[highest_group] - base; 20339b739662Szijun_hu max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 20349b739662Szijun_hu 20359b739662Szijun_hu /* warn if maximum distance is further than 75% of vmalloc space */ 20369b739662Szijun_hu if (max_distance > VMALLOC_TOTAL * 3 / 4) { 20379b739662Szijun_hu pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 20389b739662Szijun_hu max_distance, VMALLOC_TOTAL); 20399b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 20409b739662Szijun_hu /* and fail if we have fallback */ 20419b739662Szijun_hu rc = -EINVAL; 20429b739662Szijun_hu goto out_free_areas; 20439b739662Szijun_hu #endif 204442b64281STejun Heo } 204542b64281STejun Heo 204642b64281STejun Heo /* 204742b64281STejun Heo * Copy data and free unused parts. 
This should happen after all 204842b64281STejun Heo * allocations are complete; otherwise, we may end up with 204942b64281STejun Heo * overlapping groups. 205042b64281STejun Heo */ 205142b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 205242b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 205342b64281STejun Heo void *ptr = areas[group]; 2054c8826dd5STejun Heo 2055c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 2056c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 2057c8826dd5STejun Heo /* unused unit, free whole */ 2058c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 2059c8826dd5STejun Heo continue; 2060c8826dd5STejun Heo } 2061c8826dd5STejun Heo /* copy and return the unused part */ 2062fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 2063c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 2064c8826dd5STejun Heo } 206566c3a757STejun Heo } 206666c3a757STejun Heo 2067c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 20686ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2069c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 20706ea529a2STejun Heo } 2071c8826dd5STejun Heo 2072870d4b12SJoe Perches pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 2073fd1e8a1fSTejun Heo PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 2074fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 207566c3a757STejun Heo 2076fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, base); 2077c8826dd5STejun Heo goto out_free; 2078c8826dd5STejun Heo 2079c8826dd5STejun Heo out_free_areas: 2080c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 2081f851c8d8SMichael Holzheu if (areas[group]) 2082c8826dd5STejun Heo free_fn(areas[group], 2083c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 2084c8826dd5STejun Heo out_free: 2085fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2086c8826dd5STejun Heo if (areas) 2087999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 2088fb435d52STejun Heo return rc; 2089d4b95f80STejun Heo } 20903c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 2091d4b95f80STejun Heo 20923c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 2093d4b95f80STejun Heo /** 209400ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 2095d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2096d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 209725985edcSLucas De Marchi * @free_fn: function to free percpu page, always called with PAGE_SIZE 2098d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 2099d4b95f80STejun Heo * 210000ae4064STejun Heo * This is a helper to ease setting up page-remapped first percpu 210100ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 2102d4b95f80STejun Heo * 2103d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 2104d4b95f80STejun Heo * page-by-page into vmalloc area. 2105d4b95f80STejun Heo * 2106d4b95f80STejun Heo * RETURNS: 2107fb435d52STejun Heo * 0 on success, -errno on failure. 
2108d4b95f80STejun Heo */ 2109fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 2110d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2111d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 2112d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 2113d4b95f80STejun Heo { 21148f05a6a6STejun Heo static struct vm_struct vm; 2115fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 211600ae4064STejun Heo char psize_str[16]; 2117ce3141a2STejun Heo int unit_pages; 2118d4b95f80STejun Heo size_t pages_size; 2119ce3141a2STejun Heo struct page **pages; 2120fb435d52STejun Heo int unit, i, j, rc; 21218f606604Szijun_hu int upa; 21228f606604Szijun_hu int nr_g0_units; 2123d4b95f80STejun Heo 212400ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 212500ae4064STejun Heo 21264ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 2127fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2128fd1e8a1fSTejun Heo return PTR_ERR(ai); 2129fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 21308f606604Szijun_hu upa = ai->alloc_size/ai->unit_size; 21318f606604Szijun_hu nr_g0_units = roundup(num_possible_cpus(), upa); 21328f606604Szijun_hu if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) { 21338f606604Szijun_hu pcpu_free_alloc_info(ai); 21348f606604Szijun_hu return -EINVAL; 21358f606604Szijun_hu } 2136fd1e8a1fSTejun Heo 2137fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 2138d4b95f80STejun Heo 2139d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 2140fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2141fd1e8a1fSTejun Heo sizeof(pages[0])); 2142999c17e3SSantosh Shilimkar pages = memblock_virt_alloc(pages_size, 0); 2143d4b95f80STejun Heo 21448f05a6a6STejun Heo /* allocate pages */ 2145d4b95f80STejun Heo j = 0; 21468f606604Szijun_hu for (unit = 0; unit < num_possible_cpus(); unit++) { 2147fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 21488f606604Szijun_hu for (i = 0; i < unit_pages; i++) { 2149d4b95f80STejun Heo void *ptr; 2150d4b95f80STejun Heo 21513cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 2152d4b95f80STejun Heo if (!ptr) { 2153870d4b12SJoe Perches pr_warn("failed to allocate %s page for cpu%u\n", 2154598d8091SJoe Perches psize_str, cpu); 2155d4b95f80STejun Heo goto enomem; 2156d4b95f80STejun Heo } 2157f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2158f528f0b8SCatalin Marinas kmemleak_free(ptr); 2159ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 2160d4b95f80STejun Heo } 21618f606604Szijun_hu } 2162d4b95f80STejun Heo 21638f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 21648f05a6a6STejun Heo vm.flags = VM_ALLOC; 2165fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 21668f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 21678f05a6a6STejun Heo 2168fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 21691d9d3257STejun Heo unsigned long unit_addr = 2170fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 21718f05a6a6STejun Heo 2172ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 21738f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 21748f05a6a6STejun Heo 21758f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 2176fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 2177ce3141a2STejun Heo unit_pages); 2178fb435d52STejun Heo if (rc < 0) 
2179fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 21808f05a6a6STejun Heo 21818f05a6a6STejun Heo /* 21828f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 21838f05a6a6STejun Heo * cache for the linear mapping here - something 21848f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 21858f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 21868f05a6a6STejun Heo * data structures are not set up yet. 21878f05a6a6STejun Heo */ 21888f05a6a6STejun Heo 21898f05a6a6STejun Heo /* copy static data */ 2190fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 219166c3a757STejun Heo } 219266c3a757STejun Heo 219366c3a757STejun Heo /* we're ready, commit */ 2194870d4b12SJoe Perches pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n", 2195fd1e8a1fSTejun Heo unit_pages, psize_str, vm.addr, ai->static_size, 2196fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 219766c3a757STejun Heo 2198fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, vm.addr); 2199d4b95f80STejun Heo goto out_free_ar; 2200d4b95f80STejun Heo 2201d4b95f80STejun Heo enomem: 2202d4b95f80STejun Heo while (--j >= 0) 2203ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 2204fb435d52STejun Heo rc = -ENOMEM; 2205d4b95f80STejun Heo out_free_ar: 2206999c17e3SSantosh Shilimkar memblock_free_early(__pa(pages), pages_size); 2207fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2208fb435d52STejun Heo return rc; 220966c3a757STejun Heo } 22103c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */ 2211d4b95f80STejun Heo 2212bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 22138c4bfc6eSTejun Heo /* 2214bbddff05STejun Heo * Generic SMP percpu area setup. 2215e74e3962STejun Heo * 2216e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 2217e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 2218e74e3962STejun Heo * important because many archs have addressing restrictions and might 2219e74e3962STejun Heo * fail if the percpu area is located far away from the previous 2220e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 2221e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 2222e74e3962STejun Heo * on the physical linear memory mapping which uses large page 2223e74e3962STejun Heo * mappings on applicable archs. 2224e74e3962STejun Heo */ 2225e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 2226e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 2227e74e3962STejun Heo 2228c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 2229c8826dd5STejun Heo size_t align) 2230c8826dd5STejun Heo { 2231999c17e3SSantosh Shilimkar return memblock_virt_alloc_from_nopanic( 2232999c17e3SSantosh Shilimkar size, align, __pa(MAX_DMA_ADDRESS)); 2233c8826dd5STejun Heo } 2234c8826dd5STejun Heo 2235c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 2236c8826dd5STejun Heo { 2237999c17e3SSantosh Shilimkar memblock_free_early(__pa(ptr), size); 2238c8826dd5STejun Heo } 2239c8826dd5STejun Heo 2240e74e3962STejun Heo void __init setup_per_cpu_areas(void) 2241e74e3962STejun Heo { 2242e74e3962STejun Heo unsigned long delta; 2243e74e3962STejun Heo unsigned int cpu; 2244fb435d52STejun Heo int rc; 2245e74e3962STejun Heo 2246e74e3962STejun Heo /* 2247e74e3962STejun Heo * Always reserve area for module percpu variables. 
That's 2248e74e3962STejun Heo * what the legacy allocator did. 2249e74e3962STejun Heo */ 2250fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 2251c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 2252c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 2253fb435d52STejun Heo if (rc < 0) 2254bbddff05STejun Heo panic("Failed to initialize percpu areas."); 2255e74e3962STejun Heo 2256e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 2257e74e3962STejun Heo for_each_possible_cpu(cpu) 2258fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 2259e74e3962STejun Heo } 2260e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 2261099a19d9STejun Heo 2262bbddff05STejun Heo #else /* CONFIG_SMP */ 2263bbddff05STejun Heo 2264bbddff05STejun Heo /* 2265bbddff05STejun Heo * UP percpu area setup. 2266bbddff05STejun Heo * 2267bbddff05STejun Heo * UP always uses km-based percpu allocator with identity mapping. 2268bbddff05STejun Heo * Static percpu variables are indistinguishable from the usual static 2269bbddff05STejun Heo * variables and don't require any special preparation. 2270bbddff05STejun Heo */ 2271bbddff05STejun Heo void __init setup_per_cpu_areas(void) 2272bbddff05STejun Heo { 2273bbddff05STejun Heo const size_t unit_size = 2274bbddff05STejun Heo roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 2275bbddff05STejun Heo PERCPU_DYNAMIC_RESERVE)); 2276bbddff05STejun Heo struct pcpu_alloc_info *ai; 2277bbddff05STejun Heo void *fc; 2278bbddff05STejun Heo 2279bbddff05STejun Heo ai = pcpu_alloc_alloc_info(1, 1); 2280999c17e3SSantosh Shilimkar fc = memblock_virt_alloc_from_nopanic(unit_size, 2281999c17e3SSantosh Shilimkar PAGE_SIZE, 2282999c17e3SSantosh Shilimkar __pa(MAX_DMA_ADDRESS)); 2283bbddff05STejun Heo if (!ai || !fc) 2284bbddff05STejun Heo panic("Failed to allocate memory for percpu areas."); 2285100d13c3SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2286100d13c3SCatalin Marinas kmemleak_free(fc); 2287bbddff05STejun Heo 2288bbddff05STejun Heo ai->dyn_size = unit_size; 2289bbddff05STejun Heo ai->unit_size = unit_size; 2290bbddff05STejun Heo ai->atom_size = unit_size; 2291bbddff05STejun Heo ai->alloc_size = unit_size; 2292bbddff05STejun Heo ai->groups[0].nr_units = 1; 2293bbddff05STejun Heo ai->groups[0].cpu_map[0] = 0; 2294bbddff05STejun Heo 2295bbddff05STejun Heo if (pcpu_setup_first_chunk(ai, fc) < 0) 2296bbddff05STejun Heo panic("Failed to initialize percpu areas."); 2297bbddff05STejun Heo } 2298bbddff05STejun Heo 2299bbddff05STejun Heo #endif /* CONFIG_SMP */ 2300bbddff05STejun Heo 2301099a19d9STejun Heo /* 2302099a19d9STejun Heo * First and reserved chunks are initialized with temporary allocation 2303099a19d9STejun Heo * map in initdata so that they can be used before slab is online. 2304099a19d9STejun Heo * This function is called after slab is brought up and replaces those 2305099a19d9STejun Heo * with properly allocated maps. 
2306099a19d9STejun Heo  */
2307099a19d9STejun Heo void __init percpu_init_late(void)
2308099a19d9STejun Heo {
2309099a19d9STejun Heo 	struct pcpu_chunk *target_chunks[] =
2310099a19d9STejun Heo 		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
2311099a19d9STejun Heo 	struct pcpu_chunk *chunk;
2312099a19d9STejun Heo 	unsigned long flags;
2313099a19d9STejun Heo 	int i;
2314099a19d9STejun Heo 
2315099a19d9STejun Heo 	for (i = 0; (chunk = target_chunks[i]); i++) {
2316099a19d9STejun Heo 		int *map;
2317099a19d9STejun Heo 		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
2318099a19d9STejun Heo 
2319099a19d9STejun Heo 		BUILD_BUG_ON(size > PAGE_SIZE);
2320099a19d9STejun Heo 
232190459ce0SBob Liu 		map = pcpu_mem_zalloc(size);
2322099a19d9STejun Heo 		BUG_ON(!map);
2323099a19d9STejun Heo 
2324099a19d9STejun Heo 		spin_lock_irqsave(&pcpu_lock, flags);
2325099a19d9STejun Heo 		memcpy(map, chunk->map, size);
2326099a19d9STejun Heo 		chunk->map = map;
2327099a19d9STejun Heo 		spin_unlock_irqrestore(&pcpu_lock, flags);
2328099a19d9STejun Heo 	}
2329099a19d9STejun Heo }
23301a4d7607STejun Heo 
23311a4d7607STejun Heo /*
23321a4d7607STejun Heo  * The percpu allocator is initialized early during boot when neither
23331a4d7607STejun Heo  * slab nor workqueue is available.  Plug async management until
23341a4d7607STejun Heo  * everything is up and running.
23351a4d7607STejun Heo  */
23361a4d7607STejun Heo static int __init percpu_enable_async(void)
23371a4d7607STejun Heo {
23381a4d7607STejun Heo 	pcpu_async_enabled = true;
23391a4d7607STejun Heo 	return 0;
23401a4d7607STejun Heo }
23411a4d7607STejun Heo subsys_initcall(percpu_enable_async);
2342
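/*
 * Initialization ordering, summarized from the code above as a quick
 * reference (exact entry points vary by arch and config):
 *
 *	setup_per_cpu_areas()            - early boot, arch or generic
 *	  pcpu_embed_first_chunk() or
 *	  pcpu_page_first_chunk()
 *	    pcpu_setup_first_chunk()     - first/reserved chunks usable
 *	percpu_init_late()               - post-slab, swap initdata maps
 *	percpu_enable_async()            - subsys_initcall, balancing on
 */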