// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while all unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use the
 * second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.  (A worked example
 * of the conversion follows this comment.)
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
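
/*
 * Worked example of the byte <-> bit conversion above (illustrative only;
 * the exact numbers depend on the configuration).  Assuming the usual
 * PCPU_MIN_ALLOC_SIZE of 4 bytes and 4K pages, each metadata block covers
 * one page, i.e. PCPU_BITMAP_BLOCK_BITS = PAGE_SIZE / PCPU_MIN_ALLOC_SIZE
 * = 1024 bits:
 *
 *	request of 96 bytes	-> 96 / 4   = 24 bits in the allocation map
 *	chunk with nr_pages = 4	-> 4 * 1024 = 4096 map bits and 4 md_blocks
 *
 * All of the hint bookkeeping below (first_free, contig_hint, scan_hint)
 * is expressed in these bit units.
 */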

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/*
 * The slots are sorted by the size of the biggest continuous free area.
 * 1-31 bytes share the same slot.
 */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
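
/*
 * For illustration only (not part of the allocator proper): with the
 * default macros above, converting between a chunk address and a percpu
 * pointer is a pure offset shift.  E.g.,
 *
 *	void __percpu *p = __addr_to_pcpu_ptr(chunk->base_addr + off);
 *
 * and __pcpu_ptr_to_addr(p) gives back chunk->base_addr + off.  Accessors
 * such as per_cpu_ptr() then add a cpu's unit offset to such a percpu
 * pointer to reach that cpu's copy of the area.
 */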

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static int pcpu_free_slot __ro_after_init;
int pcpu_sidelined_slot __ro_after_init;
int pcpu_to_depopulate_slot __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/*
 * The number of empty populated pages, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_free_slot;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}
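
/*
 * Illustrative numbers only (actual values depend on the configuration):
 * with PCPU_SLOT_BASE_SHIFT == 5 and PCPU_MIN_ALLOC_SIZE == 4, a chunk
 * whose largest contiguous free area (chunk_md->contig_hint) is 256 bits
 * represents 1024 free bytes, so
 *
 *	fls(1024) - PCPU_SLOT_BASE_SHIFT + 2 = 11 - 5 + 2 = 8,
 *
 * i.e. the chunk is placed on slot 8.  A completely empty chunk maps to
 * pcpu_free_slot via the pcpu_unit_size check in pcpu_size_to_slot().
 */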

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
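
/*
 * Example of the conversions above (illustrative; assumes 4K pages and
 * PCPU_MIN_ALLOC_SIZE == 4, i.e. PCPU_BITMAP_BLOCK_BITS == 1024):
 *
 *	pcpu_off_to_block_index(2500) == 2	(2500 / 1024)
 *	pcpu_off_to_block_off(2500)   == 452	(2500 & 1023)
 *	pcpu_block_off_to_off(2, 452) == 2500
 */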

/**
 * pcpu_check_block_hint - check against the contig hint
 * @block: block of interest
 * @bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Check to see if the allocation can fit in the block's contig hint.
 * Note, a chunk uses the same hints as a block so this can also check against
 * the chunk's contig hint.
 */
static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
				  size_t align)
{
	int bit_off = ALIGN(block->contig_hint_start, align) -
		block->contig_hint_start;

	return bit_off + bits <= block->contig_hint;
}
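
/*
 * Illustrative example (values are hypothetical): with contig_hint_start
 * == 13 and contig_hint == 20, a request for 16 bits aligned to 4 bits
 * starts at ALIGN(13, 4) == 16, i.e. bit_off == 3, and 3 + 16 <= 20, so
 * the hint says the allocation may fit in this block.
 */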

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
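
/*
 * Hypothetical example: a block with first_free == 10, scan_hint_start ==
 * 50, scan_hint == 4 and contig_hint_start == 200 being asked for 8 bits
 * satisfies all three conditions above (the scan hint exists, the contig
 * hint lies after it, and 8 > 4), so scanning starts at 50 + 4 == 54
 * rather than at first_free.
 */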

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First is there a contig_hint to
		 * check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
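
/*
 * Typical shape of a walk with the iterators above (illustrative only;
 * pcpu_chunk_refresh_hint() below does essentially this):
 *
 *	int bit_off = 0, bits = 0;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
 */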

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		if (move_front)
			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	/* leave isolated chunks in-place */
	if (chunk->isolated)
		return;

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (!chunk->isolated) {
		chunk->isolated = true;
		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
	}
	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
}

static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (chunk->isolated) {
		chunk->isolated = false;
		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
		pcpu_chunk_relocate(chunk, -1);
	}
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * that an md_block covers a page.  The hint update functions recognize if
 * a block is made full or broken to calculate deltas for keeping track of
 * free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
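
/*
 * E.g., pcpu_region_overlap(3, 7, 6, 10) is true (3 < 10 && 6 < 7) while
 * pcpu_region_overlap(3, 6, 6, 10) is false since [3, 6) and [6, 10) only
 * touch at 6.
 */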
61940064aecSDennis Zhou (Facebook)
62040064aecSDennis Zhou (Facebook) /**
621ca460b3cSDennis Zhou (Facebook) * pcpu_block_update - updates a block given a free area
622ca460b3cSDennis Zhou (Facebook) * @block: block of interest
623ca460b3cSDennis Zhou (Facebook) * @start: start offset in block
624ca460b3cSDennis Zhou (Facebook) * @end: end offset in block
625ca460b3cSDennis Zhou (Facebook) *
626ca460b3cSDennis Zhou (Facebook) * Updates a block given a known free area. The region [start, end) is
627268625a6SDennis Zhou (Facebook) * expected to be the entirety of the free area within a block. Chooses
628268625a6SDennis Zhou (Facebook) * the best starting offset if the contig hints are equal.
629ca460b3cSDennis Zhou (Facebook) */
pcpu_block_update(struct pcpu_block_md * block,int start,int end)630ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
631ca460b3cSDennis Zhou (Facebook) {
632ca460b3cSDennis Zhou (Facebook) int contig = end - start;
633ca460b3cSDennis Zhou (Facebook)
634ca460b3cSDennis Zhou (Facebook) block->first_free = min(block->first_free, start);
635ca460b3cSDennis Zhou (Facebook) if (start == 0)
636ca460b3cSDennis Zhou (Facebook) block->left_free = contig;
637ca460b3cSDennis Zhou (Facebook)
638047924c9SDennis Zhou if (end == block->nr_bits)
639ca460b3cSDennis Zhou (Facebook) block->right_free = contig;
640ca460b3cSDennis Zhou (Facebook)
641ca460b3cSDennis Zhou (Facebook) if (contig > block->contig_hint) {
642382b88e9SDennis Zhou /* promote the old contig_hint to be the new scan_hint */
643382b88e9SDennis Zhou if (start > block->contig_hint_start) {
644382b88e9SDennis Zhou if (block->contig_hint > block->scan_hint) {
645382b88e9SDennis Zhou block->scan_hint_start =
646382b88e9SDennis Zhou block->contig_hint_start;
647382b88e9SDennis Zhou block->scan_hint = block->contig_hint;
648382b88e9SDennis Zhou } else if (start < block->scan_hint_start) {
649382b88e9SDennis Zhou /*
650382b88e9SDennis Zhou * The old contig_hint == scan_hint. But, the
651382b88e9SDennis Zhou * new contig is larger so hold the invariant
652382b88e9SDennis Zhou * scan_hint_start < contig_hint_start.
653382b88e9SDennis Zhou */
654382b88e9SDennis Zhou block->scan_hint = 0;
655382b88e9SDennis Zhou }
656382b88e9SDennis Zhou } else {
657382b88e9SDennis Zhou block->scan_hint = 0;
658382b88e9SDennis Zhou }
659ca460b3cSDennis Zhou (Facebook) block->contig_hint_start = start;
660ca460b3cSDennis Zhou (Facebook) block->contig_hint = contig;
661382b88e9SDennis Zhou } else if (contig == block->contig_hint) {
662382b88e9SDennis Zhou if (block->contig_hint_start &&
663382b88e9SDennis Zhou (!start ||
664382b88e9SDennis Zhou __ffs(start) > __ffs(block->contig_hint_start))) {
665382b88e9SDennis Zhou /* start has a better alignment so use it */
666268625a6SDennis Zhou (Facebook) block->contig_hint_start = start;
667382b88e9SDennis Zhou if (start < block->scan_hint_start &&
668382b88e9SDennis Zhou block->contig_hint > block->scan_hint)
669382b88e9SDennis Zhou block->scan_hint = 0;
670382b88e9SDennis Zhou } else if (start > block->scan_hint_start ||
671382b88e9SDennis Zhou block->contig_hint > block->scan_hint) {
672382b88e9SDennis Zhou /*
673382b88e9SDennis Zhou * Knowing contig == contig_hint, update the scan_hint
674382b88e9SDennis Zhou * if it is farther than or larger than the current
675382b88e9SDennis Zhou * scan_hint.
676382b88e9SDennis Zhou */
677382b88e9SDennis Zhou block->scan_hint_start = start;
678382b88e9SDennis Zhou block->scan_hint = contig;
679382b88e9SDennis Zhou }
680382b88e9SDennis Zhou } else {
681382b88e9SDennis Zhou /*
682382b88e9SDennis Zhou * The region is smaller than the contig_hint. So only update
683382b88e9SDennis Zhou * the scan_hint if it is larger than or equal and farther than
684382b88e9SDennis Zhou * the current scan_hint.
685382b88e9SDennis Zhou */
686382b88e9SDennis Zhou if ((start < block->contig_hint_start &&
687382b88e9SDennis Zhou (contig > block->scan_hint ||
688382b88e9SDennis Zhou (contig == block->scan_hint &&
689382b88e9SDennis Zhou start > block->scan_hint_start)))) {
690382b88e9SDennis Zhou block->scan_hint_start = start;
691382b88e9SDennis Zhou block->scan_hint = contig;
692382b88e9SDennis Zhou }
693ca460b3cSDennis Zhou (Facebook) }
694ca460b3cSDennis Zhou (Facebook) }
695ca460b3cSDennis Zhou (Facebook)
696b89462a9SDennis Zhou /*
697b89462a9SDennis Zhou * pcpu_block_update_scan - update a block given a free area from a scan
698b89462a9SDennis Zhou * @chunk: chunk of interest
699b89462a9SDennis Zhou * @bit_off: chunk offset
700b89462a9SDennis Zhou * @bits: size of free area
701b89462a9SDennis Zhou *
702b89462a9SDennis Zhou * Finding the final allocation spot first goes through pcpu_find_block_fit()
703b89462a9SDennis Zhou * to find a block that can hold the allocation and then pcpu_alloc_area()
704b89462a9SDennis Zhou * where a scan is used. When allocations require specific alignments,
705b89462a9SDennis Zhou * we can inadvertently create holes which will not be seen in the alloc
706b89462a9SDennis Zhou * or free paths.
707b89462a9SDennis Zhou *
708b89462a9SDennis Zhou * This takes a given free area hole and updates a block as it may change the
709b89462a9SDennis Zhou * scan_hint. We need to scan backwards to ensure we don't miss free bits
710b89462a9SDennis Zhou * from alignment.
711b89462a9SDennis Zhou */
pcpu_block_update_scan(struct pcpu_chunk * chunk,int bit_off,int bits)712b89462a9SDennis Zhou static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
713b89462a9SDennis Zhou int bits)
714b89462a9SDennis Zhou {
715b89462a9SDennis Zhou int s_off = pcpu_off_to_block_off(bit_off);
716b89462a9SDennis Zhou int e_off = s_off + bits;
717b89462a9SDennis Zhou int s_index, l_bit;
718b89462a9SDennis Zhou struct pcpu_block_md *block;
719b89462a9SDennis Zhou
720b89462a9SDennis Zhou if (e_off > PCPU_BITMAP_BLOCK_BITS)
721b89462a9SDennis Zhou return;
722b89462a9SDennis Zhou
723b89462a9SDennis Zhou s_index = pcpu_off_to_block_index(bit_off);
724b89462a9SDennis Zhou block = chunk->md_blocks + s_index;
725b89462a9SDennis Zhou
726b89462a9SDennis Zhou /* scan backwards in case of alignment skipping free bits */
727b89462a9SDennis Zhou l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
728b89462a9SDennis Zhou s_off = (s_off == l_bit) ? 0 : l_bit + 1;
729b89462a9SDennis Zhou
730b89462a9SDennis Zhou pcpu_block_update(block, s_off, e_off);
731b89462a9SDennis Zhou }
732b89462a9SDennis Zhou
733ca460b3cSDennis Zhou (Facebook) /**
73492c14cabSDennis Zhou * pcpu_chunk_refresh_hint - updates metadata about a chunk
73592c14cabSDennis Zhou * @chunk: chunk of interest
736d33d9f3dSDennis Zhou * @full_scan: if we should scan from the beginning
73792c14cabSDennis Zhou *
73892c14cabSDennis Zhou * Iterates over the metadata blocks to find the largest contig area.
739d33d9f3dSDennis Zhou * A full scan can be avoided on the allocation path as this is triggered
740d33d9f3dSDennis Zhou * if we broke the contig_hint. In doing so, the scan_hint will be before
741d33d9f3dSDennis Zhou * the contig_hint or after if the scan_hint == contig_hint. This cannot
742d33d9f3dSDennis Zhou * be prevented on freeing as we want to find the largest area possibly
743d33d9f3dSDennis Zhou * spanning blocks.
74492c14cabSDennis Zhou */
pcpu_chunk_refresh_hint(struct pcpu_chunk * chunk,bool full_scan)745d33d9f3dSDennis Zhou static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
74692c14cabSDennis Zhou {
74792c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md;
74892c14cabSDennis Zhou int bit_off, bits;
74992c14cabSDennis Zhou
750d33d9f3dSDennis Zhou /* promote scan_hint to contig_hint */
751d33d9f3dSDennis Zhou if (!full_scan && chunk_md->scan_hint) {
752d33d9f3dSDennis Zhou bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
753d33d9f3dSDennis Zhou chunk_md->contig_hint_start = chunk_md->scan_hint_start;
754d33d9f3dSDennis Zhou chunk_md->contig_hint = chunk_md->scan_hint;
755d33d9f3dSDennis Zhou chunk_md->scan_hint = 0;
756d33d9f3dSDennis Zhou } else {
75792c14cabSDennis Zhou bit_off = chunk_md->first_free;
758d33d9f3dSDennis Zhou chunk_md->contig_hint = 0;
759d33d9f3dSDennis Zhou }
760d33d9f3dSDennis Zhou
76192c14cabSDennis Zhou bits = 0;
762e837dfdeSDennis Zhou pcpu_for_each_md_free_region(chunk, bit_off, bits)
76392c14cabSDennis Zhou pcpu_block_update(chunk_md, bit_off, bit_off + bits);
764ca460b3cSDennis Zhou (Facebook) }
765ca460b3cSDennis Zhou (Facebook)
766ca460b3cSDennis Zhou (Facebook) /**
767ca460b3cSDennis Zhou (Facebook) * pcpu_block_refresh_hint
768ca460b3cSDennis Zhou (Facebook) * @chunk: chunk of interest
769ca460b3cSDennis Zhou (Facebook) * @index: index of the metadata block
770ca460b3cSDennis Zhou (Facebook) *
771ca460b3cSDennis Zhou (Facebook) * Scans over the block beginning at first_free and updates the block
772ca460b3cSDennis Zhou (Facebook) * metadata accordingly.
773ca460b3cSDennis Zhou (Facebook) */
pcpu_block_refresh_hint(struct pcpu_chunk * chunk,int index)774ca460b3cSDennis Zhou (Facebook) static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
775ca460b3cSDennis Zhou (Facebook) {
776ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *block = chunk->md_blocks + index;
777ca460b3cSDennis Zhou (Facebook) unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
778ec288a2cSYury Norov unsigned int start, end; /* region start, region end */
779ca460b3cSDennis Zhou (Facebook)
780da3afdd5SDennis Zhou /* promote scan_hint to contig_hint */
781da3afdd5SDennis Zhou if (block->scan_hint) {
782da3afdd5SDennis Zhou start = block->scan_hint_start + block->scan_hint;
783da3afdd5SDennis Zhou block->contig_hint_start = block->scan_hint_start;
784da3afdd5SDennis Zhou block->contig_hint = block->scan_hint;
785da3afdd5SDennis Zhou block->scan_hint = 0;
786da3afdd5SDennis Zhou } else {
787da3afdd5SDennis Zhou start = block->first_free;
788ca460b3cSDennis Zhou (Facebook) block->contig_hint = 0;
789da3afdd5SDennis Zhou }
790da3afdd5SDennis Zhou
791da3afdd5SDennis Zhou block->right_free = 0;
792ca460b3cSDennis Zhou (Facebook)
793ca460b3cSDennis Zhou (Facebook) /* iterate over free areas and update the contig hints */
794ec288a2cSYury Norov for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
795ec288a2cSYury Norov pcpu_block_update(block, start, end);
796ca460b3cSDennis Zhou (Facebook) }
797ca460b3cSDennis Zhou (Facebook)
798ca460b3cSDennis Zhou (Facebook) /**
799ca460b3cSDennis Zhou (Facebook) * pcpu_block_update_hint_alloc - update hint on allocation path
800ca460b3cSDennis Zhou (Facebook) * @chunk: chunk of interest
801ca460b3cSDennis Zhou (Facebook) * @bit_off: chunk offset
802ca460b3cSDennis Zhou (Facebook) * @bits: size of request
803fc304334SDennis Zhou (Facebook) *
804fc304334SDennis Zhou (Facebook) * Updates metadata for the allocation path. The metadata only has to be
805fc304334SDennis Zhou (Facebook) * refreshed by a full scan iff the chunk's contig hint is broken. Block level
806fc304334SDennis Zhou (Facebook) * scans are required if the block's contig hint is broken.
807ca460b3cSDennis Zhou (Facebook) */
pcpu_block_update_hint_alloc(struct pcpu_chunk * chunk,int bit_off,int bits)808ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
809ca460b3cSDennis Zhou (Facebook) int bits)
810ca460b3cSDennis Zhou (Facebook) {
81192c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md;
812b239f7daSDennis Zhou int nr_empty_pages = 0;
813ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *s_block, *e_block, *block;
814ca460b3cSDennis Zhou (Facebook) int s_index, e_index; /* block indexes of the freed allocation */
815ca460b3cSDennis Zhou (Facebook) int s_off, e_off; /* block offsets of the freed allocation */
816ca460b3cSDennis Zhou (Facebook)
817ca460b3cSDennis Zhou (Facebook) /*
818ca460b3cSDennis Zhou (Facebook) * Calculate per block offsets.
819ca460b3cSDennis Zhou (Facebook) * The calculation uses an inclusive range, but the resulting offsets
820ca460b3cSDennis Zhou (Facebook) * are [start, end). e_index always points to the last block in the
821ca460b3cSDennis Zhou (Facebook) * range.
822ca460b3cSDennis Zhou (Facebook) */
823ca460b3cSDennis Zhou (Facebook) s_index = pcpu_off_to_block_index(bit_off);
824ca460b3cSDennis Zhou (Facebook) e_index = pcpu_off_to_block_index(bit_off + bits - 1);
825ca460b3cSDennis Zhou (Facebook) s_off = pcpu_off_to_block_off(bit_off);
826ca460b3cSDennis Zhou (Facebook) e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
827ca460b3cSDennis Zhou (Facebook)
828ca460b3cSDennis Zhou (Facebook) s_block = chunk->md_blocks + s_index;
829ca460b3cSDennis Zhou (Facebook) e_block = chunk->md_blocks + e_index;
830ca460b3cSDennis Zhou (Facebook)
831ca460b3cSDennis Zhou (Facebook) /*
832ca460b3cSDennis Zhou (Facebook) * Update s_block.
833ca460b3cSDennis Zhou (Facebook) */
834b239f7daSDennis Zhou if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
835b239f7daSDennis Zhou nr_empty_pages++;
836b239f7daSDennis Zhou
83773046f8dSBaoquan He /*
83873046f8dSBaoquan He * block->first_free must be updated if the allocation takes its place.
83973046f8dSBaoquan He * If the allocation breaks the contig_hint, a scan is required to
84073046f8dSBaoquan He * restore this hint.
84173046f8dSBaoquan He */
842fc304334SDennis Zhou (Facebook) if (s_off == s_block->first_free)
843fc304334SDennis Zhou (Facebook) s_block->first_free = find_next_zero_bit(
844fc304334SDennis Zhou (Facebook) pcpu_index_alloc_map(chunk, s_index),
845fc304334SDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS,
846fc304334SDennis Zhou (Facebook) s_off + bits);
847fc304334SDennis Zhou (Facebook)
848382b88e9SDennis Zhou if (pcpu_region_overlap(s_block->scan_hint_start,
849382b88e9SDennis Zhou s_block->scan_hint_start + s_block->scan_hint,
850382b88e9SDennis Zhou s_off,
851382b88e9SDennis Zhou s_off + bits))
852382b88e9SDennis Zhou s_block->scan_hint = 0;
853382b88e9SDennis Zhou
854d9f3a01eSDennis Zhou if (pcpu_region_overlap(s_block->contig_hint_start,
855d9f3a01eSDennis Zhou s_block->contig_hint_start +
856d9f3a01eSDennis Zhou s_block->contig_hint,
857d9f3a01eSDennis Zhou s_off,
858d9f3a01eSDennis Zhou s_off + bits)) {
859fc304334SDennis Zhou (Facebook) /* block contig hint is broken - scan to fix it */
860da3afdd5SDennis Zhou if (!s_off)
861da3afdd5SDennis Zhou s_block->left_free = 0;
862ca460b3cSDennis Zhou (Facebook) pcpu_block_refresh_hint(chunk, s_index);
863fc304334SDennis Zhou (Facebook) } else {
864fc304334SDennis Zhou (Facebook) /* update left and right contig manually */
865fc304334SDennis Zhou (Facebook) s_block->left_free = min(s_block->left_free, s_off);
866fc304334SDennis Zhou (Facebook) if (s_index == e_index)
867fc304334SDennis Zhou (Facebook) s_block->right_free = min_t(int, s_block->right_free,
868fc304334SDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS - e_off);
869fc304334SDennis Zhou (Facebook) else
870fc304334SDennis Zhou (Facebook) s_block->right_free = 0;
871fc304334SDennis Zhou (Facebook) }
872ca460b3cSDennis Zhou (Facebook)
873ca460b3cSDennis Zhou (Facebook) /*
874ca460b3cSDennis Zhou (Facebook) * Update e_block.
875ca460b3cSDennis Zhou (Facebook) */
876ca460b3cSDennis Zhou (Facebook) if (s_index != e_index) {
877b239f7daSDennis Zhou if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
878b239f7daSDennis Zhou nr_empty_pages++;
879b239f7daSDennis Zhou
880fc304334SDennis Zhou (Facebook) /*
881fc304334SDennis Zhou (Facebook) * When the allocation is across blocks, the end is along
882fc304334SDennis Zhou (Facebook) * the left part of the e_block.
883fc304334SDennis Zhou (Facebook) */
884fc304334SDennis Zhou (Facebook) e_block->first_free = find_next_zero_bit(
885fc304334SDennis Zhou (Facebook) pcpu_index_alloc_map(chunk, e_index),
886fc304334SDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS, e_off);
887fc304334SDennis Zhou (Facebook)
888fc304334SDennis Zhou (Facebook) if (e_off == PCPU_BITMAP_BLOCK_BITS) {
889fc304334SDennis Zhou (Facebook) /* reset the block */
890fc304334SDennis Zhou (Facebook) e_block++;
891fc304334SDennis Zhou (Facebook) } else {
892382b88e9SDennis Zhou if (e_off > e_block->scan_hint_start)
893382b88e9SDennis Zhou e_block->scan_hint = 0;
894382b88e9SDennis Zhou
895da3afdd5SDennis Zhou e_block->left_free = 0;
896fc304334SDennis Zhou (Facebook) if (e_off > e_block->contig_hint_start) {
897fc304334SDennis Zhou (Facebook) /* contig hint is broken - scan to fix it */
898ca460b3cSDennis Zhou (Facebook) pcpu_block_refresh_hint(chunk, e_index);
899fc304334SDennis Zhou (Facebook) } else {
900fc304334SDennis Zhou (Facebook) e_block->right_free =
901fc304334SDennis Zhou (Facebook) min_t(int, e_block->right_free,
902fc304334SDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS - e_off);
903fc304334SDennis Zhou (Facebook) }
904fc304334SDennis Zhou (Facebook) }
905ca460b3cSDennis Zhou (Facebook)
906ca460b3cSDennis Zhou (Facebook) /* update in-between md_blocks */
907b239f7daSDennis Zhou nr_empty_pages += (e_index - s_index - 1);
908ca460b3cSDennis Zhou (Facebook) for (block = s_block + 1; block < e_block; block++) {
909382b88e9SDennis Zhou block->scan_hint = 0;
910ca460b3cSDennis Zhou (Facebook) block->contig_hint = 0;
911ca460b3cSDennis Zhou (Facebook) block->left_free = 0;
912ca460b3cSDennis Zhou (Facebook) block->right_free = 0;
913ca460b3cSDennis Zhou (Facebook) }
914ca460b3cSDennis Zhou (Facebook) }
915ca460b3cSDennis Zhou (Facebook)
91673046f8dSBaoquan He /*
91773046f8dSBaoquan He * If the allocation is not atomic, some blocks may not be
91873046f8dSBaoquan He * populated with pages, while we account it here. The number
91973046f8dSBaoquan He * of pages will be added back with pcpu_chunk_populated()
92073046f8dSBaoquan He * when populating pages.
92173046f8dSBaoquan He */
922b239f7daSDennis Zhou if (nr_empty_pages)
923b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, -nr_empty_pages);
924b239f7daSDennis Zhou
925d33d9f3dSDennis Zhou if (pcpu_region_overlap(chunk_md->scan_hint_start,
926d33d9f3dSDennis Zhou chunk_md->scan_hint_start +
927d33d9f3dSDennis Zhou chunk_md->scan_hint,
928d33d9f3dSDennis Zhou bit_off,
929d33d9f3dSDennis Zhou bit_off + bits))
930d33d9f3dSDennis Zhou chunk_md->scan_hint = 0;
931d33d9f3dSDennis Zhou
932fc304334SDennis Zhou (Facebook) /*
933fc304334SDennis Zhou (Facebook) * The only time a full chunk scan is required is if the chunk
934fc304334SDennis Zhou (Facebook) * contig hint is broken. Otherwise, it means a smaller space
935fc304334SDennis Zhou (Facebook) * was used and therefore the chunk contig hint is still correct.
936fc304334SDennis Zhou (Facebook) */
93792c14cabSDennis Zhou if (pcpu_region_overlap(chunk_md->contig_hint_start,
93892c14cabSDennis Zhou chunk_md->contig_hint_start +
93992c14cabSDennis Zhou chunk_md->contig_hint,
940d9f3a01eSDennis Zhou bit_off,
941d9f3a01eSDennis Zhou bit_off + bits))
942d33d9f3dSDennis Zhou pcpu_chunk_refresh_hint(chunk, false);
943ca460b3cSDennis Zhou (Facebook) }
944ca460b3cSDennis Zhou (Facebook)
945ca460b3cSDennis Zhou (Facebook) /**
946ca460b3cSDennis Zhou (Facebook) * pcpu_block_update_hint_free - updates the block hints on the free path
947ca460b3cSDennis Zhou (Facebook) * @chunk: chunk of interest
948ca460b3cSDennis Zhou (Facebook) * @bit_off: chunk offset
949ca460b3cSDennis Zhou (Facebook) * @bits: size of request
950b185cd0dSDennis Zhou (Facebook) *
951b185cd0dSDennis Zhou (Facebook) * Updates metadata for the free path. This avoids a blind block
952b185cd0dSDennis Zhou (Facebook) * refresh by making use of the block contig hints. If this fails, it scans
953b185cd0dSDennis Zhou (Facebook) * forward and backward to determine the extent of the free area. This is
954b185cd0dSDennis Zhou (Facebook) * capped at the boundary of blocks.
955b185cd0dSDennis Zhou (Facebook) *
956b185cd0dSDennis Zhou (Facebook) * A chunk update is triggered if a page becomes free, a block becomes free,
957b185cd0dSDennis Zhou (Facebook) * or the free spans across blocks. This tradeoff is to minimize iterating
95892c14cabSDennis Zhou * over the block metadata to update chunk_md->contig_hint.
95992c14cabSDennis Zhou * chunk_md->contig_hint may be off by up to a page, but it will never be more
96092c14cabSDennis Zhou * than the available space. If the contig hint is contained in one block, it
96192c14cabSDennis Zhou * will be accurate.
962ca460b3cSDennis Zhou (Facebook) */
963ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
964ca460b3cSDennis Zhou (Facebook) int bits)
965ca460b3cSDennis Zhou (Facebook) {
966b239f7daSDennis Zhou int nr_empty_pages = 0;
967ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *s_block, *e_block, *block;
968ca460b3cSDennis Zhou (Facebook) int s_index, e_index; /* block indexes of the freed allocation */
969ca460b3cSDennis Zhou (Facebook) int s_off, e_off; /* block offsets of the freed allocation */
970b185cd0dSDennis Zhou (Facebook) int start, end; /* start and end of the whole free area */
971ca460b3cSDennis Zhou (Facebook)
972ca460b3cSDennis Zhou (Facebook) /*
973ca460b3cSDennis Zhou (Facebook) * Calculate per block offsets.
974ca460b3cSDennis Zhou (Facebook) * The calculation uses an inclusive range, but the resulting offsets
975ca460b3cSDennis Zhou (Facebook) * are [start, end). e_index always points to the last block in the
976ca460b3cSDennis Zhou (Facebook) * range.
977ca460b3cSDennis Zhou (Facebook) */
978ca460b3cSDennis Zhou (Facebook) s_index = pcpu_off_to_block_index(bit_off);
979ca460b3cSDennis Zhou (Facebook) e_index = pcpu_off_to_block_index(bit_off + bits - 1);
980ca460b3cSDennis Zhou (Facebook) s_off = pcpu_off_to_block_off(bit_off);
981ca460b3cSDennis Zhou (Facebook) e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
982ca460b3cSDennis Zhou (Facebook)
983ca460b3cSDennis Zhou (Facebook) s_block = chunk->md_blocks + s_index;
984ca460b3cSDennis Zhou (Facebook) e_block = chunk->md_blocks + e_index;
985ca460b3cSDennis Zhou (Facebook)
986b185cd0dSDennis Zhou (Facebook) /*
987b185cd0dSDennis Zhou (Facebook) * Check if the freed area aligns with the block->contig_hint.
988b185cd0dSDennis Zhou (Facebook) * If it does, then the scan to find the beginning/end of the
989b185cd0dSDennis Zhou (Facebook) * larger free area can be avoided.
990b185cd0dSDennis Zhou (Facebook) *
991b185cd0dSDennis Zhou (Facebook) * start and end refer to beginning and end of the free area
992b185cd0dSDennis Zhou (Facebook) * within their respective blocks. This is not necessarily
993b185cd0dSDennis Zhou (Facebook) * the entire free area as it may span blocks past the beginning
994b185cd0dSDennis Zhou (Facebook) * or end of the block.
995b185cd0dSDennis Zhou (Facebook) */
996b185cd0dSDennis Zhou (Facebook) start = s_off;
997b185cd0dSDennis Zhou (Facebook) if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
998b185cd0dSDennis Zhou (Facebook) start = s_block->contig_hint_start;
999b185cd0dSDennis Zhou (Facebook) } else {
1000b185cd0dSDennis Zhou (Facebook) /*
1001b185cd0dSDennis Zhou (Facebook) * Scan backwards to find the extent of the free area.
1002b185cd0dSDennis Zhou (Facebook) * find_last_bit returns the starting bit, so if the start bit
1003b185cd0dSDennis Zhou (Facebook) * is returned, that means there was no last bit and the
1004b185cd0dSDennis Zhou (Facebook) * remainder of the chunk is free.
1005b185cd0dSDennis Zhou (Facebook) */
1006b185cd0dSDennis Zhou (Facebook) int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1007b185cd0dSDennis Zhou (Facebook) start);
1008b185cd0dSDennis Zhou (Facebook) start = (start == l_bit) ? 0 : l_bit + 1;
1009b185cd0dSDennis Zhou (Facebook) }
1010b185cd0dSDennis Zhou (Facebook)
1011b185cd0dSDennis Zhou (Facebook) end = e_off;
1012b185cd0dSDennis Zhou (Facebook) if (e_off == e_block->contig_hint_start)
1013b185cd0dSDennis Zhou (Facebook) end = e_block->contig_hint_start + e_block->contig_hint;
1014b185cd0dSDennis Zhou (Facebook) else
1015b185cd0dSDennis Zhou (Facebook) end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1016b185cd0dSDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS, end);
1017b185cd0dSDennis Zhou (Facebook)
1018ca460b3cSDennis Zhou (Facebook) /* update s_block */
1019b185cd0dSDennis Zhou (Facebook) e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
1020b239f7daSDennis Zhou if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1021b239f7daSDennis Zhou nr_empty_pages++;
1022b185cd0dSDennis Zhou (Facebook) pcpu_block_update(s_block, start, e_off);
1023ca460b3cSDennis Zhou (Facebook)
1024ca460b3cSDennis Zhou (Facebook) /* freeing spans across blocks */
1025ca460b3cSDennis Zhou (Facebook) if (s_index != e_index) {
1026ca460b3cSDennis Zhou (Facebook) /* update e_block */
1027b239f7daSDennis Zhou if (end == PCPU_BITMAP_BLOCK_BITS)
1028b239f7daSDennis Zhou nr_empty_pages++;
1029b185cd0dSDennis Zhou (Facebook) pcpu_block_update(e_block, 0, end);
1030ca460b3cSDennis Zhou (Facebook)
1031ca460b3cSDennis Zhou (Facebook) /* reset md_blocks in the middle */
1032b239f7daSDennis Zhou nr_empty_pages += (e_index - s_index - 1);
1033ca460b3cSDennis Zhou (Facebook) for (block = s_block + 1; block < e_block; block++) {
1034ca460b3cSDennis Zhou (Facebook) block->first_free = 0;
1035382b88e9SDennis Zhou block->scan_hint = 0;
1036ca460b3cSDennis Zhou (Facebook) block->contig_hint_start = 0;
1037ca460b3cSDennis Zhou (Facebook) block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1038ca460b3cSDennis Zhou (Facebook) block->left_free = PCPU_BITMAP_BLOCK_BITS;
1039ca460b3cSDennis Zhou (Facebook) block->right_free = PCPU_BITMAP_BLOCK_BITS;
1040ca460b3cSDennis Zhou (Facebook) }
1041ca460b3cSDennis Zhou (Facebook) }
1042ca460b3cSDennis Zhou (Facebook)
1043b239f7daSDennis Zhou if (nr_empty_pages)
1044b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, nr_empty_pages);
1045b239f7daSDennis Zhou
1046b185cd0dSDennis Zhou (Facebook) /*
1047b239f7daSDennis Zhou * Refresh chunk metadata when the free makes a block free or spans
1048b239f7daSDennis Zhou * across blocks. The contig_hint may be off by up to a page, but if
1049b239f7daSDennis Zhou * the contig_hint is contained in a block, it will be accurate with
1050b239f7daSDennis Zhou * the else condition below.
1051b185cd0dSDennis Zhou (Facebook) */
1052b239f7daSDennis Zhou if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1053d33d9f3dSDennis Zhou pcpu_chunk_refresh_hint(chunk, true);
1054b185cd0dSDennis Zhou (Facebook) else
105592c14cabSDennis Zhou pcpu_block_update(&chunk->chunk_md,
105692c14cabSDennis Zhou pcpu_block_off_to_off(s_index, start),
105792c14cabSDennis Zhou end);
1058ca460b3cSDennis Zhou (Facebook) }
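
/*
 * Illustrative example (hypothetical numbers, assuming 4K pages so
 * PCPU_BITMAP_BLOCK_BITS == 1024): freeing bits [1000, 1100) spans blocks 0
 * and 1. If bits [900, 1000) were already free, the backward scan sets
 * start = 900; if bits [1100, 1200) were already free and bit 1200 is
 * allocated, the forward scan sets end = 176 (bit 1200's offset in block 1).
 * Because the free spans blocks, pcpu_chunk_refresh_hint() rebuilds the
 * chunk-level hints.
 */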
1059ca460b3cSDennis Zhou (Facebook)
1060ca460b3cSDennis Zhou (Facebook) /**
106140064aecSDennis Zhou (Facebook) * pcpu_is_populated - determines if the region is populated
106240064aecSDennis Zhou (Facebook) * @chunk: chunk of interest
106340064aecSDennis Zhou (Facebook) * @bit_off: chunk offset
106440064aecSDennis Zhou (Facebook) * @bits: size of area
106540064aecSDennis Zhou (Facebook) * @next_off: return value for the next offset to start searching
106640064aecSDennis Zhou (Facebook) *
106740064aecSDennis Zhou (Facebook) * For atomic allocations, check if the backing pages are populated.
106840064aecSDennis Zhou (Facebook) *
106940064aecSDennis Zhou (Facebook) * RETURNS:
107040064aecSDennis Zhou (Facebook) * true if the backing pages are populated, false otherwise.
107140064aecSDennis Zhou (Facebook) * next_off is set to skip over unpopulated blocks in pcpu_find_block_fit.
107240064aecSDennis Zhou (Facebook) */
107340064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
107440064aecSDennis Zhou (Facebook) int *next_off)
107540064aecSDennis Zhou (Facebook) {
1076801a5736SYury Norov unsigned int start, end;
107740064aecSDennis Zhou (Facebook)
1078801a5736SYury Norov start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1079801a5736SYury Norov end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
108040064aecSDennis Zhou (Facebook)
1081801a5736SYury Norov start = find_next_zero_bit(chunk->populated, end, start);
1082801a5736SYury Norov if (start >= end)
108340064aecSDennis Zhou (Facebook) return true;
108440064aecSDennis Zhou (Facebook)
1085801a5736SYury Norov end = find_next_bit(chunk->populated, end, start + 1);
1086801a5736SYury Norov
1087801a5736SYury Norov *next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
108840064aecSDennis Zhou (Facebook) return false;
108940064aecSDennis Zhou (Facebook) }
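
/*
 * Illustrative example (hypothetical numbers, assuming 4K pages and
 * PCPU_MIN_ALLOC_SIZE == 4): bit_off = 2000 with bits = 100 describes the
 * byte range [8000, 8400), so pages [PFN_DOWN(8000), PFN_UP(8400)) == [1, 3)
 * of the chunk are checked. If a hole is found, *next_off is advanced to the
 * first bit of the next populated page so the caller can skip the hole.
 */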
109040064aecSDennis Zhou (Facebook)
109140064aecSDennis Zhou (Facebook) /**
109240064aecSDennis Zhou (Facebook) * pcpu_find_block_fit - finds the block index to start searching
109340064aecSDennis Zhou (Facebook) * @chunk: chunk of interest
109440064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units
109540064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE bytes)
109640064aecSDennis Zhou (Facebook) * @pop_only: use populated regions only
109740064aecSDennis Zhou (Facebook) *
1098b4c2116cSDennis Zhou (Facebook) * Given a chunk and an allocation spec, find the offset to begin searching
1099b4c2116cSDennis Zhou (Facebook) * for a free region. This iterates over the bitmap metadata blocks to
1100b4c2116cSDennis Zhou (Facebook) * find an offset that is guaranteed to fit the requirements. It is not
1101b4c2116cSDennis Zhou (Facebook) * quite first fit: if the allocation does not fit in the contig hint of a
1102b4c2116cSDennis Zhou (Facebook) * block or chunk, that block or chunk is skipped. This errs on the side of caution
1103b4c2116cSDennis Zhou (Facebook) * to prevent excess iteration. Poor alignment can cause the allocator to
1104b4c2116cSDennis Zhou (Facebook) * skip over blocks and chunks that have valid free areas.
1105b4c2116cSDennis Zhou (Facebook) *
110640064aecSDennis Zhou (Facebook) * RETURNS:
110740064aecSDennis Zhou (Facebook) * The offset in the bitmap to begin searching.
110840064aecSDennis Zhou (Facebook) * -1 if no offset is found.
110940064aecSDennis Zhou (Facebook) */
111040064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
111140064aecSDennis Zhou (Facebook) size_t align, bool pop_only)
111240064aecSDennis Zhou (Facebook) {
111392c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1114b4c2116cSDennis Zhou (Facebook) int bit_off, bits, next_off;
111540064aecSDennis Zhou (Facebook)
111613f96637SDennis Zhou (Facebook) /*
11178ea2e1e3SRoman Gushchin * This is an optimization to prevent scanning by assuming if the
11188ea2e1e3SRoman Gushchin * allocation cannot fit in the global hint, there is memory pressure
11198ea2e1e3SRoman Gushchin * and creating a new chunk would happen soon.
112013f96637SDennis Zhou (Facebook) */
11218ea2e1e3SRoman Gushchin if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
112213f96637SDennis Zhou (Facebook) return -1;
112313f96637SDennis Zhou (Facebook)
1124d33d9f3dSDennis Zhou bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1125b4c2116cSDennis Zhou (Facebook) bits = 0;
1126b4c2116cSDennis Zhou (Facebook) pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
112740064aecSDennis Zhou (Facebook) if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1128b4c2116cSDennis Zhou (Facebook) &next_off))
112940064aecSDennis Zhou (Facebook) break;
113040064aecSDennis Zhou (Facebook)
1131b4c2116cSDennis Zhou (Facebook) bit_off = next_off;
113240064aecSDennis Zhou (Facebook) bits = 0;
113340064aecSDennis Zhou (Facebook) }
113440064aecSDennis Zhou (Facebook)
113540064aecSDennis Zhou (Facebook) if (bit_off == pcpu_chunk_map_bits(chunk))
113640064aecSDennis Zhou (Facebook) return -1;
113740064aecSDennis Zhou (Facebook)
113840064aecSDennis Zhou (Facebook) return bit_off;
113940064aecSDennis Zhou (Facebook) }
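
/*
 * Illustrative example of the skipping described above (hypothetical
 * numbers, assuming PCPU_BITMAP_BLOCK_BITS == 1024): a request for
 * alloc_bits = 256 with PAGE_SIZE alignment (bit_align == 1024) can only
 * start on a block boundary, so a block whose 800 free bits sit in its
 * middle is skipped even though an unaligned request of that size would fit.
 */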
114040064aecSDennis Zhou (Facebook)
1141b89462a9SDennis Zhou /*
1142b89462a9SDennis Zhou * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1143b89462a9SDennis Zhou * @map: the address to base the search on
1144b89462a9SDennis Zhou * @size: the bitmap size in bits
1145b89462a9SDennis Zhou * @start: the bitnumber to start searching at
1146b89462a9SDennis Zhou * @nr: the number of zeroed bits we're looking for
1147b89462a9SDennis Zhou * @align_mask: alignment mask for zero area
1148b89462a9SDennis Zhou * @largest_off: offset of the largest area skipped
1149b89462a9SDennis Zhou * @largest_bits: size of the largest area skipped
1150b89462a9SDennis Zhou *
1151b89462a9SDennis Zhou * The @align_mask should be one less than a power of 2.
1152b89462a9SDennis Zhou *
1153b89462a9SDennis Zhou * This is a modified version of bitmap_find_next_zero_area_off() to remember
1154b89462a9SDennis Zhou * the largest area that was skipped. This is imperfect, but in general is
1155b89462a9SDennis Zhou * good enough. The largest remembered region is the largest failed region
1156b89462a9SDennis Zhou * seen. This does not include anything we possibly skipped due to alignment.
1157b89462a9SDennis Zhou * pcpu_block_update_scan() does scan backwards to try and recover what was
1158b89462a9SDennis Zhou * lost to alignment. While this can cause scanning to miss earlier possible
1159b89462a9SDennis Zhou * free areas, smaller allocations will eventually fill those holes.
1160b89462a9SDennis Zhou */
1161b89462a9SDennis Zhou static unsigned long pcpu_find_zero_area(unsigned long *map,
1162b89462a9SDennis Zhou unsigned long size,
1163b89462a9SDennis Zhou unsigned long start,
1164b89462a9SDennis Zhou unsigned long nr,
1165b89462a9SDennis Zhou unsigned long align_mask,
1166b89462a9SDennis Zhou unsigned long *largest_off,
1167b89462a9SDennis Zhou unsigned long *largest_bits)
1168b89462a9SDennis Zhou {
1169b89462a9SDennis Zhou unsigned long index, end, i, area_off, area_bits;
1170b89462a9SDennis Zhou again:
1171b89462a9SDennis Zhou index = find_next_zero_bit(map, size, start);
1172b89462a9SDennis Zhou
1173b89462a9SDennis Zhou /* Align allocation */
1174b89462a9SDennis Zhou index = __ALIGN_MASK(index, align_mask);
1175b89462a9SDennis Zhou area_off = index;
1176b89462a9SDennis Zhou
1177b89462a9SDennis Zhou end = index + nr;
1178b89462a9SDennis Zhou if (end > size)
1179b89462a9SDennis Zhou return end;
1180b89462a9SDennis Zhou i = find_next_bit(map, end, index);
1181b89462a9SDennis Zhou if (i < end) {
1182b89462a9SDennis Zhou area_bits = i - area_off;
1183b89462a9SDennis Zhou /* remember largest unused area with best alignment */
1184b89462a9SDennis Zhou if (area_bits > *largest_bits ||
1185b89462a9SDennis Zhou (area_bits == *largest_bits && *largest_off &&
1186b89462a9SDennis Zhou (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1187b89462a9SDennis Zhou *largest_off = area_off;
1188b89462a9SDennis Zhou *largest_bits = area_bits;
1189b89462a9SDennis Zhou }
1190b89462a9SDennis Zhou
1191b89462a9SDennis Zhou start = i + 1;
1192b89462a9SDennis Zhou goto again;
1193b89462a9SDennis Zhou }
1194b89462a9SDennis Zhou return index;
1195b89462a9SDennis Zhou }
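
/*
 * Illustrative walk-through (hypothetical bitmap): with nr = 8 and
 * align_mask = 3, if bits [0, 10) are set, bits [10, 18) are clear and
 * bit 18 is set, the first candidate rounds up from 10 to 12, collides with
 * bit 18, and the skipped run of 6 bits at offset 12 is remembered in
 * *largest_off / *largest_bits. The search restarts at bit 19, aligns to 20
 * and returns 20 if bits [20, 28) are clear.
 */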
1196b89462a9SDennis Zhou
119740064aecSDennis Zhou (Facebook) /**
119840064aecSDennis Zhou (Facebook) * pcpu_alloc_area - allocates an area from a pcpu_chunk
119940064aecSDennis Zhou (Facebook) * @chunk: chunk of interest
120040064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units
120140064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE)
120240064aecSDennis Zhou (Facebook) * @start: bit_off to start searching
120340064aecSDennis Zhou (Facebook) *
120440064aecSDennis Zhou (Facebook) * This function takes in a @start offset to begin searching to fit an
1205b4c2116cSDennis Zhou (Facebook) * allocation of @alloc_bits with alignment @align. It needs to scan
1206b4c2116cSDennis Zhou (Facebook) * the allocation map because if it fits within the block's contig hint,
1207b4c2116cSDennis Zhou (Facebook) * @start will be block->first_free. This is an attempt to fill the
1208b4c2116cSDennis Zhou (Facebook) * allocation prior to breaking the contig hint. The allocation and
1209b4c2116cSDennis Zhou (Facebook) * boundary maps are updated accordingly if it confirms a valid
1210b4c2116cSDennis Zhou (Facebook) * free area.
121140064aecSDennis Zhou (Facebook) *
121240064aecSDennis Zhou (Facebook) * RETURNS:
121340064aecSDennis Zhou (Facebook) * Allocated addr offset in @chunk on success.
121440064aecSDennis Zhou (Facebook) * -1 if no matching area is found.
121540064aecSDennis Zhou (Facebook) */
121640064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
121740064aecSDennis Zhou (Facebook) size_t align, int start)
121840064aecSDennis Zhou (Facebook) {
121992c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md;
122040064aecSDennis Zhou (Facebook) size_t align_mask = (align) ? (align - 1) : 0;
1221b89462a9SDennis Zhou unsigned long area_off = 0, area_bits = 0;
122240064aecSDennis Zhou (Facebook) int bit_off, end, oslot;
12239f7dcf22STejun Heo
12244f996e23STejun Heo lockdep_assert_held(&pcpu_lock);
12254f996e23STejun Heo
122640064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk);
1227833af842STejun Heo
1228833af842STejun Heo /*
122940064aecSDennis Zhou (Facebook) * Search to find a fit.
1230833af842STejun Heo */
12318c43004aSDennis Zhou end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
12328c43004aSDennis Zhou pcpu_chunk_map_bits(chunk));
1233b89462a9SDennis Zhou bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1234b89462a9SDennis Zhou align_mask, &area_off, &area_bits);
123540064aecSDennis Zhou (Facebook) if (bit_off >= end)
1236a16037c8STejun Heo return -1;
1237a16037c8STejun Heo
1238b89462a9SDennis Zhou if (area_bits)
1239b89462a9SDennis Zhou pcpu_block_update_scan(chunk, area_off, area_bits);
1240b89462a9SDennis Zhou
124140064aecSDennis Zhou (Facebook) /* update alloc map */
124240064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1243a16037c8STejun Heo
124440064aecSDennis Zhou (Facebook) /* update boundary map */
124540064aecSDennis Zhou (Facebook) set_bit(bit_off, chunk->bound_map);
124640064aecSDennis Zhou (Facebook) bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
124740064aecSDennis Zhou (Facebook) set_bit(bit_off + alloc_bits, chunk->bound_map);
1248a16037c8STejun Heo
124940064aecSDennis Zhou (Facebook) chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
125040064aecSDennis Zhou (Facebook)
125186b442fbSDennis Zhou (Facebook) /* update first free bit */
125292c14cabSDennis Zhou if (bit_off == chunk_md->first_free)
125392c14cabSDennis Zhou chunk_md->first_free = find_next_zero_bit(
125486b442fbSDennis Zhou (Facebook) chunk->alloc_map,
125586b442fbSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk),
125686b442fbSDennis Zhou (Facebook) bit_off + alloc_bits);
125786b442fbSDennis Zhou (Facebook)
1258ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
125940064aecSDennis Zhou (Facebook)
126040064aecSDennis Zhou (Facebook) pcpu_chunk_relocate(chunk, oslot);
126140064aecSDennis Zhou (Facebook)
126240064aecSDennis Zhou (Facebook) return bit_off * PCPU_MIN_ALLOC_SIZE;
1263a16037c8STejun Heo }
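
/*
 * Illustrative example of the bitmap bookkeeping above (hypothetical
 * numbers): allocating alloc_bits = 4 at bit_off = 100 sets alloc_map bits
 * [100, 104), sets bound_map bits 100 and 104 and clears bound_map bits
 * [101, 104). The boundary bits delimit the allocation so pcpu_free_area()
 * can later recover its size from the offset alone.
 */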
1264a16037c8STejun Heo
1265a16037c8STejun Heo /**
126640064aecSDennis Zhou (Facebook) * pcpu_free_area - frees the corresponding offset
1267fbf59bc9STejun Heo * @chunk: chunk of interest
126840064aecSDennis Zhou (Facebook) * @off: addr offset into chunk
1269fbf59bc9STejun Heo *
127040064aecSDennis Zhou (Facebook) * This function determines the size of an allocation to free using
127140064aecSDennis Zhou (Facebook) * the boundary bitmap and clears the allocation map.
12725b32af91SRoman Gushchin *
12735b32af91SRoman Gushchin * RETURNS:
12745b32af91SRoman Gushchin * Number of freed bytes.
1275fbf59bc9STejun Heo */
12765b32af91SRoman Gushchin static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1277fbf59bc9STejun Heo {
127892c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md;
12795b32af91SRoman Gushchin int bit_off, bits, end, oslot, freed;
1280fbf59bc9STejun Heo
12815ccd30e4SDennis Zhou lockdep_assert_held(&pcpu_lock);
128230a5b536SDennis Zhou pcpu_stats_area_dealloc(chunk);
12835ccd30e4SDennis Zhou
128440064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk);
1285723ad1d9SAl Viro
128640064aecSDennis Zhou (Facebook) bit_off = off / PCPU_MIN_ALLOC_SIZE;
1287fbf59bc9STejun Heo
128840064aecSDennis Zhou (Facebook) /* find end index */
128940064aecSDennis Zhou (Facebook) end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
129040064aecSDennis Zhou (Facebook) bit_off + 1);
129140064aecSDennis Zhou (Facebook) bits = end - bit_off;
129240064aecSDennis Zhou (Facebook) bitmap_clear(chunk->alloc_map, bit_off, bits);
12933d331ad7SAl Viro
12945b32af91SRoman Gushchin freed = bits * PCPU_MIN_ALLOC_SIZE;
12955b32af91SRoman Gushchin
129640064aecSDennis Zhou (Facebook) /* update metadata */
12975b32af91SRoman Gushchin chunk->free_bytes += freed;
1298fbf59bc9STejun Heo
129986b442fbSDennis Zhou (Facebook) /* update first free bit */
130092c14cabSDennis Zhou chunk_md->first_free = min(chunk_md->first_free, bit_off);
130186b442fbSDennis Zhou (Facebook)
1302ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_free(chunk, bit_off, bits);
1303b539b87fSTejun Heo
1304fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot);
13055b32af91SRoman Gushchin
13065b32af91SRoman Gushchin return freed;
1307fbf59bc9STejun Heo }
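
/*
 * Illustrative example continuing the one above pcpu_alloc_area()
 * (hypothetical numbers, assuming PCPU_MIN_ALLOC_SIZE == 4): freeing
 * off = 400 gives bit_off = 100; the next set bound_map bit is 104, so
 * bits = 4 and 16 bytes are cleared from the alloc_map and returned.
 */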
1308fbf59bc9STejun Heo
1309047924c9SDennis Zhou static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1310047924c9SDennis Zhou {
1311047924c9SDennis Zhou block->scan_hint = 0;
1312047924c9SDennis Zhou block->contig_hint = nr_bits;
1313047924c9SDennis Zhou block->left_free = nr_bits;
1314047924c9SDennis Zhou block->right_free = nr_bits;
1315047924c9SDennis Zhou block->first_free = 0;
1316047924c9SDennis Zhou block->nr_bits = nr_bits;
1317047924c9SDennis Zhou }
1318047924c9SDennis Zhou
1319ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1320ca460b3cSDennis Zhou (Facebook) {
1321ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *md_block;
1322ca460b3cSDennis Zhou (Facebook)
132392c14cabSDennis Zhou /* init the chunk's block */
132492c14cabSDennis Zhou pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
132592c14cabSDennis Zhou
1326ca460b3cSDennis Zhou (Facebook) for (md_block = chunk->md_blocks;
1327ca460b3cSDennis Zhou (Facebook) md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1328047924c9SDennis Zhou md_block++)
1329047924c9SDennis Zhou pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1330ca460b3cSDennis Zhou (Facebook) }
1331ca460b3cSDennis Zhou (Facebook)
133240064aecSDennis Zhou (Facebook) /**
133340064aecSDennis Zhou (Facebook) * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
133440064aecSDennis Zhou (Facebook) * @tmp_addr: the start of the region served
133540064aecSDennis Zhou (Facebook) * @map_size: size of the region served
133640064aecSDennis Zhou (Facebook) *
133740064aecSDennis Zhou (Facebook) * This is responsible for creating the chunks that serve the first chunk. The
133840064aecSDennis Zhou (Facebook) * base_addr is @tmp_addr rounded down to a page boundary while the region end
133940064aecSDennis Zhou (Facebook) * is rounded up. Offsets are tracked to determine the region served. All
134040064aecSDennis Zhou (Facebook) * this is done so the bitmap allocator never has to handle partial blocks.
134140064aecSDennis Zhou (Facebook) *
134240064aecSDennis Zhou (Facebook) * RETURNS:
134340064aecSDennis Zhou (Facebook) * Chunk serving the region at @tmp_addr of @map_size.
134440064aecSDennis Zhou (Facebook) */
1345c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
134640064aecSDennis Zhou (Facebook) int map_size)
134710edf5b0SDennis Zhou (Facebook) {
134810edf5b0SDennis Zhou (Facebook) struct pcpu_chunk *chunk;
13493289e053SBaoquan He unsigned long aligned_addr;
135040064aecSDennis Zhou (Facebook) int start_offset, offset_bits, region_size, region_bits;
1351f655f405SMike Rapoport size_t alloc_size;
1352c0ebfdc3SDennis Zhou (Facebook)
1353c0ebfdc3SDennis Zhou (Facebook) /* region calculations */
1354c0ebfdc3SDennis Zhou (Facebook) aligned_addr = tmp_addr & PAGE_MASK;
1355c0ebfdc3SDennis Zhou (Facebook)
1356c0ebfdc3SDennis Zhou (Facebook) start_offset = tmp_addr - aligned_addr;
13573289e053SBaoquan He region_size = ALIGN(start_offset + map_size, PAGE_SIZE);
135810edf5b0SDennis Zhou (Facebook)
1359c0ebfdc3SDennis Zhou (Facebook) /* allocate chunk */
136061cf93d3SDennis Zhou alloc_size = struct_size(chunk, populated,
136161cf93d3SDennis Zhou BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1362f655f405SMike Rapoport chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1363f655f405SMike Rapoport if (!chunk)
1364f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
1365f655f405SMike Rapoport alloc_size);
1366c0ebfdc3SDennis Zhou (Facebook)
136710edf5b0SDennis Zhou (Facebook) INIT_LIST_HEAD(&chunk->list);
1368c0ebfdc3SDennis Zhou (Facebook)
1369c0ebfdc3SDennis Zhou (Facebook) chunk->base_addr = (void *)aligned_addr;
137010edf5b0SDennis Zhou (Facebook) chunk->start_offset = start_offset;
13716b9d7c8eSDennis Zhou (Facebook) chunk->end_offset = region_size - chunk->start_offset - map_size;
1372c0ebfdc3SDennis Zhou (Facebook)
13738ab16c43SDennis Zhou (Facebook) chunk->nr_pages = region_size >> PAGE_SHIFT;
137440064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk);
1375c0ebfdc3SDennis Zhou (Facebook)
1376f655f405SMike Rapoport alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1377f655f405SMike Rapoport chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1378f655f405SMike Rapoport if (!chunk->alloc_map)
1379f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
1380f655f405SMike Rapoport alloc_size);
1381f655f405SMike Rapoport
1382f655f405SMike Rapoport alloc_size =
1383f655f405SMike Rapoport BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1384f655f405SMike Rapoport chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1385f655f405SMike Rapoport if (!chunk->bound_map)
1386f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
1387f655f405SMike Rapoport alloc_size);
1388f655f405SMike Rapoport
1389f655f405SMike Rapoport alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1390f655f405SMike Rapoport chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1391f655f405SMike Rapoport if (!chunk->md_blocks)
1392f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
1393f655f405SMike Rapoport alloc_size);
1394f655f405SMike Rapoport
13953c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
1396faf65ddeSRoman Gushchin /* first chunk is free to use */
13973c7be18aSRoman Gushchin chunk->obj_cgroups = NULL;
13983c7be18aSRoman Gushchin #endif
1399ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk);
140010edf5b0SDennis Zhou (Facebook)
140110edf5b0SDennis Zhou (Facebook) /* manage populated page bitmap */
140210edf5b0SDennis Zhou (Facebook) chunk->immutable = true;
14038ab16c43SDennis Zhou (Facebook) bitmap_fill(chunk->populated, chunk->nr_pages);
14048ab16c43SDennis Zhou (Facebook) chunk->nr_populated = chunk->nr_pages;
1405b239f7daSDennis Zhou chunk->nr_empty_pop_pages = chunk->nr_pages;
140610edf5b0SDennis Zhou (Facebook)
140740064aecSDennis Zhou (Facebook) chunk->free_bytes = map_size;
1408c0ebfdc3SDennis Zhou (Facebook)
1409c0ebfdc3SDennis Zhou (Facebook) if (chunk->start_offset) {
1410c0ebfdc3SDennis Zhou (Facebook) /* hide the beginning of the bitmap */
141140064aecSDennis Zhou (Facebook) offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
141240064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 0, offset_bits);
141340064aecSDennis Zhou (Facebook) set_bit(0, chunk->bound_map);
141440064aecSDennis Zhou (Facebook) set_bit(offset_bits, chunk->bound_map);
1415ca460b3cSDennis Zhou (Facebook)
141692c14cabSDennis Zhou chunk->chunk_md.first_free = offset_bits;
141786b442fbSDennis Zhou (Facebook)
1418ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1419c0ebfdc3SDennis Zhou (Facebook) }
1420c0ebfdc3SDennis Zhou (Facebook)
14216b9d7c8eSDennis Zhou (Facebook) if (chunk->end_offset) {
14226b9d7c8eSDennis Zhou (Facebook) /* hide the end of the bitmap */
142340064aecSDennis Zhou (Facebook) offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
142440064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map,
142540064aecSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk) - offset_bits,
142640064aecSDennis Zhou (Facebook) offset_bits);
142740064aecSDennis Zhou (Facebook) set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
142840064aecSDennis Zhou (Facebook) chunk->bound_map);
142940064aecSDennis Zhou (Facebook) set_bit(region_bits, chunk->bound_map);
14306b9d7c8eSDennis Zhou (Facebook)
1431ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1432ca460b3cSDennis Zhou (Facebook) - offset_bits, offset_bits);
1433ca460b3cSDennis Zhou (Facebook) }
143440064aecSDennis Zhou (Facebook)
143510edf5b0SDennis Zhou (Facebook) return chunk;
143610edf5b0SDennis Zhou (Facebook) }
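
/*
 * Illustrative example of the region calculations above (hypothetical
 * addresses, assuming 4K pages): for a tmp_addr ending in 0x800 and
 * map_size = 0x3000, aligned_addr drops to the page boundary,
 * start_offset = 0x800, region_size = 0x4000, end_offset = 0x800 and
 * nr_pages = 4. The leading and trailing 0x800 bytes are then hidden from
 * the allocation bitmap by the two pcpu_block_update_hint_alloc() calls.
 */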
143710edf5b0SDennis Zhou (Facebook)
1438faf65ddeSRoman Gushchin static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
14396081089fSTejun Heo {
14406081089fSTejun Heo struct pcpu_chunk *chunk;
144140064aecSDennis Zhou (Facebook) int region_bits;
14426081089fSTejun Heo
144347504ee0SDennis Zhou chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
14446081089fSTejun Heo if (!chunk)
14456081089fSTejun Heo return NULL;
14466081089fSTejun Heo
14476081089fSTejun Heo INIT_LIST_HEAD(&chunk->list);
1448c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages;
144940064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk);
145040064aecSDennis Zhou (Facebook)
145140064aecSDennis Zhou (Facebook) chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
145247504ee0SDennis Zhou sizeof(chunk->alloc_map[0]), gfp);
145340064aecSDennis Zhou (Facebook) if (!chunk->alloc_map)
145440064aecSDennis Zhou (Facebook) goto alloc_map_fail;
145540064aecSDennis Zhou (Facebook)
145640064aecSDennis Zhou (Facebook) chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
145747504ee0SDennis Zhou sizeof(chunk->bound_map[0]), gfp);
145840064aecSDennis Zhou (Facebook) if (!chunk->bound_map)
145940064aecSDennis Zhou (Facebook) goto bound_map_fail;
146040064aecSDennis Zhou (Facebook)
1461ca460b3cSDennis Zhou (Facebook) chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
146247504ee0SDennis Zhou sizeof(chunk->md_blocks[0]), gfp);
1463ca460b3cSDennis Zhou (Facebook) if (!chunk->md_blocks)
1464ca460b3cSDennis Zhou (Facebook) goto md_blocks_fail;
1465ca460b3cSDennis Zhou (Facebook)
14663c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
1467faf65ddeSRoman Gushchin if (!mem_cgroup_kmem_disabled()) {
14683c7be18aSRoman Gushchin chunk->obj_cgroups =
14693c7be18aSRoman Gushchin pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
14703c7be18aSRoman Gushchin sizeof(struct obj_cgroup *), gfp);
14713c7be18aSRoman Gushchin if (!chunk->obj_cgroups)
14723c7be18aSRoman Gushchin goto objcg_fail;
14733c7be18aSRoman Gushchin }
14743c7be18aSRoman Gushchin #endif
14753c7be18aSRoman Gushchin
1476ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk);
1477ca460b3cSDennis Zhou (Facebook)
147840064aecSDennis Zhou (Facebook) /* init metadata */
147940064aecSDennis Zhou (Facebook) chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1480c0ebfdc3SDennis Zhou (Facebook)
14816081089fSTejun Heo return chunk;
148240064aecSDennis Zhou (Facebook)
14833c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
14843c7be18aSRoman Gushchin objcg_fail:
14853c7be18aSRoman Gushchin pcpu_mem_free(chunk->md_blocks);
14863c7be18aSRoman Gushchin #endif
1487ca460b3cSDennis Zhou (Facebook) md_blocks_fail:
1488ca460b3cSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map);
148940064aecSDennis Zhou (Facebook) bound_map_fail:
149040064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map);
149140064aecSDennis Zhou (Facebook) alloc_map_fail:
149240064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk);
149340064aecSDennis Zhou (Facebook)
149440064aecSDennis Zhou (Facebook) return NULL;
14956081089fSTejun Heo }
14966081089fSTejun Heo
14976081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk)
14986081089fSTejun Heo {
14996081089fSTejun Heo if (!chunk)
15006081089fSTejun Heo return;
15013c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
15023c7be18aSRoman Gushchin pcpu_mem_free(chunk->obj_cgroups);
15033c7be18aSRoman Gushchin #endif
15046685b357SMike Rapoport pcpu_mem_free(chunk->md_blocks);
150540064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map);
150640064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map);
15071d5cfdb0STetsuo Handa pcpu_mem_free(chunk);
15086081089fSTejun Heo }
15096081089fSTejun Heo
1510b539b87fSTejun Heo /**
1511b539b87fSTejun Heo * pcpu_chunk_populated - post-population bookkeeping
1512b539b87fSTejun Heo * @chunk: pcpu_chunk which got populated
1513b539b87fSTejun Heo * @page_start: the start page
1514b539b87fSTejun Heo * @page_end: the end page
1515b539b87fSTejun Heo *
1516b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1517b539b87fSTejun Heo * the bookkeeping information accordingly. Must be called after each
1518b539b87fSTejun Heo * successful population.
1519b539b87fSTejun Heo */
152040064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1521b239f7daSDennis Zhou int page_end)
1522b539b87fSTejun Heo {
1523b539b87fSTejun Heo int nr = page_end - page_start;
1524b539b87fSTejun Heo
1525b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock);
1526b539b87fSTejun Heo
1527b539b87fSTejun Heo bitmap_set(chunk->populated, page_start, nr);
1528b539b87fSTejun Heo chunk->nr_populated += nr;
15297e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += nr;
153040064aecSDennis Zhou (Facebook)
1531b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, nr);
153240064aecSDennis Zhou (Facebook) }
1533b539b87fSTejun Heo
1534b539b87fSTejun Heo /**
1535b539b87fSTejun Heo * pcpu_chunk_depopulated - post-depopulation bookkeeping
1536b539b87fSTejun Heo * @chunk: pcpu_chunk which got depopulated
1537b539b87fSTejun Heo * @page_start: the start page
1538b539b87fSTejun Heo * @page_end: the end page
1539b539b87fSTejun Heo *
1540b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1541b539b87fSTejun Heo * Update the bookkeeping information accordingly. Must be called after
1542b539b87fSTejun Heo * each successful depopulation.
1543b539b87fSTejun Heo */
1544b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1545b539b87fSTejun Heo int page_start, int page_end)
1546b539b87fSTejun Heo {
1547b539b87fSTejun Heo int nr = page_end - page_start;
1548b539b87fSTejun Heo
1549b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock);
1550b539b87fSTejun Heo
1551b539b87fSTejun Heo bitmap_clear(chunk->populated, page_start, nr);
1552b539b87fSTejun Heo chunk->nr_populated -= nr;
15537e8a6304SDennis Zhou (Facebook) pcpu_nr_populated -= nr;
1554b239f7daSDennis Zhou
1555b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, -nr);
1556b539b87fSTejun Heo }
1557b539b87fSTejun Heo
1558fbf59bc9STejun Heo /*
15599f645532STejun Heo * Chunk management implementation.
1560fbf59bc9STejun Heo *
15619f645532STejun Heo * To allow different implementations, chunk alloc/free and
15629f645532STejun Heo * [de]population are implemented in a separate file which is pulled
15639f645532STejun Heo * into this file and compiled together. The following functions
15649f645532STejun Heo * should be implemented.
1565ccea34b5STejun Heo *
15669f645532STejun Heo * pcpu_populate_chunk - populate the specified range of a chunk
15679f645532STejun Heo * pcpu_depopulate_chunk - depopulate the specified range of a chunk
156893274f1dSDennis Zhou * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
15699f645532STejun Heo * pcpu_create_chunk - create a new chunk
15709f645532STejun Heo * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
15719f645532STejun Heo * pcpu_addr_to_page - translate address to physical address
15729f645532STejun Heo * pcpu_verify_alloc_info - check alloc_info is acceptable during init
1573fbf59bc9STejun Heo */
157415d9f3d1SDennis Zhou static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
157547504ee0SDennis Zhou int page_start, int page_end, gfp_t gfp);
157615d9f3d1SDennis Zhou static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
157715d9f3d1SDennis Zhou int page_start, int page_end);
157893274f1dSDennis Zhou static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
157993274f1dSDennis Zhou int page_start, int page_end);
1580faf65ddeSRoman Gushchin static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
15819f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
15829f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr);
15839f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1584fbf59bc9STejun Heo
1585b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM
1586b0c9778bSTejun Heo #include "percpu-km.c"
1587b0c9778bSTejun Heo #else
15889f645532STejun Heo #include "percpu-vm.c"
1589b0c9778bSTejun Heo #endif
1590fbf59bc9STejun Heo
1591fbf59bc9STejun Heo /**
159288999a89STejun Heo * pcpu_chunk_addr_search - determine chunk containing specified address
159388999a89STejun Heo * @addr: address for which the chunk needs to be determined.
159488999a89STejun Heo *
1595c0ebfdc3SDennis Zhou (Facebook) * This is an internal function that handles all but static allocations.
1596c0ebfdc3SDennis Zhou (Facebook) * Static percpu address values should never be passed into the allocator.
1597c0ebfdc3SDennis Zhou (Facebook) *
159888999a89STejun Heo * RETURNS:
159988999a89STejun Heo * The address of the found chunk.
160088999a89STejun Heo */
160188999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
160288999a89STejun Heo {
1603c0ebfdc3SDennis Zhou (Facebook) /* is it in the dynamic region (first chunk)? */
1604560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1605c0ebfdc3SDennis Zhou (Facebook) return pcpu_first_chunk;
1606c0ebfdc3SDennis Zhou (Facebook)
1607c0ebfdc3SDennis Zhou (Facebook) /* is it in the reserved region? */
1608560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
160988999a89STejun Heo return pcpu_reserved_chunk;
161088999a89STejun Heo
161188999a89STejun Heo /*
161288999a89STejun Heo * The address is relative to unit0 which might be unused and
161388999a89STejun Heo * thus unmapped. Offset the address to the unit space of the
161488999a89STejun Heo * current processor before looking it up in the vmalloc
161588999a89STejun Heo * space. Note that any possible cpu id can be used here, so
161688999a89STejun Heo * there's no need to worry about preemption or cpu hotplug.
161788999a89STejun Heo */
161888999a89STejun Heo addr += pcpu_unit_offsets[raw_smp_processor_id()];
16199f645532STejun Heo return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
162088999a89STejun Heo }
162188999a89STejun Heo
16223c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
1623faf65ddeSRoman Gushchin static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
16243c7be18aSRoman Gushchin struct obj_cgroup **objcgp)
16253c7be18aSRoman Gushchin {
16263c7be18aSRoman Gushchin struct obj_cgroup *objcg;
16273c7be18aSRoman Gushchin
1628f7a449f7SRoman Gushchin if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT))
1629faf65ddeSRoman Gushchin return true;
16303c7be18aSRoman Gushchin
1631c63b835dSRoman Gushchin objcg = current_obj_cgroup();
16323c7be18aSRoman Gushchin if (!objcg)
1633faf65ddeSRoman Gushchin return true;
16343c7be18aSRoman Gushchin
1635c63b835dSRoman Gushchin if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size)))
1636faf65ddeSRoman Gushchin return false;
16373c7be18aSRoman Gushchin
16383c7be18aSRoman Gushchin *objcgp = objcg;
1639faf65ddeSRoman Gushchin return true;
16403c7be18aSRoman Gushchin }
16413c7be18aSRoman Gushchin
16423c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
16433c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off,
16443c7be18aSRoman Gushchin size_t size)
16453c7be18aSRoman Gushchin {
16463c7be18aSRoman Gushchin if (!objcg)
16473c7be18aSRoman Gushchin return;
16483c7be18aSRoman Gushchin
1649faf65ddeSRoman Gushchin if (likely(chunk && chunk->obj_cgroups)) {
1650c63b835dSRoman Gushchin obj_cgroup_get(objcg);
16513c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1652772616b0SRoman Gushchin
1653772616b0SRoman Gushchin rcu_read_lock();
1654772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
16558c57c077SQi Zheng pcpu_obj_full_size(size));
1656772616b0SRoman Gushchin rcu_read_unlock();
16573c7be18aSRoman Gushchin } else {
16588c57c077SQi Zheng obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
16593c7be18aSRoman Gushchin }
16603c7be18aSRoman Gushchin }
16613c7be18aSRoman Gushchin
16623c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
16633c7be18aSRoman Gushchin {
16643c7be18aSRoman Gushchin struct obj_cgroup *objcg;
16653c7be18aSRoman Gushchin
1666faf65ddeSRoman Gushchin if (unlikely(!chunk->obj_cgroups))
16673c7be18aSRoman Gushchin return;
16683c7be18aSRoman Gushchin
16693c7be18aSRoman Gushchin objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1670faf65ddeSRoman Gushchin if (!objcg)
1671faf65ddeSRoman Gushchin return;
16723c7be18aSRoman Gushchin chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
16733c7be18aSRoman Gushchin
16748c57c077SQi Zheng obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
16753c7be18aSRoman Gushchin
1676772616b0SRoman Gushchin rcu_read_lock();
1677772616b0SRoman Gushchin mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
16788c57c077SQi Zheng -pcpu_obj_full_size(size));
1679772616b0SRoman Gushchin rcu_read_unlock();
1680772616b0SRoman Gushchin
16813c7be18aSRoman Gushchin obj_cgroup_put(objcg);
16823c7be18aSRoman Gushchin }
16833c7be18aSRoman Gushchin
16843c7be18aSRoman Gushchin #else /* CONFIG_MEMCG_KMEM */
1685faf65ddeSRoman Gushchin static bool
16863c7be18aSRoman Gushchin pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
16873c7be18aSRoman Gushchin {
1688faf65ddeSRoman Gushchin return true;
16893c7be18aSRoman Gushchin }
16903c7be18aSRoman Gushchin
16913c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
16923c7be18aSRoman Gushchin struct pcpu_chunk *chunk, int off,
16933c7be18aSRoman Gushchin size_t size)
16943c7be18aSRoman Gushchin {
16953c7be18aSRoman Gushchin }
16963c7be18aSRoman Gushchin
16973c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
16983c7be18aSRoman Gushchin {
16993c7be18aSRoman Gushchin }
17003c7be18aSRoman Gushchin #endif /* CONFIG_MEMCG_KMEM */
17013c7be18aSRoman Gushchin
170288999a89STejun Heo /**
1703edcb4639STejun Heo * pcpu_alloc - the percpu allocator
1704cae3aeb8STejun Heo * @size: size of area to allocate in bytes
1705fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE)
1706edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available
17075835d96eSTejun Heo * @gfp: allocation flags
1708fbf59bc9STejun Heo *
17095835d96eSTejun Heo * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
17100ea7eeecSDaniel Borkmann * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
17110ea7eeecSDaniel Borkmann * then no warning will be triggered on invalid or failed allocation
17120ea7eeecSDaniel Borkmann * requests.
1713fbf59bc9STejun Heo *
1714fbf59bc9STejun Heo * RETURNS:
1715fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure.
1716fbf59bc9STejun Heo */
17175835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
17185835d96eSTejun Heo gfp_t gfp)
1719fbf59bc9STejun Heo {
172028307d93SFilipe Manana gfp_t pcpu_gfp;
172128307d93SFilipe Manana bool is_atomic;
172228307d93SFilipe Manana bool do_warn;
17233c7be18aSRoman Gushchin struct obj_cgroup *objcg = NULL;
1724f2badb0cSTejun Heo static int warn_limit = 10;
17258744d859SDennis Zhou struct pcpu_chunk *chunk, *next;
1726f2badb0cSTejun Heo const char *err;
172740064aecSDennis Zhou (Facebook) int slot, off, cpu, ret;
1728403a91b1SJiri Kosina unsigned long flags;
1729f528f0b8SCatalin Marinas void __percpu *ptr;
173040064aecSDennis Zhou (Facebook) size_t bits, bit_align;
1731fbf59bc9STejun Heo
173228307d93SFilipe Manana gfp = current_gfp_context(gfp);
173328307d93SFilipe Manana /* whitelisted flags that can be passed to the backing allocators */
173428307d93SFilipe Manana pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
173528307d93SFilipe Manana is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
173628307d93SFilipe Manana do_warn = !(gfp & __GFP_NOWARN);
173728307d93SFilipe Manana
1738723ad1d9SAl Viro /*
173940064aecSDennis Zhou (Facebook) * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
174040064aecSDennis Zhou (Facebook) * therefore alignment must be a minimum of that many bytes.
174140064aecSDennis Zhou (Facebook) * An allocation may have internal fragmentation from rounding up
174240064aecSDennis Zhou (Facebook) * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
1743723ad1d9SAl Viro */
1744d2f3c384SDennis Zhou (Facebook) if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1745d2f3c384SDennis Zhou (Facebook) align = PCPU_MIN_ALLOC_SIZE;
1746723ad1d9SAl Viro
1747d2f3c384SDennis Zhou (Facebook) size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
174840064aecSDennis Zhou (Facebook) bits = size >> PCPU_MIN_ALLOC_SHIFT;
174940064aecSDennis Zhou (Facebook) bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
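
/*
 * Illustrative example (hypothetical request, assuming
 * PCPU_MIN_ALLOC_SIZE == 4): a request for 13 bytes with align 2 is bumped
 * to align 4 and rounded up to 16 bytes, giving bits == 4 and bit_align == 1.
 */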
17502f69fa82SAl Viro
17513ca45a46Szijun_hu if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
17523ca45a46Szijun_hu !is_power_of_2(align))) {
17530ea7eeecSDaniel Borkmann WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1754756a025fSJoe Perches size, align);
1755fbf59bc9STejun Heo return NULL;
1756fbf59bc9STejun Heo }
1757fbf59bc9STejun Heo
1758faf65ddeSRoman Gushchin if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
17593c7be18aSRoman Gushchin return NULL;
17603c7be18aSRoman Gushchin
1761f52ba1feSKirill Tkhai if (!is_atomic) {
1762f52ba1feSKirill Tkhai /*
1763f52ba1feSKirill Tkhai * pcpu_balance_workfn() allocates memory under this mutex,
1764f52ba1feSKirill Tkhai * and it may wait for memory reclaim. Allow current task
1765f52ba1feSKirill Tkhai * to become OOM victim, in case of memory pressure.
1766f52ba1feSKirill Tkhai */
17673c7be18aSRoman Gushchin if (gfp & __GFP_NOFAIL) {
17686710e594STejun Heo mutex_lock(&pcpu_alloc_mutex);
17693c7be18aSRoman Gushchin } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
17703c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1771f52ba1feSKirill Tkhai return NULL;
1772f52ba1feSKirill Tkhai }
17733c7be18aSRoman Gushchin }
17746710e594STejun Heo
1775403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags);
1776fbf59bc9STejun Heo
1777edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */
1778edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) {
1779edcb4639STejun Heo chunk = pcpu_reserved_chunk;
1780833af842STejun Heo
178140064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
178240064aecSDennis Zhou (Facebook) if (off < 0) {
1783833af842STejun Heo err = "alloc from reserved chunk failed";
1784ccea34b5STejun Heo goto fail_unlock;
1785f2badb0cSTejun Heo }
1786833af842STejun Heo
178740064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off);
1788edcb4639STejun Heo if (off >= 0)
1789edcb4639STejun Heo goto area_found;
1790833af842STejun Heo
1791f2badb0cSTejun Heo err = "alloc from reserved chunk failed";
1792ccea34b5STejun Heo goto fail_unlock;
1793edcb4639STejun Heo }
1794edcb4639STejun Heo
1795ccea34b5STejun Heo restart:
1796edcb4639STejun Heo /* search through normal chunks */
1797f1833241SRoman Gushchin for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
1798faf65ddeSRoman Gushchin list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1799faf65ddeSRoman Gushchin list) {
180040064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align,
180140064aecSDennis Zhou (Facebook) is_atomic);
18028744d859SDennis Zhou if (off < 0) {
18038744d859SDennis Zhou if (slot < PCPU_SLOT_FAIL_THRESHOLD)
18048744d859SDennis Zhou pcpu_chunk_move(chunk, 0);
1805fbf59bc9STejun Heo continue;
18068744d859SDennis Zhou }
1807ccea34b5STejun Heo
180840064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off);
1809f1833241SRoman Gushchin if (off >= 0) {
1810f1833241SRoman Gushchin pcpu_reintegrate_chunk(chunk);
1811fbf59bc9STejun Heo goto area_found;
1812f1833241SRoman Gushchin }
1813fbf59bc9STejun Heo }
1814fbf59bc9STejun Heo }
1815fbf59bc9STejun Heo
1816403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags);
1817ccea34b5STejun Heo
181811df02bfSDennis Zhou if (is_atomic) {
181911df02bfSDennis Zhou err = "atomic alloc failed, no space left";
18205835d96eSTejun Heo goto fail;
182111df02bfSDennis Zhou }
18225835d96eSTejun Heo
1823e04cb697SBaoquan He /* No space left. Create a new chunk. */
1824faf65ddeSRoman Gushchin if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1825faf65ddeSRoman Gushchin chunk = pcpu_create_chunk(pcpu_gfp);
1826f2badb0cSTejun Heo if (!chunk) {
1827f2badb0cSTejun Heo err = "failed to allocate new chunk";
1828b38d08f3STejun Heo goto fail;
1829f2badb0cSTejun Heo }
1830ccea34b5STejun Heo
1831403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags);
1832fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1);
1833b38d08f3STejun Heo } else {
1834b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags);
1835b38d08f3STejun Heo }
1836b38d08f3STejun Heo
1837ccea34b5STejun Heo goto restart;
1838fbf59bc9STejun Heo
1839fbf59bc9STejun Heo area_found:
184030a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size);
1841403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags);
1842ccea34b5STejun Heo
1843dca49645STejun Heo /* populate if not all pages are already there */
18445835d96eSTejun Heo if (!is_atomic) {
1845ec288a2cSYury Norov unsigned int page_end, rs, re;
1846e04d3208STejun Heo
1847ec288a2cSYury Norov rs = PFN_DOWN(off);
1848dca49645STejun Heo page_end = PFN_UP(off + size);
1849dca49645STejun Heo
1850ec288a2cSYury Norov for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
1851dca49645STejun Heo WARN_ON(chunk->immutable);
1852dca49645STejun Heo
1853554fef1cSDennis Zhou ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1854b38d08f3STejun Heo
1855403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags);
1856b38d08f3STejun Heo if (ret) {
185740064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off);
1858f2badb0cSTejun Heo err = "failed to populate";
1859ccea34b5STejun Heo goto fail_unlock;
1860fbf59bc9STejun Heo }
1861b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, re);
1862b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags);
1863dca49645STejun Heo }
1864dca49645STejun Heo
1865ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex);
1866e04d3208STejun Heo }
1867ccea34b5STejun Heo
1868faf65ddeSRoman Gushchin if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
18691a4d7607STejun Heo pcpu_schedule_balance_work();
18701a4d7607STejun Heo
1871dca49645STejun Heo /* clear the areas and return address relative to base address */
1872dca49645STejun Heo for_each_possible_cpu(cpu)
1873dca49645STejun Heo memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1874dca49645STejun Heo
1875f528f0b8SCatalin Marinas ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
18768a8c35faSLarry Finger kmemleak_alloc_percpu(ptr, size, gfp);
1877df95e795SDennis Zhou
1878f67bed13SVasily Averin trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
1879f67bed13SVasily Averin chunk->base_addr, off, ptr,
1880f67bed13SVasily Averin pcpu_obj_full_size(size), gfp);
1881df95e795SDennis Zhou
18823c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
18833c7be18aSRoman Gushchin
1884f528f0b8SCatalin Marinas return ptr;
1885ccea34b5STejun Heo
1886ccea34b5STejun Heo fail_unlock:
1887403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags);
1888b38d08f3STejun Heo fail:
1889df95e795SDennis Zhou trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1890df95e795SDennis Zhou
1891f7d77dfcSBaoquan He if (do_warn && warn_limit) {
1892870d4b12SJoe Perches pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
18935835d96eSTejun Heo size, align, is_atomic, err);
1894f7d77dfcSBaoquan He if (!is_atomic)
1895f2badb0cSTejun Heo dump_stack();
1896f2badb0cSTejun Heo if (!--warn_limit)
1897870d4b12SJoe Perches pr_info("limit reached, disable warning\n");
1898f2badb0cSTejun Heo }
1899f7d77dfcSBaoquan He
19001a4d7607STejun Heo if (is_atomic) {
1901f0953a1bSIngo Molnar /* see the flag handling in pcpu_balance_workfn() */
19021a4d7607STejun Heo pcpu_atomic_alloc_failed = true;
19031a4d7607STejun Heo pcpu_schedule_balance_work();
19046710e594STejun Heo } else {
19056710e594STejun Heo mutex_unlock(&pcpu_alloc_mutex);
19061a4d7607STejun Heo }
19073c7be18aSRoman Gushchin
19083c7be18aSRoman Gushchin pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
19093c7be18aSRoman Gushchin
1910ccea34b5STejun Heo return NULL;
1911fbf59bc9STejun Heo }
1912edcb4639STejun Heo
1913edcb4639STejun Heo /**
19145835d96eSTejun Heo * __alloc_percpu_gfp - allocate dynamic percpu area
1915edcb4639STejun Heo * @size: size of area to allocate in bytes
1916edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE)
19175835d96eSTejun Heo * @gfp: allocation flags
1918edcb4639STejun Heo *
19195835d96eSTejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align. If
19205835d96eSTejun Heo * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
19210ea7eeecSDaniel Borkmann * be called from any context but is a lot more likely to fail. If @gfp
19220ea7eeecSDaniel Borkmann * has __GFP_NOWARN then no warning will be triggered on invalid or failed
19230ea7eeecSDaniel Borkmann * allocation requests.
1924ccea34b5STejun Heo *
1925edcb4639STejun Heo * RETURNS:
1926edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure.
1927edcb4639STejun Heo */
19285835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
19295835d96eSTejun Heo {
19305835d96eSTejun Heo return pcpu_alloc(size, align, false, gfp);
19315835d96eSTejun Heo }
19325835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
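/*
 * Illustrative usage sketch (not part of this file): an atomic-context
 * caller passing a non-blocking gfp mask.  The struct and field names
 * below are hypothetical.
 *
 *	struct foo_stats __percpu *stats;
 *
 *	stats = __alloc_percpu_gfp(sizeof(*stats), __alignof__(*stats),
 *				   GFP_NOWAIT | __GFP_NOWARN);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->hits);
 */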
19335835d96eSTejun Heo
19345835d96eSTejun Heo /**
19355835d96eSTejun Heo * __alloc_percpu - allocate dynamic percpu area
19365835d96eSTejun Heo * @size: size of area to allocate in bytes
19375835d96eSTejun Heo * @align: alignment of area (max PAGE_SIZE)
19385835d96eSTejun Heo *
19395835d96eSTejun Heo * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
19405835d96eSTejun Heo */
194143cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align)
1942edcb4639STejun Heo {
19435835d96eSTejun Heo return pcpu_alloc(size, align, false, GFP_KERNEL);
1944edcb4639STejun Heo }
1945fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu);
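/*
 * Illustrative usage sketch (hypothetical caller): the common lifecycle of
 * a dynamic percpu allocation: allocate, update locally, sum across CPUs,
 * then free.  alloc_percpu() is the typed convenience wrapper around
 * __alloc_percpu() provided by include/linux/percpu.h.
 *
 *	unsigned long __percpu *counters = alloc_percpu(unsigned long);
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	if (!counters)
 *		return -ENOMEM;
 *	this_cpu_inc(*counters);
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counters, cpu);
 *	free_percpu(counters);
 */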
1946fbf59bc9STejun Heo
1947edcb4639STejun Heo /**
1948edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area
1949edcb4639STejun Heo * @size: size of area to allocate in bytes
1950edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE)
1951edcb4639STejun Heo *
19529329ba97STejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align
19539329ba97STejun Heo * from reserved percpu area if arch has set it up; otherwise,
19549329ba97STejun Heo * allocation is served from the same dynamic area. Might sleep.
19559329ba97STejun Heo * Might trigger writeouts.
1956edcb4639STejun Heo *
1957ccea34b5STejun Heo * CONTEXT:
1958ccea34b5STejun Heo * Does GFP_KERNEL allocation.
1959ccea34b5STejun Heo *
1960edcb4639STejun Heo * RETURNS:
1961edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure.
1962edcb4639STejun Heo */
196343cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1964edcb4639STejun Heo {
19655835d96eSTejun Heo return pcpu_alloc(size, align, true, GFP_KERNEL);
1966edcb4639STejun Heo }
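/*
 * Note: the reserved region primarily backs module static percpu variables
 * (see the description of @ai->reserved_size in pcpu_setup_first_chunk()
 * below), so the typical caller is the module loader.  A hypothetical use
 * follows the same shape as the dynamic API:
 *
 *	void __percpu *p = __alloc_reserved_percpu(size, align);
 *
 *	if (!p)
 *		return -ENOMEM;
 */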
1967edcb4639STejun Heo
1968a56dbddfSTejun Heo /**
196967c2669dSRoman Gushchin * pcpu_balance_free - manage the amount of free chunks
1970f1833241SRoman Gushchin * @empty_only: free chunks only if there are no populated pages
1971a56dbddfSTejun Heo *
1972f1833241SRoman Gushchin * If empty_only is %false, reclaim all fully free chunks regardless of the
1973f1833241SRoman Gushchin * number of populated pages. Otherwise, only reclaim chunks that have no
1974f1833241SRoman Gushchin * populated pages.
1975e4d77700SRoman Gushchin *
1976e4d77700SRoman Gushchin * CONTEXT:
1977e4d77700SRoman Gushchin * pcpu_lock (can be dropped temporarily)
1978a56dbddfSTejun Heo */
1979faf65ddeSRoman Gushchin static void pcpu_balance_free(bool empty_only)
1980fbf59bc9STejun Heo {
1981fe6bd8c3STejun Heo LIST_HEAD(to_free);
1982faf65ddeSRoman Gushchin struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
1983a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next;
1984a56dbddfSTejun Heo
1985e4d77700SRoman Gushchin lockdep_assert_held(&pcpu_lock);
1986a56dbddfSTejun Heo
19871a4d7607STejun Heo /*
19881a4d7607STejun Heo * There's no reason to keep around multiple unused chunks and VM
19891a4d7607STejun Heo * areas can be scarce. Destroy all free chunks except for one.
19901a4d7607STejun Heo */
1991fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, free_head, list) {
19928d408b4bSTejun Heo WARN_ON(chunk->immutable);
1993a56dbddfSTejun Heo
1994a56dbddfSTejun Heo /* spare the first one */
1995fe6bd8c3STejun Heo if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1996a56dbddfSTejun Heo continue;
1997a56dbddfSTejun Heo
1998f1833241SRoman Gushchin if (!empty_only || chunk->nr_empty_pop_pages == 0)
1999fe6bd8c3STejun Heo list_move(&chunk->list, &to_free);
2000a56dbddfSTejun Heo }
2001a56dbddfSTejun Heo
2002e4d77700SRoman Gushchin if (list_empty(&to_free))
2003e4d77700SRoman Gushchin return;
2004a56dbddfSTejun Heo
2005e4d77700SRoman Gushchin spin_unlock_irq(&pcpu_lock);
2006fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, &to_free, list) {
2007e837dfdeSDennis Zhou unsigned int rs, re;
2008dca49645STejun Heo
2009ec288a2cSYury Norov for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
2010a93ace48STejun Heo pcpu_depopulate_chunk(chunk, rs, re);
2011b539b87fSTejun Heo spin_lock_irq(&pcpu_lock);
2012b539b87fSTejun Heo pcpu_chunk_depopulated(chunk, rs, re);
2013b539b87fSTejun Heo spin_unlock_irq(&pcpu_lock);
2014a93ace48STejun Heo }
20156081089fSTejun Heo pcpu_destroy_chunk(chunk);
2016accd4f36SEric Dumazet cond_resched();
2017fbf59bc9STejun Heo }
2018e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock);
201967c2669dSRoman Gushchin }
202067c2669dSRoman Gushchin
202167c2669dSRoman Gushchin /**
202267c2669dSRoman Gushchin * pcpu_balance_populated - manage the amount of populated pages
202367c2669dSRoman Gushchin *
202467c2669dSRoman Gushchin * Maintain a certain amount of populated pages to satisfy atomic allocations.
202567c2669dSRoman Gushchin * It is possible that this is called when physical memory is scarce, causing
202667c2669dSRoman Gushchin * the OOM killer to be triggered. We should avoid doing so until an actual
202767c2669dSRoman Gushchin * allocation causes the failure, as it is possible that requests can be
202867c2669dSRoman Gushchin * serviced from already backed regions.
2029e4d77700SRoman Gushchin *
2030e4d77700SRoman Gushchin * CONTEXT:
2031e4d77700SRoman Gushchin * pcpu_lock (can be dropped temporarily)
203267c2669dSRoman Gushchin */
2033faf65ddeSRoman Gushchin static void pcpu_balance_populated(void)
203467c2669dSRoman Gushchin {
203567c2669dSRoman Gushchin /* gfp flags passed to underlying allocators */
203667c2669dSRoman Gushchin const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
203767c2669dSRoman Gushchin struct pcpu_chunk *chunk;
203867c2669dSRoman Gushchin int slot, nr_to_pop, ret;
2039971f3918STejun Heo
2040e4d77700SRoman Gushchin lockdep_assert_held(&pcpu_lock);
2041971f3918STejun Heo
20421a4d7607STejun Heo /*
20431a4d7607STejun Heo * Ensure there is a certain number of free populated pages for
20441a4d7607STejun Heo * atomic allocs. Fill up from the most packed so that atomic
20451a4d7607STejun Heo * allocs don't increase fragmentation. If atomic allocation
20461a4d7607STejun Heo * failed previously, always populate the maximum amount. This
20471a4d7607STejun Heo * should prevent atomic allocs larger than PAGE_SIZE from keeping
20481a4d7607STejun Heo * failing indefinitely; however, large atomic allocs are not
20491a4d7607STejun Heo * something we support properly and can be highly unreliable and
20501a4d7607STejun Heo * inefficient.
20511a4d7607STejun Heo */
20521a4d7607STejun Heo retry_pop:
20531a4d7607STejun Heo if (pcpu_atomic_alloc_failed) {
20541a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
20551a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */
20561a4d7607STejun Heo pcpu_atomic_alloc_failed = false;
20571a4d7607STejun Heo } else {
20581a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2059faf65ddeSRoman Gushchin pcpu_nr_empty_pop_pages,
20601a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH);
20611a4d7607STejun Heo }
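/*
 * Worked example (illustrative numbers): with PCPU_EMPTY_POP_PAGES_HIGH
 * of, say, 4 and one empty populated page currently available, the clamp
 * above yields nr_to_pop = clamp(4 - 1, 0, 4) = 3 pages to populate below.
 */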
20621a4d7607STejun Heo
20631c29a3ceSDennis Zhou for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
2064e837dfdeSDennis Zhou unsigned int nr_unpop = 0, rs, re;
20651a4d7607STejun Heo
20661a4d7607STejun Heo if (!nr_to_pop)
20671a4d7607STejun Heo break;
20681a4d7607STejun Heo
2069faf65ddeSRoman Gushchin list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
20708ab16c43SDennis Zhou (Facebook) nr_unpop = chunk->nr_pages - chunk->nr_populated;
20711a4d7607STejun Heo if (nr_unpop)
20721a4d7607STejun Heo break;
20731a4d7607STejun Heo }
20741a4d7607STejun Heo
20751a4d7607STejun Heo if (!nr_unpop)
20761a4d7607STejun Heo continue;
20771a4d7607STejun Heo
20781a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */
2079ec288a2cSYury Norov for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
2080e837dfdeSDennis Zhou int nr = min_t(int, re - rs, nr_to_pop);
20811a4d7607STejun Heo
2082e4d77700SRoman Gushchin spin_unlock_irq(&pcpu_lock);
208347504ee0SDennis Zhou ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2084e4d77700SRoman Gushchin cond_resched();
2085e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock);
20861a4d7607STejun Heo if (!ret) {
20871a4d7607STejun Heo nr_to_pop -= nr;
2088b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, rs + nr);
20891a4d7607STejun Heo } else {
20901a4d7607STejun Heo nr_to_pop = 0;
20911a4d7607STejun Heo }
20921a4d7607STejun Heo
20931a4d7607STejun Heo if (!nr_to_pop)
20941a4d7607STejun Heo break;
20951a4d7607STejun Heo }
20961a4d7607STejun Heo }
20971a4d7607STejun Heo
20981a4d7607STejun Heo if (nr_to_pop) {
20991a4d7607STejun Heo /* ran out of chunks to populate, create a new one and retry */
21001a4d7607STejun Heo spin_unlock_irq(&pcpu_lock);
2101e4d77700SRoman Gushchin chunk = pcpu_create_chunk(gfp);
2102e4d77700SRoman Gushchin cond_resched();
2103e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock);
2104e4d77700SRoman Gushchin if (chunk) {
2105e4d77700SRoman Gushchin pcpu_chunk_relocate(chunk, -1);
21061a4d7607STejun Heo goto retry_pop;
21071a4d7607STejun Heo }
21081a4d7607STejun Heo }
2109a56dbddfSTejun Heo }
2110fbf59bc9STejun Heo
2111fbf59bc9STejun Heo /**
2112f1833241SRoman Gushchin * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
2113f1833241SRoman Gushchin *
2114f1833241SRoman Gushchin * Scan over chunks in the depopulate list and try to release unused populated
2115f1833241SRoman Gushchin * pages back to the system. Depopulated chunks are sidelined to prevent
2116f1833241SRoman Gushchin * repopulating these pages unless required. Fully free chunks are reintegrated
2117f1833241SRoman Gushchin * and freed accordingly (1 is kept around). If we drop below the empty
2118f1833241SRoman Gushchin * populated pages threshold, reintegrate the chunk if it has empty free pages.
2119f1833241SRoman Gushchin * Each chunk is scanned in the reverse order to keep populated pages close to
2120f1833241SRoman Gushchin * the beginning of the chunk.
2121e4d77700SRoman Gushchin *
2122e4d77700SRoman Gushchin * CONTEXT:
2123e4d77700SRoman Gushchin * pcpu_lock (can be dropped temporarily)
2124e4d77700SRoman Gushchin *
2125f1833241SRoman Gushchin */
2126faf65ddeSRoman Gushchin static void pcpu_reclaim_populated(void)
2127f1833241SRoman Gushchin {
2128f1833241SRoman Gushchin struct pcpu_chunk *chunk;
2129f1833241SRoman Gushchin struct pcpu_block_md *block;
213093274f1dSDennis Zhou int freed_page_start, freed_page_end;
2131f1833241SRoman Gushchin int i, end;
213293274f1dSDennis Zhou bool reintegrate;
2133f1833241SRoman Gushchin
2134e4d77700SRoman Gushchin lockdep_assert_held(&pcpu_lock);
2135f1833241SRoman Gushchin
2136f1833241SRoman Gushchin /*
2137f1833241SRoman Gushchin * Once a chunk is isolated to the to_depopulate list, the chunk is no
2138f1833241SRoman Gushchin * longer discoverable to allocations which may populate pages. The only
2139f1833241SRoman Gushchin * other accessor is the free path which only returns area back to the
2140f1833241SRoman Gushchin * allocator not touching the populated bitmap.
2141f1833241SRoman Gushchin */
2142c1f6688dSBaoquan He while ((chunk = list_first_entry_or_null(
2143c1f6688dSBaoquan He &pcpu_chunk_lists[pcpu_to_depopulate_slot],
2144c1f6688dSBaoquan He struct pcpu_chunk, list))) {
2145f1833241SRoman Gushchin WARN_ON(chunk->immutable);
2146f1833241SRoman Gushchin
2147f1833241SRoman Gushchin /*
2148f1833241SRoman Gushchin * Scan chunk's pages in the reverse order to keep populated
2149f1833241SRoman Gushchin * pages close to the beginning of the chunk.
2150f1833241SRoman Gushchin */
215193274f1dSDennis Zhou freed_page_start = chunk->nr_pages;
215293274f1dSDennis Zhou freed_page_end = 0;
215393274f1dSDennis Zhou reintegrate = false;
2154f1833241SRoman Gushchin for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2155f1833241SRoman Gushchin /* no more work to do */
2156f1833241SRoman Gushchin if (chunk->nr_empty_pop_pages == 0)
2157f1833241SRoman Gushchin break;
2158f1833241SRoman Gushchin
2159f1833241SRoman Gushchin /* reintegrate chunk to prevent atomic alloc failures */
2160faf65ddeSRoman Gushchin if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
216193274f1dSDennis Zhou reintegrate = true;
216283d261fcSBaoquan He break;
2163f1833241SRoman Gushchin }
2164f1833241SRoman Gushchin
2165f1833241SRoman Gushchin /*
2166f1833241SRoman Gushchin * If the page is empty and populated, start or
2167f1833241SRoman Gushchin * extend the (i, end) range. If i == 0, decrease
2168f1833241SRoman Gushchin * i and perform the depopulation to cover the last
2169f1833241SRoman Gushchin * (first) page in the chunk.
2170f1833241SRoman Gushchin */
2171f1833241SRoman Gushchin block = chunk->md_blocks + i;
2172f1833241SRoman Gushchin if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2173f1833241SRoman Gushchin test_bit(i, chunk->populated)) {
2174f1833241SRoman Gushchin if (end == -1)
2175f1833241SRoman Gushchin end = i;
2176f1833241SRoman Gushchin if (i > 0)
2177f1833241SRoman Gushchin continue;
2178f1833241SRoman Gushchin i--;
2179f1833241SRoman Gushchin }
2180f1833241SRoman Gushchin
2181f1833241SRoman Gushchin /* depopulate if there is an active range */
2182f1833241SRoman Gushchin if (end == -1)
2183f1833241SRoman Gushchin continue;
2184f1833241SRoman Gushchin
2185f1833241SRoman Gushchin spin_unlock_irq(&pcpu_lock);
2186f1833241SRoman Gushchin pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2187f1833241SRoman Gushchin cond_resched();
2188f1833241SRoman Gushchin spin_lock_irq(&pcpu_lock);
2189f1833241SRoman Gushchin
2190f1833241SRoman Gushchin pcpu_chunk_depopulated(chunk, i + 1, end + 1);
219193274f1dSDennis Zhou freed_page_start = min(freed_page_start, i + 1);
219293274f1dSDennis Zhou freed_page_end = max(freed_page_end, end + 1);
2193f1833241SRoman Gushchin
2194f1833241SRoman Gushchin /* reset the range and continue */
2195f1833241SRoman Gushchin end = -1;
2196f1833241SRoman Gushchin }
2197f1833241SRoman Gushchin
219893274f1dSDennis Zhou /* batch tlb flush per chunk to amortize cost */
219993274f1dSDennis Zhou if (freed_page_start < freed_page_end) {
220093274f1dSDennis Zhou spin_unlock_irq(&pcpu_lock);
220193274f1dSDennis Zhou pcpu_post_unmap_tlb_flush(chunk,
220293274f1dSDennis Zhou freed_page_start,
220393274f1dSDennis Zhou freed_page_end);
220493274f1dSDennis Zhou cond_resched();
220593274f1dSDennis Zhou spin_lock_irq(&pcpu_lock);
220693274f1dSDennis Zhou }
220793274f1dSDennis Zhou
220893274f1dSDennis Zhou if (reintegrate || chunk->free_bytes == pcpu_unit_size)
2209f1833241SRoman Gushchin pcpu_reintegrate_chunk(chunk);
2210f1833241SRoman Gushchin else
221193274f1dSDennis Zhou list_move_tail(&chunk->list,
2212faf65ddeSRoman Gushchin &pcpu_chunk_lists[pcpu_sidelined_slot]);
2213f1833241SRoman Gushchin }
2214fbf59bc9STejun Heo }
2215fbf59bc9STejun Heo
2216fbf59bc9STejun Heo /**
22173c7be18aSRoman Gushchin * pcpu_balance_workfn - manage the amount of free chunks and populated pages
22183c7be18aSRoman Gushchin * @work: unused
22193c7be18aSRoman Gushchin *
2220f1833241SRoman Gushchin * For each chunk type, manage the number of fully free chunks and the number of
2221f1833241SRoman Gushchin * populated pages. An important thing to consider is when pages are freed and
2222f1833241SRoman Gushchin * how they contribute to the global counts.
22233c7be18aSRoman Gushchin */
22243c7be18aSRoman Gushchin static void pcpu_balance_workfn(struct work_struct *work)
22253c7be18aSRoman Gushchin {
2226f1833241SRoman Gushchin /*
2227f1833241SRoman Gushchin * pcpu_balance_free() is called twice because the first time we may
2228f1833241SRoman Gushchin * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2229f1833241SRoman Gushchin * to grow other chunks. This then gives pcpu_reclaim_populated() time
2230f1833241SRoman Gushchin * to move fully free chunks to the active list to be freed if
2231f1833241SRoman Gushchin * appropriate.
2232f1833241SRoman Gushchin */
223367c2669dSRoman Gushchin mutex_lock(&pcpu_alloc_mutex);
2234e4d77700SRoman Gushchin spin_lock_irq(&pcpu_lock);
22353c7be18aSRoman Gushchin
2236faf65ddeSRoman Gushchin pcpu_balance_free(false);
2237faf65ddeSRoman Gushchin pcpu_reclaim_populated();
2238faf65ddeSRoman Gushchin pcpu_balance_populated();
2239faf65ddeSRoman Gushchin pcpu_balance_free(true);
2240e4d77700SRoman Gushchin
2241e4d77700SRoman Gushchin spin_unlock_irq(&pcpu_lock);
224267c2669dSRoman Gushchin mutex_unlock(&pcpu_alloc_mutex);
22433c7be18aSRoman Gushchin }
22443c7be18aSRoman Gushchin
22453c7be18aSRoman Gushchin /**
2246b460bc83SHou Tao * pcpu_alloc_size - the size of the dynamic percpu area
2247b460bc83SHou Tao * @ptr: pointer to the dynamic percpu area
2248b460bc83SHou Tao *
2249b460bc83SHou Tao * Returns the size of the @ptr allocation. This is undefined for statically
2250b460bc83SHou Tao * defined percpu variables as there is no corresponding chunk->bound_map.
2251b460bc83SHou Tao *
2252b460bc83SHou Tao * RETURNS:
2253b460bc83SHou Tao * The size of the dynamic percpu area.
2254b460bc83SHou Tao *
2255b460bc83SHou Tao * CONTEXT:
2256b460bc83SHou Tao * Can be called from atomic context.
2257b460bc83SHou Tao */
2258b460bc83SHou Tao size_t pcpu_alloc_size(void __percpu *ptr)
2259b460bc83SHou Tao {
2260b460bc83SHou Tao struct pcpu_chunk *chunk;
2261b460bc83SHou Tao unsigned long bit_off, end;
2262b460bc83SHou Tao void *addr;
2263b460bc83SHou Tao
2264b460bc83SHou Tao if (!ptr)
2265b460bc83SHou Tao return 0;
2266b460bc83SHou Tao
2267b460bc83SHou Tao addr = __pcpu_ptr_to_addr(ptr);
2268b460bc83SHou Tao /* No pcpu_lock here: ptr has not been freed, so chunk is still alive */
2269b460bc83SHou Tao chunk = pcpu_chunk_addr_search(addr);
2270b460bc83SHou Tao bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE;
2271b460bc83SHou Tao end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
2272b460bc83SHou Tao bit_off + 1);
2273b460bc83SHou Tao return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;
2274b460bc83SHou Tao }
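/*
 * Illustrative usage sketch (hypothetical caller): querying how much space
 * actually backs a dynamic percpu allocation.  The reported size may exceed
 * the requested size because areas are tracked in PCPU_MIN_ALLOC_SIZE
 * granules.
 *
 *	void __percpu *p = __alloc_percpu(13, 8);
 *
 *	if (p)
 *		pr_debug("backed by %zu bytes per cpu\n", pcpu_alloc_size(p));
 */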
2275b460bc83SHou Tao
2276b460bc83SHou Tao /**
2277fbf59bc9STejun Heo * free_percpu - free percpu area
2278fbf59bc9STejun Heo * @ptr: pointer to area to free
2279fbf59bc9STejun Heo *
2280ccea34b5STejun Heo * Free percpu area @ptr.
2281ccea34b5STejun Heo *
2282ccea34b5STejun Heo * CONTEXT:
2283ccea34b5STejun Heo * Can be called from atomic context.
2284fbf59bc9STejun Heo */
228643cf38ebSTejun Heo void free_percpu(void __percpu *ptr)
2286fbf59bc9STejun Heo {
2287129182e5SAndrew Morton void *addr;
2288fbf59bc9STejun Heo struct pcpu_chunk *chunk;
2289ccea34b5STejun Heo unsigned long flags;
22903c7be18aSRoman Gushchin int size, off;
2291198790d9SJohn Sperbeck bool need_balance = false;
2292fbf59bc9STejun Heo
2293fbf59bc9STejun Heo if (!ptr)
2294fbf59bc9STejun Heo return;
2295fbf59bc9STejun Heo
2296f528f0b8SCatalin Marinas kmemleak_free_percpu(ptr);
2297f528f0b8SCatalin Marinas
2298129182e5SAndrew Morton addr = __pcpu_ptr_to_addr(ptr);
2299fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr);
2300bba174f5STejun Heo off = addr - chunk->base_addr;
2301fbf59bc9STejun Heo
2302394e6869SHou Tao spin_lock_irqsave(&pcpu_lock, flags);
23033c7be18aSRoman Gushchin size = pcpu_free_area(chunk, off);
23043c7be18aSRoman Gushchin
23053c7be18aSRoman Gushchin pcpu_memcg_free_hook(chunk, off, size);
2306fbf59bc9STejun Heo
2307f1833241SRoman Gushchin /*
2308f1833241SRoman Gushchin * If there is more than one fully free chunk, wake up the grim reaper.
2309f1833241SRoman Gushchin * If the chunk is isolated, it may be in the process of being
2310f1833241SRoman Gushchin * reclaimed. Let reclaim manage cleaning up of that chunk.
2311f1833241SRoman Gushchin */
2312f1833241SRoman Gushchin if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
2313fbf59bc9STejun Heo struct pcpu_chunk *pos;
2314fbf59bc9STejun Heo
2315faf65ddeSRoman Gushchin list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
2316fbf59bc9STejun Heo if (pos != chunk) {
2317198790d9SJohn Sperbeck need_balance = true;
2318fbf59bc9STejun Heo break;
2319fbf59bc9STejun Heo }
2320f1833241SRoman Gushchin } else if (pcpu_should_reclaim_chunk(chunk)) {
2321f1833241SRoman Gushchin pcpu_isolate_chunk(chunk);
2322f1833241SRoman Gushchin need_balance = true;
2323fbf59bc9STejun Heo }
2324fbf59bc9STejun Heo
2325df95e795SDennis Zhou trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2326df95e795SDennis Zhou
2327ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags);
2328198790d9SJohn Sperbeck
2329198790d9SJohn Sperbeck if (need_balance)
2330198790d9SJohn Sperbeck pcpu_schedule_balance_work();
2331fbf59bc9STejun Heo }
2332fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu);
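/*
 * Illustrative sketch (hypothetical caller): since free_percpu() may be
 * called from atomic context (see above), percpu memory can be released
 * directly from e.g. an RCU callback.
 *
 *	static void foo_rcu_free(struct rcu_head *head)
 *	{
 *		struct foo *f = container_of(head, struct foo, rcu);
 *
 *		free_percpu(f->stats);
 *		kfree(f);
 *	}
 */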
2333fbf59bc9STejun Heo
2334383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2335383776faSThomas Gleixner {
2336383776faSThomas Gleixner #ifdef CONFIG_SMP
2337383776faSThomas Gleixner const size_t static_size = __per_cpu_end - __per_cpu_start;
2338383776faSThomas Gleixner void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2339383776faSThomas Gleixner unsigned int cpu;
2340383776faSThomas Gleixner
2341383776faSThomas Gleixner for_each_possible_cpu(cpu) {
2342383776faSThomas Gleixner void *start = per_cpu_ptr(base, cpu);
2343383776faSThomas Gleixner void *va = (void *)addr;
2344383776faSThomas Gleixner
2345383776faSThomas Gleixner if (va >= start && va < start + static_size) {
23468ce371f9SPeter Zijlstra if (can_addr) {
2347383776faSThomas Gleixner *can_addr = (unsigned long) (va - start);
23488ce371f9SPeter Zijlstra *can_addr += (unsigned long)
23498ce371f9SPeter Zijlstra per_cpu_ptr(base, get_boot_cpu_id());
23508ce371f9SPeter Zijlstra }
2351383776faSThomas Gleixner return true;
2352383776faSThomas Gleixner }
2353383776faSThomas Gleixner }
2354383776faSThomas Gleixner #endif
2355383776faSThomas Gleixner /* on UP, can't distinguish from other static vars, always false */
2356383776faSThomas Gleixner return false;
2357383776faSThomas Gleixner }
2358383776faSThomas Gleixner
23593b034b0dSVivek Goyal /**
236010fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area
236110fad5e4STejun Heo * @addr: address to test
236210fad5e4STejun Heo *
236310fad5e4STejun Heo * Test whether @addr belongs to in-kernel static percpu area. Module
236410fad5e4STejun Heo * static percpu areas are not considered. For those, use
236510fad5e4STejun Heo * is_module_percpu_address().
236610fad5e4STejun Heo *
236710fad5e4STejun Heo * RETURNS:
236810fad5e4STejun Heo * %true if @addr is from in-kernel static percpu area, %false otherwise.
236910fad5e4STejun Heo */
237010fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr)
237110fad5e4STejun Heo {
2372383776faSThomas Gleixner return __is_kernel_percpu_address(addr, NULL);
237310fad5e4STejun Heo }
237410fad5e4STejun Heo
237510fad5e4STejun Heo /**
23763b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address
23773b034b0dSVivek Goyal * @addr: the address to be converted to physical address
23783b034b0dSVivek Goyal *
23793b034b0dSVivek Goyal * Given @addr, which is a dereferenceable address obtained via one of
23803b034b0dSVivek Goyal * percpu access macros, this function translates it into its physical
23813b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid
23823b034b0dSVivek Goyal * until this function finishes.
23833b034b0dSVivek Goyal *
238467589c71SDave Young * percpu allocator has special setup for the first chunk, which currently
238567589c71SDave Young * supports either embedding in linear address space or vmalloc mapping,
238667589c71SDave Young * and, from the second chunk on, the backing allocator (currently either vm or
238767589c71SDave Young * km) provides translation.
238867589c71SDave Young *
2389bffc4375SYannick Guerrini * The addr can be translated simply without checking if it falls into the
239067589c71SDave Young * first chunk. But the current code reflects better how percpu allocator
239167589c71SDave Young * actually works, and the verification can discover both bugs in percpu
239267589c71SDave Young * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
239367589c71SDave Young * code.
239467589c71SDave Young *
23953b034b0dSVivek Goyal * RETURNS:
23963b034b0dSVivek Goyal * The physical address for @addr.
23973b034b0dSVivek Goyal */
23983b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr)
23993b034b0dSVivek Goyal {
24009983b6f0STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
24019983b6f0STejun Heo bool in_first_chunk = false;
2402a855b84cSTejun Heo unsigned long first_low, first_high;
24039983b6f0STejun Heo unsigned int cpu;
24049983b6f0STejun Heo
24059983b6f0STejun Heo /*
2406a855b84cSTejun Heo * The following test on unit_low/high isn't strictly
24079983b6f0STejun Heo * necessary but will speed up lookups of addresses which
24089983b6f0STejun Heo * aren't in the first chunk.
2409c0ebfdc3SDennis Zhou (Facebook) *
2410c0ebfdc3SDennis Zhou (Facebook) * The address check is against full chunk sizes. pcpu_base_addr
2411c0ebfdc3SDennis Zhou (Facebook) * points to the beginning of the first chunk including the
2412c0ebfdc3SDennis Zhou (Facebook) * static region. Assumes good intent as the first chunk may
2413c0ebfdc3SDennis Zhou (Facebook) * not be full (ie. < pcpu_unit_pages in size).
24149983b6f0STejun Heo */
2415c0ebfdc3SDennis Zhou (Facebook) first_low = (unsigned long)pcpu_base_addr +
2416c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2417c0ebfdc3SDennis Zhou (Facebook) first_high = (unsigned long)pcpu_base_addr +
2418c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2419a855b84cSTejun Heo if ((unsigned long)addr >= first_low &&
2420a855b84cSTejun Heo (unsigned long)addr < first_high) {
24219983b6f0STejun Heo for_each_possible_cpu(cpu) {
24229983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu);
24239983b6f0STejun Heo
24249983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) {
24259983b6f0STejun Heo in_first_chunk = true;
24269983b6f0STejun Heo break;
24279983b6f0STejun Heo }
24289983b6f0STejun Heo }
24299983b6f0STejun Heo }
24309983b6f0STejun Heo
24319983b6f0STejun Heo if (in_first_chunk) {
2432eac522efSDavid Howells if (!is_vmalloc_addr(addr))
24333b034b0dSVivek Goyal return __pa(addr);
24343b034b0dSVivek Goyal else
24359f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) +
24369f57bd4dSEugene Surovegin offset_in_page(addr);
2437020ec653STejun Heo } else
24389f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) +
24399f57bd4dSEugene Surovegin offset_in_page(addr);
24403b034b0dSVivek Goyal }
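/*
 * Illustrative usage sketch (hypothetical caller): translating each CPU's
 * instance of a dynamic percpu allocation into a physical address.
 *
 *	unsigned long __percpu *val = alloc_percpu(unsigned long);
 *	phys_addr_t phys;
 *	int cpu;
 *
 *	if (!val)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu) {
 *		phys = per_cpu_ptr_to_phys(per_cpu_ptr(val, cpu));
 *		pr_debug("cpu%d: %pa\n", cpu, &phys);
 *	}
 */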
24413b034b0dSVivek Goyal
2442fbf59bc9STejun Heo /**
2443fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info
2444fd1e8a1fSTejun Heo * @nr_groups: the number of groups
2445fd1e8a1fSTejun Heo * @nr_units: the number of units
2446033e48fbSTejun Heo *
2447fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing
2448fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the
2449fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with
2450fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map
2451fd1e8a1fSTejun Heo * pointer of other groups.
2452033e48fbSTejun Heo *
2453033e48fbSTejun Heo * RETURNS:
2454fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on
2455fd1e8a1fSTejun Heo * failure.
2456033e48fbSTejun Heo */
2457fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2458fd1e8a1fSTejun Heo int nr_units)
2459fd1e8a1fSTejun Heo {
2460fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai;
2461fd1e8a1fSTejun Heo size_t base_size, ai_size;
2462fd1e8a1fSTejun Heo void *ptr;
2463fd1e8a1fSTejun Heo int unit;
2464fd1e8a1fSTejun Heo
246514d37612SGustavo A. R. Silva base_size = ALIGN(struct_size(ai, groups, nr_groups),
2466fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0]));
2467fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2468fd1e8a1fSTejun Heo
246926fb3daeSMike Rapoport ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2470fd1e8a1fSTejun Heo if (!ptr)
2471fd1e8a1fSTejun Heo return NULL;
2472fd1e8a1fSTejun Heo ai = ptr;
2473fd1e8a1fSTejun Heo ptr += base_size;
2474fd1e8a1fSTejun Heo
2475fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr;
2476fd1e8a1fSTejun Heo
2477fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++)
2478fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS;
2479fd1e8a1fSTejun Heo
2480fd1e8a1fSTejun Heo ai->nr_groups = nr_groups;
2481fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size);
2482fd1e8a1fSTejun Heo
2483fd1e8a1fSTejun Heo return ai;
2484fd1e8a1fSTejun Heo }
2485fd1e8a1fSTejun Heo
2486fd1e8a1fSTejun Heo /**
2487fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info
2488fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free
2489fd1e8a1fSTejun Heo *
2490fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info().
2491fd1e8a1fSTejun Heo */
2492fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2493fd1e8a1fSTejun Heo {
24944421cca0SMike Rapoport memblock_free(ai, ai->__ai_size);
2495fd1e8a1fSTejun Heo }
2496fd1e8a1fSTejun Heo
2497fd1e8a1fSTejun Heo /**
2498fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2499fd1e8a1fSTejun Heo * @lvl: loglevel
2500fd1e8a1fSTejun Heo * @ai: allocation info to dump
2501fd1e8a1fSTejun Heo *
2502fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl.
2503fd1e8a1fSTejun Heo */
2504fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
2505fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai)
2506033e48fbSTejun Heo {
2507fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width;
2508033e48fbSTejun Heo char empty_str[] = "--------";
2509fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0;
2510fd1e8a1fSTejun Heo int group, v;
2511fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */
2512033e48fbSTejun Heo
2513fd1e8a1fSTejun Heo v = ai->nr_groups;
2514033e48fbSTejun Heo while (v /= 10)
2515fd1e8a1fSTejun Heo group_width++;
2516033e48fbSTejun Heo
2517fd1e8a1fSTejun Heo v = num_possible_cpus();
2518fd1e8a1fSTejun Heo while (v /= 10)
2519fd1e8a1fSTejun Heo cpu_width++;
2520fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2521033e48fbSTejun Heo
2522fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size;
2523fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3;
2524fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1));
2525033e48fbSTejun Heo
2526fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2527fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2528fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2529fd1e8a1fSTejun Heo
2530fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) {
2531fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group];
2532fd1e8a1fSTejun Heo int unit = 0, unit_end = 0;
2533fd1e8a1fSTejun Heo
2534fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa);
2535fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa;
2536fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) {
2537fd1e8a1fSTejun Heo if (!(alloc % apl)) {
25381170532bSJoe Perches pr_cont("\n");
2539fd1e8a1fSTejun Heo printk("%spcpu-alloc: ", lvl);
2540033e48fbSTejun Heo }
25411170532bSJoe Perches pr_cont("[%0*d] ", group_width, group);
2542fd1e8a1fSTejun Heo
2543fd1e8a1fSTejun Heo for (unit_end += upa; unit < unit_end; unit++)
2544fd1e8a1fSTejun Heo if (gi->cpu_map[unit] != NR_CPUS)
25451170532bSJoe Perches pr_cont("%0*d ",
25461170532bSJoe Perches cpu_width, gi->cpu_map[unit]);
2547033e48fbSTejun Heo else
25481170532bSJoe Perches pr_cont("%s ", empty_str);
2549033e48fbSTejun Heo }
2550fd1e8a1fSTejun Heo }
25511170532bSJoe Perches pr_cont("\n");
2552033e48fbSTejun Heo }
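/*
 * Example of the resulting boot-time output for a hypothetical 4-CPU,
 * single-group configuration (all values are illustrative only):
 *
 *	pcpu-alloc: s98304 r8192 d28672 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3
 */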
2553033e48fbSTejun Heo
2554fbf59bc9STejun Heo /**
25558d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk
2556fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how the percpu area is shaped
255738a6be52STejun Heo * @base_addr: mapped address
2558fbf59bc9STejun Heo *
25598d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static
256069ab285bSChristophe JAILLET * percpu area. This function is to be called from arch percpu area
256138a6be52STejun Heo * setup path.
25628d408b4bSTejun Heo *
2563fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first
2564fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator.
25658d408b4bSTejun Heo *
2566fd1e8a1fSTejun Heo * @ai->static_size is the size of static percpu area.
2567fd1e8a1fSTejun Heo *
2568fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2569edcb4639STejun Heo * reserve after the static area in the first chunk. This reserves
2570edcb4639STejun Heo * the first chunk such that it's available only through reserved
2571edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu
2572edcb4639STejun Heo * static areas on architectures where the addressing model has
2573edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module
2574edcb4639STejun Heo * percpu symbols fall inside the relocatable range.
2575edcb4639STejun Heo *
2576fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic
2577fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size +
2578fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
25796074d5b0STejun Heo *
2580fd1e8a1fSTejun Heo * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2581fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size +
2582fd1e8a1fSTejun Heo * @ai->dyn_size.
25838d408b4bSTejun Heo *
2584fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and used as alignment
2585fd1e8a1fSTejun Heo * for vm areas.
25868d408b4bSTejun Heo *
2587fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and always multiple of
2588fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if
2589fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size.
2590fd1e8a1fSTejun Heo *
2591fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe virtual memory layout of
2592fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the
2593fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these
2594fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing
2595fd1e8a1fSTejun Heo * all units is assumed.
25968d408b4bSTejun Heo *
259738a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and
259838a6be52STejun Heo * copied static data to each unit.
2599fbf59bc9STejun Heo *
2600c0ebfdc3SDennis Zhou (Facebook) * The first chunk will always contain a static and a dynamic region.
2601c0ebfdc3SDennis Zhou (Facebook) * However, the static region is not managed by any chunk. If the first
2602c0ebfdc3SDennis Zhou (Facebook) * chunk also contains a reserved region, it is served by two chunks -
2603c0ebfdc3SDennis Zhou (Facebook) * one for the reserved region and one for the dynamic region. They
2604c0ebfdc3SDennis Zhou (Facebook) * share the same vm, but use offset regions in the area allocation map.
2605c0ebfdc3SDennis Zhou (Facebook) * The chunk serving the dynamic region is circulated in the chunk slots
2606c0ebfdc3SDennis Zhou (Facebook) * and available for dynamic allocation like any other chunk.
2607fbf59bc9STejun Heo */
2608163fa234SKefeng Wang void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2609fd1e8a1fSTejun Heo void *base_addr)
2610fbf59bc9STejun Heo {
2611b9c39442SDennis Zhou (Facebook) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2612d2f3c384SDennis Zhou (Facebook) size_t static_size, dyn_size;
26136563297cSTejun Heo unsigned long *group_offsets;
26146563297cSTejun Heo size_t *group_sizes;
2615fb435d52STejun Heo unsigned long *unit_off;
2616fbf59bc9STejun Heo unsigned int cpu;
2617fd1e8a1fSTejun Heo int *unit_map;
2618fd1e8a1fSTejun Heo int group, unit, i;
2619c0ebfdc3SDennis Zhou (Facebook) unsigned long tmp_addr;
2620f655f405SMike Rapoport size_t alloc_size;
2621fbf59bc9STejun Heo
2622635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \
2623635b75fcSTejun Heo if (unlikely(cond)) { \
2624870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \
2625870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \
2626807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \
2627635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \
2628635b75fcSTejun Heo BUG(); \
2629635b75fcSTejun Heo } \
2630635b75fcSTejun Heo } while (0)
2631635b75fcSTejun Heo
26322f39e637STejun Heo /* sanity checks */
2633635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2634bbddff05STejun Heo #ifdef CONFIG_SMP
2635635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size);
2636f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2637bbddff05STejun Heo #endif
2638635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr);
2639f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2640635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2641f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2642635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2643ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2644099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2645d2f3c384SDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2646ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2647ca460b3cSDennis Zhou (Facebook) IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
26489f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
26498d408b4bSTejun Heo
26506563297cSTejun Heo /* process group information and build config tables accordingly */
2651f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2652f655f405SMike Rapoport group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2653f655f405SMike Rapoport if (!group_offsets)
2654f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
2655f655f405SMike Rapoport alloc_size);
2656f655f405SMike Rapoport
2657f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2658f655f405SMike Rapoport group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2659f655f405SMike Rapoport if (!group_sizes)
2660f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
2661f655f405SMike Rapoport alloc_size);
2662f655f405SMike Rapoport
2663f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2664f655f405SMike Rapoport unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2665f655f405SMike Rapoport if (!unit_map)
2666f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
2667f655f405SMike Rapoport alloc_size);
2668f655f405SMike Rapoport
2669f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2670f655f405SMike Rapoport unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2671f655f405SMike Rapoport if (!unit_off)
2672f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
2673f655f405SMike Rapoport alloc_size);
26742f39e637STejun Heo
2675fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2676ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX;
2677a855b84cSTejun Heo
2678a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS;
2679a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS;
26802f39e637STejun Heo
2681fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2682fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group];
26832f39e637STejun Heo
26846563297cSTejun Heo group_offsets[group] = gi->base_offset;
26856563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size;
26866563297cSTejun Heo
2687fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) {
2688fd1e8a1fSTejun Heo cpu = gi->cpu_map[i];
2689fd1e8a1fSTejun Heo if (cpu == NR_CPUS)
2690fd1e8a1fSTejun Heo continue;
2691fd1e8a1fSTejun Heo
26929f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2693635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2694635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2695fd1e8a1fSTejun Heo
2696fd1e8a1fSTejun Heo unit_map[cpu] = unit + i;
2697fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2698fb435d52STejun Heo
2699a855b84cSTejun Heo /* determine low/high unit_cpu */
2700a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS ||
2701a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2702a855b84cSTejun Heo pcpu_low_unit_cpu = cpu;
2703a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS ||
2704a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2705a855b84cSTejun Heo pcpu_high_unit_cpu = cpu;
27060fc0531eSLinus Torvalds }
27070fc0531eSLinus Torvalds }
2708fd1e8a1fSTejun Heo pcpu_nr_units = unit;
27092f39e637STejun Heo
27102f39e637STejun Heo for_each_possible_cpu(cpu)
2711635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2712635b75fcSTejun Heo
2713635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */
2714635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON
2715bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai);
27162f39e637STejun Heo
27176563297cSTejun Heo pcpu_nr_groups = ai->nr_groups;
27186563297cSTejun Heo pcpu_group_offsets = group_offsets;
27196563297cSTejun Heo pcpu_group_sizes = group_sizes;
2720fd1e8a1fSTejun Heo pcpu_unit_map = unit_map;
2721fb435d52STejun Heo pcpu_unit_offsets = unit_off;
27222f39e637STejun Heo
27232f39e637STejun Heo /* determine basic parameters */
2724fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2725d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
27266563297cSTejun Heo pcpu_atom_size = ai->atom_size;
27277ee1e758SBaoquan He pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
272861cf93d3SDennis Zhou BITS_TO_LONGS(pcpu_unit_pages));
2729cafe8816STejun Heo
273030a5b536SDennis Zhou pcpu_stats_save_ai(ai);
273130a5b536SDennis Zhou
2732d9b55eebSTejun Heo /*
2733f1833241SRoman Gushchin * Allocate chunk slots. The slots after the active slots are:
2734f1833241SRoman Gushchin * sidelined_slot - isolated, depopulated chunks
2735f1833241SRoman Gushchin * free_slot - fully free chunks
2736f1833241SRoman Gushchin * to_depopulate_slot - isolated, chunks to depopulate
2737d9b55eebSTejun Heo */
2738f1833241SRoman Gushchin pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2739f1833241SRoman Gushchin pcpu_free_slot = pcpu_sidelined_slot + 1;
2740f1833241SRoman Gushchin pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2741f1833241SRoman Gushchin pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
27423c7be18aSRoman Gushchin pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2743faf65ddeSRoman Gushchin sizeof(pcpu_chunk_lists[0]),
27447e1c4e27SMike Rapoport SMP_CACHE_BYTES);
27453c7be18aSRoman Gushchin if (!pcpu_chunk_lists)
2746f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
2747faf65ddeSRoman Gushchin pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
27483c7be18aSRoman Gushchin
2749fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++)
2750faf65ddeSRoman Gushchin INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
2751fbf59bc9STejun Heo
2752edcb4639STejun Heo /*
2753d2f3c384SDennis Zhou (Facebook) * The end of the static region needs to be aligned with the
2754d2f3c384SDennis Zhou (Facebook) * minimum allocation size as this offsets the reserved and
2755d2f3c384SDennis Zhou (Facebook) * dynamic region. The first chunk ends page aligned by
2756d2f3c384SDennis Zhou (Facebook) * expanding the dynamic region, therefore the dynamic region
2757d2f3c384SDennis Zhou (Facebook) * can be shrunk to compensate while still staying above the
2758d2f3c384SDennis Zhou (Facebook) * configured sizes.
2759d2f3c384SDennis Zhou (Facebook) */
2760d2f3c384SDennis Zhou (Facebook) static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2761d2f3c384SDennis Zhou (Facebook) dyn_size = ai->dyn_size - (static_size - ai->static_size);
2762d2f3c384SDennis Zhou (Facebook)
2763d2f3c384SDennis Zhou (Facebook) /*
27647ee1e758SBaoquan He * Initialize first chunk:
27657ee1e758SBaoquan He * This chunk is broken up into 3 parts:
27667ee1e758SBaoquan He * < static | [reserved] | dynamic >
27677ee1e758SBaoquan He * - static - there is no backing chunk because these allocations can
27687ee1e758SBaoquan He * never be freed.
27697ee1e758SBaoquan He * - reserved (pcpu_reserved_chunk) - exists primarily to serve
27707ee1e758SBaoquan He * allocations from module load.
27717ee1e758SBaoquan He * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first
27727ee1e758SBaoquan He * chunk.
2773edcb4639STejun Heo */
2774d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size;
27757ee1e758SBaoquan He if (ai->reserved_size)
27767ee1e758SBaoquan He pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr,
27777ee1e758SBaoquan He ai->reserved_size);
27787ee1e758SBaoquan He tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
27797ee1e758SBaoquan He pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size);
278061ace7faSTejun Heo
2781faf65ddeSRoman Gushchin pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2782ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1);
2783fbf59bc9STejun Heo
27847e8a6304SDennis Zhou (Facebook) /* include all regions of the first chunk */
27857e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += PFN_DOWN(size_sum);
27867e8a6304SDennis Zhou (Facebook)
278730a5b536SDennis Zhou pcpu_stats_chunk_alloc();
2788df95e795SDennis Zhou trace_percpu_create_chunk(base_addr);
278930a5b536SDennis Zhou
2790fbf59bc9STejun Heo /* we're done */
2791bba174f5STejun Heo pcpu_base_addr = base_addr;
2792fbf59bc9STejun Heo }
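/*
 * Note: arch code does not usually call pcpu_setup_first_chunk() directly;
 * the generic helpers built below (pcpu_embed_first_chunk() and, where
 * configured, pcpu_page_first_chunk()) map the first chunk and then hand
 * it to this function.
 */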
279366c3a757STejun Heo
2794bbddff05STejun Heo #ifdef CONFIG_SMP
2795bbddff05STejun Heo
279617f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2797f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto",
2798f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed",
2799f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page",
2800f58dc01bSTejun Heo };
280166c3a757STejun Heo
2802f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2803f58dc01bSTejun Heo
2804f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str)
280566c3a757STejun Heo {
28065479c78aSCyrill Gorcunov if (!str)
28075479c78aSCyrill Gorcunov return -EINVAL;
28085479c78aSCyrill Gorcunov
2809f58dc01bSTejun Heo if (0)
2810f58dc01bSTejun Heo /* nada */;
2811f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2812f58dc01bSTejun Heo else if (!strcmp(str, "embed"))
2813f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED;
2814f58dc01bSTejun Heo #endif
2815f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2816f58dc01bSTejun Heo else if (!strcmp(str, "page"))
2817f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE;
2818f58dc01bSTejun Heo #endif
2819f58dc01bSTejun Heo else
2820870d4b12SJoe Perches pr_warn("unknown allocator %s specified\n", str);
282166c3a757STejun Heo
2822f58dc01bSTejun Heo return 0;
282366c3a757STejun Heo }
2824f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup);
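/*
 * Example (kernel command line): booting with "percpu_alloc=page" selects
 * the page-based first chunk allocator instead of whatever the arch would
 * otherwise choose, provided CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK is set.
 */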
282566c3a757STejun Heo
28263c9a024fSTejun Heo /*
28273c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup.
28283c9a024fSTejun Heo * Build it if needed by the arch config or the generic setup is going
28293c9a024fSTejun Heo * to be used.
28303c9a024fSTejun Heo */
283108fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
283208fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
28333c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK
28343c9a024fSTejun Heo #endif
28353c9a024fSTejun Heo
28363c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */
28373c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
28383c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK
28393c9a024fSTejun Heo #endif
28403c9a024fSTejun Heo
28413c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */
28423c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
28433c9a024fSTejun Heo /**
2844fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2845fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes
2846fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes
2847fbf59bc9STejun Heo * @atom_size: allocation atom size
2848fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional
2849fbf59bc9STejun Heo *
2850fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus
2851fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation
2852fbf59bc9STejun Heo * atom size and distances between CPUs.
2853fbf59bc9STejun Heo *
2854bffc4375SYannick Guerrini * Groups are always multiples of atom size, and CPUs which are at
2855fbf59bc9STejun Heo * LOCAL_DISTANCE from each other both ways are grouped together and share space for
2856fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed
2857fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage
2858fbf59bc9STejun Heo * of allocated virtual address space.
2859fbf59bc9STejun Heo *
2860fbf59bc9STejun Heo * RETURNS:
2861fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On
2862fbf59bc9STejun Heo * failure, ERR_PTR value is returned.
2863fbf59bc9STejun Heo */
2864258e0815SDennis Zhou static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
2865fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size,
2866fbf59bc9STejun Heo size_t atom_size,
2867fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2868fbf59bc9STejun Heo {
2869fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata;
2870fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata;
2871d7d29ac7SWonhyuk Yang static struct cpumask mask __initdata;
2872fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start;
2873fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0;
2874fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size;
28753f649ab7SKees Cook int upa, max_upa, best_upa; /* units_per_alloc */
2876fbf59bc9STejun Heo int last_allocs, group, unit;
2877fbf59bc9STejun Heo unsigned int cpu, tcpu;
2878fbf59bc9STejun Heo struct pcpu_alloc_info *ai;
2879fbf59bc9STejun Heo unsigned int *cpu_map;
2880fbf59bc9STejun Heo
2881fbf59bc9STejun Heo /* this function may be called multiple times */
2882fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map));
2883fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt));
2884d7d29ac7SWonhyuk Yang cpumask_clear(&mask);
2885fbf59bc9STejun Heo
2886fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */
2887fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size +
2888fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2889fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size;
2890fbf59bc9STejun Heo
2891fbf59bc9STejun Heo /*
2892fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that
2893fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest
289425985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to
2895fbf59bc9STejun Heo * or larger than min_unit_size.
2896fbf59bc9STejun Heo */
2897fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2898fbf59bc9STejun Heo
28999c015162SDennis Zhou (Facebook) /* determine the maximum # of units that can fit in an allocation */
2900fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size);
2901fbf59bc9STejun Heo upa = alloc_size / min_unit_size;
2902f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2903fbf59bc9STejun Heo upa--;
2904fbf59bc9STejun Heo max_upa = upa;
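/*
 * Worked example (illustrative numbers): with a min_unit_size of 45056
 * bytes (44K) and a 2M atom_size, alloc_size rounds up to 2M and the loop
 * above lowers upa from 46 to 32, the largest value for which 2M divides
 * evenly and the 64K quotient is page aligned; max_upa is therefore 32.
 */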
2905fbf59bc9STejun Heo
2906d7d29ac7SWonhyuk Yang cpumask_copy(&mask, cpu_possible_mask);
2907d7d29ac7SWonhyuk Yang
2908fbf59bc9STejun Heo /* group cpus according to their proximity */
2909d7d29ac7SWonhyuk Yang for (group = 0; !cpumask_empty(&mask); group++) {
2910d7d29ac7SWonhyuk Yang /* pop the group's first cpu */
2911d7d29ac7SWonhyuk Yang cpu = cpumask_first(&mask);
2912fbf59bc9STejun Heo group_map[cpu] = group;
2913fbf59bc9STejun Heo group_cnt[group]++;
2914d7d29ac7SWonhyuk Yang cpumask_clear_cpu(cpu, &mask);
2915d7d29ac7SWonhyuk Yang
2916d7d29ac7SWonhyuk Yang for_each_cpu(tcpu, &mask) {
2917d7d29ac7SWonhyuk Yang if (!cpu_distance_fn ||
2918d7d29ac7SWonhyuk Yang (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2919d7d29ac7SWonhyuk Yang cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2920d7d29ac7SWonhyuk Yang group_map[tcpu] = group;
2921d7d29ac7SWonhyuk Yang group_cnt[group]++;
2922d7d29ac7SWonhyuk Yang cpumask_clear_cpu(tcpu, &mask);
2923fbf59bc9STejun Heo }
2924d7d29ac7SWonhyuk Yang }
2925d7d29ac7SWonhyuk Yang }
2926d7d29ac7SWonhyuk Yang nr_groups = group;
2927fbf59bc9STejun Heo
2928fbf59bc9STejun Heo /*
29299c015162SDennis Zhou (Facebook) * Wasted space is caused by a ratio imbalance of upa to group_cnt.
29309c015162SDennis Zhou (Facebook) * Expand the unit_size until we use >= 75% of the units allocated.
29319c015162SDennis Zhou (Facebook) * Related to atom_size, which could be much larger than the unit_size.
2932fbf59bc9STejun Heo */
2933fbf59bc9STejun Heo last_allocs = INT_MAX;
29344829c791SDennis Zhou best_upa = 0;
2935fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) {
2936fbf59bc9STejun Heo int allocs = 0, wasted = 0;
2937fbf59bc9STejun Heo
2938f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2939fbf59bc9STejun Heo continue;
2940fbf59bc9STejun Heo
2941fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) {
2942fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2943fbf59bc9STejun Heo allocs += this_allocs;
2944fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group];
2945fbf59bc9STejun Heo }
2946fbf59bc9STejun Heo
2947fbf59bc9STejun Heo /*
2948fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The
2949fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always
2950fbf59bc9STejun Heo * passes the following check.
2951fbf59bc9STejun Heo */
2952fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3)
2953fbf59bc9STejun Heo continue;
2954fbf59bc9STejun Heo
2955fbf59bc9STejun Heo /* and then don't consume more memory */
2956fbf59bc9STejun Heo if (allocs > last_allocs)
2957fbf59bc9STejun Heo break;
2958fbf59bc9STejun Heo last_allocs = allocs;
2959fbf59bc9STejun Heo best_upa = upa;
2960fbf59bc9STejun Heo }
29614829c791SDennis Zhou BUG_ON(!best_upa);
2962fbf59bc9STejun Heo upa = best_upa;
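	/*
	 * A sketch of the trade-off with hypothetical numbers: for
	 * group_cnt = { 6, 2 } on 8 possible CPUs, upa = 4 needs
	 * allocs = 2 + 1 = 3 but wastes (2*4 - 6) + (1*4 - 2) = 4 units,
	 * which exceeds 8 / 3 and is rejected; upa = 2 needs
	 * allocs = 3 + 1 = 4 with no waste and is accepted.  The loop
	 * stops as soon as a smaller upa would consume more allocations,
	 * bounding wasted units without spending extra memory.
	 */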
2963fbf59bc9STejun Heo
2964fbf59bc9STejun Heo /* allocate and fill alloc_info */
2965fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++)
2966fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa);
2967fbf59bc9STejun Heo
2968fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2969fbf59bc9STejun Heo if (!ai)
2970fbf59bc9STejun Heo return ERR_PTR(-ENOMEM);
2971fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map;
2972fbf59bc9STejun Heo
2973fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) {
2974fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map;
2975fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa);
2976fbf59bc9STejun Heo }
2977fbf59bc9STejun Heo
2978fbf59bc9STejun Heo ai->static_size = static_size;
2979fbf59bc9STejun Heo ai->reserved_size = reserved_size;
2980fbf59bc9STejun Heo ai->dyn_size = dyn_size;
2981fbf59bc9STejun Heo ai->unit_size = alloc_size / upa;
2982fbf59bc9STejun Heo ai->atom_size = atom_size;
2983fbf59bc9STejun Heo ai->alloc_size = alloc_size;
2984fbf59bc9STejun Heo
29852de7852fSPeng Fan for (group = 0, unit = 0; group < nr_groups; group++) {
2986fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group];
2987fbf59bc9STejun Heo
2988fbf59bc9STejun Heo /*
2989fbf59bc9STejun Heo * Initialize base_offset as if all groups are located
2990fbf59bc9STejun Heo * back-to-back. The caller should update this to
2991fbf59bc9STejun Heo * reflect actual allocation.
2992fbf59bc9STejun Heo */
2993fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size;
2994fbf59bc9STejun Heo
2995fbf59bc9STejun Heo for_each_possible_cpu(cpu)
2996fbf59bc9STejun Heo if (group_map[cpu] == group)
2997fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu;
2998fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa);
2999fbf59bc9STejun Heo unit += gi->nr_units;
3000fbf59bc9STejun Heo }
3001fbf59bc9STejun Heo BUG_ON(unit != nr_units);
3002fbf59bc9STejun Heo
3003fbf59bc9STejun Heo return ai;
3004fbf59bc9STejun Heo }
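/*
 * Continuing the hypothetical example above: with group_cnt = { 4, 4 }
 * and upa = 4, the returned pcpu_alloc_info describes 8 units in two
 * groups, groups[0].cpu_map = { 0, 1, 2, 3 }, groups[1].cpu_map =
 * { 4, 5, 6, 7 }, and base_offsets of 0 and 4 * unit_size, which the
 * caller later adjusts to the addresses actually allocated.
 */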
300523f91716SKefeng Wang
300623f91716SKefeng Wang static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
300723f91716SKefeng Wang pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
300823f91716SKefeng Wang {
300923f91716SKefeng Wang const unsigned long goal = __pa(MAX_DMA_ADDRESS);
301023f91716SKefeng Wang #ifdef CONFIG_NUMA
301123f91716SKefeng Wang int node = NUMA_NO_NODE;
301223f91716SKefeng Wang void *ptr;
301323f91716SKefeng Wang
301423f91716SKefeng Wang if (cpu_to_nd_fn)
301523f91716SKefeng Wang node = cpu_to_nd_fn(cpu);
301623f91716SKefeng Wang
301723f91716SKefeng Wang if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
301823f91716SKefeng Wang ptr = memblock_alloc_from(size, align, goal);
301923f91716SKefeng Wang pr_info("cpu %d has no node %d or node-local memory\n",
302023f91716SKefeng Wang cpu, node);
302123f91716SKefeng Wang pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
302223f91716SKefeng Wang cpu, size, (u64)__pa(ptr));
302323f91716SKefeng Wang } else {
302423f91716SKefeng Wang ptr = memblock_alloc_try_nid(size, align, goal,
302523f91716SKefeng Wang MEMBLOCK_ALLOC_ACCESSIBLE,
302623f91716SKefeng Wang node);
302723f91716SKefeng Wang
302823f91716SKefeng Wang pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
302923f91716SKefeng Wang cpu, size, node, (u64)__pa(ptr));
303023f91716SKefeng Wang }
303123f91716SKefeng Wang return ptr;
303223f91716SKefeng Wang #else
303323f91716SKefeng Wang return memblock_alloc_from(size, align, goal);
303423f91716SKefeng Wang #endif
303523f91716SKefeng Wang }
303623f91716SKefeng Wang
303723f91716SKefeng Wang static void __init pcpu_fc_free(void *ptr, size_t size)
303823f91716SKefeng Wang {
303923f91716SKefeng Wang memblock_free(ptr, size);
304023f91716SKefeng Wang }
30413c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
3042fbf59bc9STejun Heo
30433c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK)
304466c3a757STejun Heo /**
304566c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
304666c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes
30474ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes
3048c8826dd5STejun Heo * @atom_size: allocation atom size
3049c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional
30501ca3fb3aSKefeng Wang * @cpu_to_nd_fn: callback to convert cpu to its node, optional
305166c3a757STejun Heo *
305266c3a757STejun Heo * This is a helper to ease setting up the embedded first percpu chunk and
305366c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected.
305466c3a757STejun Heo *
305566c3a757STejun Heo * If this function is used to set up the first chunk, it is allocated
305623f91716SKefeng Wang * by calling pcpu_fc_alloc and used as-is without being mapped into
3057c8826dd5STejun Heo * the vmalloc area. Allocations are always whole multiples of @atom_size
3058c8826dd5STejun Heo * aligned to @atom_size.
3059c8826dd5STejun Heo *
3060c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical
3061c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this
3062c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus
3063c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if
3064c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances
3065c8826dd5STejun Heo * between node memory addresses (i.e. 32-bit NUMA machines).
306666c3a757STejun Heo *
30674ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size.
306866c3a757STejun Heo *
306966c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit
307023f91716SKefeng Wang * size, the leftover is returned using pcpu_fc_free.
307166c3a757STejun Heo *
307266c3a757STejun Heo * RETURNS:
3073fb435d52STejun Heo * 0 on success, -errno on failure.
307466c3a757STejun Heo */
30754ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
3076c8826dd5STejun Heo size_t atom_size,
3077c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
307823f91716SKefeng Wang pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
307966c3a757STejun Heo {
3080c8826dd5STejun Heo void *base = (void *)ULONG_MAX;
3081c8826dd5STejun Heo void **areas = NULL;
3082fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai;
308393c76b6bSzijun_hu size_t size_sum, areas_size;
308493c76b6bSzijun_hu unsigned long max_distance;
3085163fa234SKefeng Wang int group, i, highest_group, rc = 0;
308666c3a757STejun Heo
3087c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
3088c8826dd5STejun Heo cpu_distance_fn);
3089fd1e8a1fSTejun Heo if (IS_ERR(ai))
3090fd1e8a1fSTejun Heo return PTR_ERR(ai);
309166c3a757STejun Heo
3092fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
3093c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
309466c3a757STejun Heo
309526fb3daeSMike Rapoport areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
3096c8826dd5STejun Heo if (!areas) {
3097fb435d52STejun Heo rc = -ENOMEM;
3098c8826dd5STejun Heo goto out_free;
3099fa8a7094STejun Heo }
310066c3a757STejun Heo
31019b739662Szijun_hu /* allocate, copy and determine base address & max_distance */
31029b739662Szijun_hu highest_group = 0;
3103c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) {
3104c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group];
3105c8826dd5STejun Heo unsigned int cpu = NR_CPUS;
3106c8826dd5STejun Heo void *ptr;
310766c3a757STejun Heo
3108c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
3109c8826dd5STejun Heo cpu = gi->cpu_map[i];
3110c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS);
3111c8826dd5STejun Heo
3112c8826dd5STejun Heo /* allocate space for the whole group */
311323f91716SKefeng Wang ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
3114c8826dd5STejun Heo if (!ptr) {
3115c8826dd5STejun Heo rc = -ENOMEM;
3116c8826dd5STejun Heo goto out_free_areas;
3117c8826dd5STejun Heo }
3118f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */
3119a317ebccSPatrick Wang kmemleak_ignore_phys(__pa(ptr));
3120c8826dd5STejun Heo areas[group] = ptr;
3121c8826dd5STejun Heo
3122c8826dd5STejun Heo base = min(ptr, base);
31239b739662Szijun_hu if (ptr > areas[highest_group])
31249b739662Szijun_hu highest_group = group;
31259b739662Szijun_hu }
31269b739662Szijun_hu max_distance = areas[highest_group] - base;
31279b739662Szijun_hu max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
31289b739662Szijun_hu
31299b739662Szijun_hu /* warn if maximum distance is further than 75% of vmalloc space */
31309b739662Szijun_hu if (max_distance > VMALLOC_TOTAL * 3 / 4) {
31319b739662Szijun_hu pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
31329b739662Szijun_hu max_distance, VMALLOC_TOTAL);
31339b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
31349b739662Szijun_hu /* and fail if we have fallback */
31359b739662Szijun_hu rc = -EINVAL;
31369b739662Szijun_hu goto out_free_areas;
31379b739662Szijun_hu #endif
313842b64281STejun Heo }
313942b64281STejun Heo
314042b64281STejun Heo /*
314142b64281STejun Heo * Copy data and free unused parts. This should happen after all
314242b64281STejun Heo * allocations are complete; otherwise, we may end up with
314342b64281STejun Heo * overlapping groups.
314442b64281STejun Heo */
314542b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) {
314642b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group];
314742b64281STejun Heo void *ptr = areas[group];
3148c8826dd5STejun Heo
3149c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3150c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) {
3151c8826dd5STejun Heo /* unused unit, free whole */
315223f91716SKefeng Wang pcpu_fc_free(ptr, ai->unit_size);
3153c8826dd5STejun Heo continue;
3154c8826dd5STejun Heo }
3155c8826dd5STejun Heo /* copy and return the unused part */
3156fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size);
315723f91716SKefeng Wang pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
3158c8826dd5STejun Heo }
315966c3a757STejun Heo }
316066c3a757STejun Heo
3161c8826dd5STejun Heo /* base address is now known, determine group base offsets */
31626ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) {
3163c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base;
31646ea529a2STejun Heo }
3165c8826dd5STejun Heo
316600206a69SMatteo Croce pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
316700206a69SMatteo Croce PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3168fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size);
316966c3a757STejun Heo
3170163fa234SKefeng Wang pcpu_setup_first_chunk(ai, base);
3171c8826dd5STejun Heo goto out_free;
3172c8826dd5STejun Heo
3173c8826dd5STejun Heo out_free_areas:
3174c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++)
3175f851c8d8SMichael Holzheu if (areas[group])
317623f91716SKefeng Wang pcpu_fc_free(areas[group],
3177c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size);
3178c8826dd5STejun Heo out_free:
3179fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai);
3180c8826dd5STejun Heo if (areas)
31814421cca0SMike Rapoport memblock_free(areas, areas_size);
3182fb435d52STejun Heo return rc;
3183d4b95f80STejun Heo }
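/*
 * A minimal usage sketch (reserve sizes, atom size and callbacks are
 * architecture specific; pcpu_cpu_distance() and pcpu_cpu_to_node() are
 * hypothetical arch-provided helpers):
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    pcpu_cpu_distance, pcpu_cpu_to_node);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 *
 * The generic setup_per_cpu_areas() further below is the simplest real
 * caller, using PAGE_SIZE atoms and no distance callback.
 */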
31843c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */
3185d4b95f80STejun Heo
31863c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK
318720c03576SKefeng Wang #include <asm/pgalloc.h>
318820c03576SKefeng Wang
318920c03576SKefeng Wang #ifndef P4D_TABLE_SIZE
319020c03576SKefeng Wang #define P4D_TABLE_SIZE PAGE_SIZE
319120c03576SKefeng Wang #endif
319220c03576SKefeng Wang
319320c03576SKefeng Wang #ifndef PUD_TABLE_SIZE
319420c03576SKefeng Wang #define PUD_TABLE_SIZE PAGE_SIZE
319520c03576SKefeng Wang #endif
319620c03576SKefeng Wang
319720c03576SKefeng Wang #ifndef PMD_TABLE_SIZE
319820c03576SKefeng Wang #define PMD_TABLE_SIZE PAGE_SIZE
319920c03576SKefeng Wang #endif
320020c03576SKefeng Wang
320120c03576SKefeng Wang #ifndef PTE_TABLE_SIZE
320220c03576SKefeng Wang #define PTE_TABLE_SIZE PAGE_SIZE
320320c03576SKefeng Wang #endif
320420c03576SKefeng Wang void __init __weak pcpu_populate_pte(unsigned long addr)
320520c03576SKefeng Wang {
320620c03576SKefeng Wang pgd_t *pgd = pgd_offset_k(addr);
320720c03576SKefeng Wang p4d_t *p4d;
320820c03576SKefeng Wang pud_t *pud;
320920c03576SKefeng Wang pmd_t *pmd;
321020c03576SKefeng Wang
321120c03576SKefeng Wang if (pgd_none(*pgd)) {
321241fd59b7SBibo Mao p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
321341fd59b7SBibo Mao if (!p4d)
321420c03576SKefeng Wang goto err_alloc;
321541fd59b7SBibo Mao pgd_populate(&init_mm, pgd, p4d);
321620c03576SKefeng Wang }
321720c03576SKefeng Wang
321820c03576SKefeng Wang p4d = p4d_offset(pgd, addr);
321920c03576SKefeng Wang if (p4d_none(*p4d)) {
322041fd59b7SBibo Mao pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
322141fd59b7SBibo Mao if (!pud)
322220c03576SKefeng Wang goto err_alloc;
322341fd59b7SBibo Mao p4d_populate(&init_mm, p4d, pud);
322420c03576SKefeng Wang }
322520c03576SKefeng Wang
322620c03576SKefeng Wang pud = pud_offset(p4d, addr);
322720c03576SKefeng Wang if (pud_none(*pud)) {
322841fd59b7SBibo Mao pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
322941fd59b7SBibo Mao if (!pmd)
323020c03576SKefeng Wang goto err_alloc;
323141fd59b7SBibo Mao pud_populate(&init_mm, pud, pmd);
323220c03576SKefeng Wang }
323320c03576SKefeng Wang
323420c03576SKefeng Wang pmd = pmd_offset(pud, addr);
323520c03576SKefeng Wang if (!pmd_present(*pmd)) {
323620c03576SKefeng Wang pte_t *new;
323720c03576SKefeng Wang
323820c03576SKefeng Wang new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
323920c03576SKefeng Wang if (!new)
324020c03576SKefeng Wang goto err_alloc;
324120c03576SKefeng Wang pmd_populate_kernel(&init_mm, pmd, new);
324220c03576SKefeng Wang }
324320c03576SKefeng Wang
324420c03576SKefeng Wang return;
324520c03576SKefeng Wang
324620c03576SKefeng Wang err_alloc:
324720c03576SKefeng Wang panic("%s: Failed to allocate memory\n", __func__);
324820c03576SKefeng Wang }
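/*
 * The walk above follows the standard top-down population pattern: each
 * missing intermediate level (p4d/pud/pmd) and the final pte page are
 * allocated from memblock and hooked in, so __pcpu_map_pages() can later
 * install the leaf PTEs without allocating.  On configurations with
 * folded page-table levels the corresponding branches are effectively
 * no-ops.
 */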
324920c03576SKefeng Wang
3250d4b95f80STejun Heo /**
325100ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3252d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes
32531ca3fb3aSKefeng Wang * @cpu_to_nd_fn: callback to convert cpu to its node, optional
3254d4b95f80STejun Heo *
325500ae4064STejun Heo * This is a helper to ease setting up a page-remapped first percpu
325600ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected.
3257d4b95f80STejun Heo *
3258d4b95f80STejun Heo * This is the basic allocator. The static percpu area is allocated
3259d4b95f80STejun Heo * page-by-page into the vmalloc area.
3260d4b95f80STejun Heo *
3261d4b95f80STejun Heo * RETURNS:
3262fb435d52STejun Heo * 0 on success, -errno on failure.
3263d4b95f80STejun Heo */
326420c03576SKefeng Wang int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3265d4b95f80STejun Heo {
32668f05a6a6STejun Heo static struct vm_struct vm;
3267fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai;
326800ae4064STejun Heo char psize_str[16];
3269ce3141a2STejun Heo int unit_pages;
3270d4b95f80STejun Heo size_t pages_size;
3271ce3141a2STejun Heo struct page **pages;
3272163fa234SKefeng Wang int unit, i, j, rc = 0;
32738f606604Szijun_hu int upa;
32748f606604Szijun_hu int nr_g0_units;
3275d4b95f80STejun Heo
327600ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
327700ae4064STejun Heo
32784ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3279fd1e8a1fSTejun Heo if (IS_ERR(ai))
3280fd1e8a1fSTejun Heo return PTR_ERR(ai);
3281fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1);
32828f606604Szijun_hu upa = ai->alloc_size/ai->unit_size;
32838f606604Szijun_hu nr_g0_units = roundup(num_possible_cpus(), upa);
32840b59c25fSIgor Stoppa if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
32858f606604Szijun_hu pcpu_free_alloc_info(ai);
32868f606604Szijun_hu return -EINVAL;
32878f606604Szijun_hu }
3288fd1e8a1fSTejun Heo
3289fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT;
3290d4b95f80STejun Heo
3291d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */
3292fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3293fd1e8a1fSTejun Heo sizeof(pages[0]));
32947e1c4e27SMike Rapoport pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3295f655f405SMike Rapoport if (!pages)
3296f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
3297f655f405SMike Rapoport pages_size);
3298d4b95f80STejun Heo
32998f05a6a6STejun Heo /* allocate pages */
3300d4b95f80STejun Heo j = 0;
33018f606604Szijun_hu for (unit = 0; unit < num_possible_cpus(); unit++) {
3302fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit];
33038f606604Szijun_hu for (i = 0; i < unit_pages; i++) {
3304d4b95f80STejun Heo void *ptr;
3305d4b95f80STejun Heo
330623f91716SKefeng Wang ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
3307d4b95f80STejun Heo if (!ptr) {
3308870d4b12SJoe Perches pr_warn("failed to allocate %s page for cpu%u\n",
3309598d8091SJoe Perches psize_str, cpu);
3310d4b95f80STejun Heo goto enomem;
3311d4b95f80STejun Heo }
3312f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */
3313a317ebccSPatrick Wang kmemleak_ignore_phys(__pa(ptr));
3314ce3141a2STejun Heo pages[j++] = virt_to_page(ptr);
3315d4b95f80STejun Heo }
33168f606604Szijun_hu }
3317d4b95f80STejun Heo
33188f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */
33198f05a6a6STejun Heo vm.flags = VM_ALLOC;
3320fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size;
33218f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE);
33228f05a6a6STejun Heo
3323fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) {
33241d9d3257STejun Heo unsigned long unit_addr =
3325fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size;
33268f05a6a6STejun Heo
3327ce3141a2STejun Heo for (i = 0; i < unit_pages; i++)
332820c03576SKefeng Wang pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));
33298f05a6a6STejun Heo
33308f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */
3331fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3332ce3141a2STejun Heo unit_pages);
3333fb435d52STejun Heo if (rc < 0)
3334fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc);
33358f05a6a6STejun Heo
3336*7a92fc8bSAlexandre Ghiti flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
33378f05a6a6STejun Heo
33388f05a6a6STejun Heo /* copy static data */
3339fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
334066c3a757STejun Heo }
334166c3a757STejun Heo
334266c3a757STejun Heo /* we're ready, commit */
334300206a69SMatteo Croce pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
334400206a69SMatteo Croce unit_pages, psize_str, ai->static_size,
3345fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size);
334666c3a757STejun Heo
3347163fa234SKefeng Wang pcpu_setup_first_chunk(ai, vm.addr);
3348d4b95f80STejun Heo goto out_free_ar;
3349d4b95f80STejun Heo
3350d4b95f80STejun Heo enomem:
3351d4b95f80STejun Heo while (--j >= 0)
335223f91716SKefeng Wang pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
3353fb435d52STejun Heo rc = -ENOMEM;
3354d4b95f80STejun Heo out_free_ar:
33554421cca0SMike Rapoport memblock_free(pages, pages_size);
3356fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai);
3357fb435d52STejun Heo return rc;
335866c3a757STejun Heo }
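/*
 * Note on the result: unlike the embed helper, every unit here is backed
 * by individually allocated pages remapped into a single early-registered
 * vm area of num_possible_cpus() * unit_size bytes, so static percpu
 * accesses go through these page mappings rather than the kernel linear
 * mapping.
 */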
33593c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */
3360d4b95f80STejun Heo
3361bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
33628c4bfc6eSTejun Heo /*
3363bbddff05STejun Heo * Generic SMP percpu area setup.
3364e74e3962STejun Heo *
3365e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles
3366e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is
3367e74e3962STejun Heo * important because many archs have addressing restrictions and might
3368e74e3962STejun Heo * fail if the percpu area is located far away from the previous
3369e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is
3370e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back
3371e74e3962STejun Heo * on the physical linear memory mapping which uses large page
3372e74e3962STejun Heo * mappings on applicable archs.
3373e74e3962STejun Heo */
3374e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3375e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset);
3376e74e3962STejun Heo
3377e74e3962STejun Heo void __init setup_per_cpu_areas(void)
3378e74e3962STejun Heo {
3379e74e3962STejun Heo unsigned long delta;
3380e74e3962STejun Heo unsigned int cpu;
3381fb435d52STejun Heo int rc;
3382e74e3962STejun Heo
3383e74e3962STejun Heo /*
3384e74e3962STejun Heo * Always reserve area for module percpu variables. That's
3385e74e3962STejun Heo * what the legacy allocator did.
3386e74e3962STejun Heo */
338723f91716SKefeng Wang rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
338823f91716SKefeng Wang PAGE_SIZE, NULL, NULL);
3389fb435d52STejun Heo if (rc < 0)
3390bbddff05STejun Heo panic("Failed to initialize percpu areas.");
3391e74e3962STejun Heo
3392e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3393e74e3962STejun Heo for_each_possible_cpu(cpu)
3394fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3395e74e3962STejun Heo }
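/*
 * With the offsets above in place, a per-cpu access such as
 * per_cpu(foo, cpu) boils down to (simplified sketch; the real accessors
 * live in asm-generic/percpu.h and may use a segment register or similar
 * on some architectures):
 *
 *	*(typeof(foo) *)((unsigned long)&foo + __per_cpu_offset[cpu])
 *
 * i.e. the static address of the variable shifted into that cpu's unit.
 */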
3396e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3397099a19d9STejun Heo
3398bbddff05STejun Heo #else /* CONFIG_SMP */
3399bbddff05STejun Heo
3400bbddff05STejun Heo /*
3401bbddff05STejun Heo * UP percpu area setup.
3402bbddff05STejun Heo *
3403bbddff05STejun Heo * UP always uses the km-based percpu allocator with identity mapping.
3404bbddff05STejun Heo * Static percpu variables are indistinguishable from the usual static
3405bbddff05STejun Heo * variables and don't require any special preparation.
3406bbddff05STejun Heo */
3407bbddff05STejun Heo void __init setup_per_cpu_areas(void)
3408bbddff05STejun Heo {
3409bbddff05STejun Heo const size_t unit_size =
3410bbddff05STejun Heo roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3411bbddff05STejun Heo PERCPU_DYNAMIC_RESERVE));
3412bbddff05STejun Heo struct pcpu_alloc_info *ai;
3413bbddff05STejun Heo void *fc;
3414bbddff05STejun Heo
3415bbddff05STejun Heo ai = pcpu_alloc_alloc_info(1, 1);
341626fb3daeSMike Rapoport fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3417bbddff05STejun Heo if (!ai || !fc)
3418bbddff05STejun Heo panic("Failed to allocate memory for percpu areas.");
3419100d13c3SCatalin Marinas /* kmemleak tracks the percpu allocations separately */
3420a317ebccSPatrick Wang kmemleak_ignore_phys(__pa(fc));
3421bbddff05STejun Heo
3422bbddff05STejun Heo ai->dyn_size = unit_size;
3423bbddff05STejun Heo ai->unit_size = unit_size;
3424bbddff05STejun Heo ai->atom_size = unit_size;
3425bbddff05STejun Heo ai->alloc_size = unit_size;
3426bbddff05STejun Heo ai->groups[0].nr_units = 1;
3427bbddff05STejun Heo ai->groups[0].cpu_map[0] = 0;
3428bbddff05STejun Heo
3429163fa234SKefeng Wang pcpu_setup_first_chunk(ai, fc);
3430438a5061SNicolas Pitre pcpu_free_alloc_info(ai);
3431bbddff05STejun Heo }
3432bbddff05STejun Heo
3433bbddff05STejun Heo #endif /* CONFIG_SMP */
3434bbddff05STejun Heo
3435099a19d9STejun Heo /*
34367e8a6304SDennis Zhou (Facebook) * pcpu_nr_pages - calculate total number of populated backing pages
34377e8a6304SDennis Zhou (Facebook) *
34387e8a6304SDennis Zhou (Facebook) * This reflects the number of pages populated to back chunks. Metadata is
34397e8a6304SDennis Zhou (Facebook) * excluded in the number exposed in meminfo as the number of backing pages
34407e8a6304SDennis Zhou (Facebook) * scales with the number of cpus and can quickly outweigh the memory used for
34417e8a6304SDennis Zhou (Facebook) * metadata. It also keeps this calculation nice and simple.
34427e8a6304SDennis Zhou (Facebook) *
34437e8a6304SDennis Zhou (Facebook) * RETURNS:
34447e8a6304SDennis Zhou (Facebook) * Total number of populated backing pages in use by the allocator.
34457e8a6304SDennis Zhou (Facebook) */
34467e8a6304SDennis Zhou (Facebook) unsigned long pcpu_nr_pages(void)
34477e8a6304SDennis Zhou (Facebook) {
34487e8a6304SDennis Zhou (Facebook) return pcpu_nr_populated * pcpu_nr_units;
34497e8a6304SDennis Zhou (Facebook) }
34507e8a6304SDennis Zhou (Facebook)
34517e8a6304SDennis Zhou (Facebook) /*
34521a4d7607STejun Heo * The percpu allocator is initialized early during boot when neither slab nor
34531a4d7607STejun Heo * workqueue is available. Plug async management until everything is up
34541a4d7607STejun Heo * and running.
34551a4d7607STejun Heo */
34561a4d7607STejun Heo static int __init percpu_enable_async(void)
34571a4d7607STejun Heo {
34581a4d7607STejun Heo pcpu_async_enabled = true;
34591a4d7607STejun Heo return 0;
34601a4d7607STejun Heo }
34611a4d7607STejun Heo subsys_initcall(percpu_enable_async);
3462