xref: /linux/mm/percpu.c (revision f0953a1bbaca71e1ebbcb9864eb1b273156157ed)
155716d26SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2fbf59bc9STejun Heo /*
388999a89STejun Heo  * mm/percpu.c - percpu memory allocator
4fbf59bc9STejun Heo  *
5fbf59bc9STejun Heo  * Copyright (C) 2009		SUSE Linux Products GmbH
6fbf59bc9STejun Heo  * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
7fbf59bc9STejun Heo  *
85e81ee3eSDennis Zhou (Facebook)  * Copyright (C) 2017		Facebook Inc.
9bfacd38fSDennis Zhou  * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
105e81ee3eSDennis Zhou (Facebook)  *
119c015162SDennis Zhou (Facebook)  * The percpu allocator handles both static and dynamic areas.  Percpu
129c015162SDennis Zhou (Facebook)  * areas are allocated in chunks which are divided into units.  There is
139c015162SDennis Zhou (Facebook)  * a 1-to-1 mapping for units to possible cpus.  These units are grouped
149c015162SDennis Zhou (Facebook)  * based on NUMA properties of the machine.
15fbf59bc9STejun Heo  *
16fbf59bc9STejun Heo  *  c0                           c1                         c2
17fbf59bc9STejun Heo  *  -------------------          -------------------        ------------
18fbf59bc9STejun Heo  * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
19fbf59bc9STejun Heo  *  -------------------  ......  -------------------  ....  ------------
20fbf59bc9STejun Heo  *
219c015162SDennis Zhou (Facebook)  * Allocation is done by offsets into a unit's address space.  I.e., an
229c015162SDennis Zhou (Facebook)  * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
239c015162SDennis Zhou (Facebook)  * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
249c015162SDennis Zhou (Facebook)  * and even sparse.  Access is handled by configuring percpu base
259c015162SDennis Zhou (Facebook)  * registers according to the cpu to unit mappings and offsetting the
269c015162SDennis Zhou (Facebook)  * base address using pcpu_unit_size.
27fbf59bc9STejun Heo  *
289c015162SDennis Zhou (Facebook)  * There is special consideration for the first chunk which must handle
299c015162SDennis Zhou (Facebook)  * the static percpu variables in the kernel image as allocation services
305e81ee3eSDennis Zhou (Facebook)  * are not online yet.  In short, the first chunk is structured like so:
319c015162SDennis Zhou (Facebook)  *
329c015162SDennis Zhou (Facebook)  *                  <Static | [Reserved] | Dynamic>
339c015162SDennis Zhou (Facebook)  *
349c015162SDennis Zhou (Facebook)  * The static data is copied from the original section managed by the
359c015162SDennis Zhou (Facebook)  * linker.  The reserved section, if non-zero, primarily manages static
369c015162SDennis Zhou (Facebook)  * percpu variables from kernel modules.  Finally, the dynamic section
379c015162SDennis Zhou (Facebook)  * takes care of normal allocations.
38fbf59bc9STejun Heo  *
395e81ee3eSDennis Zhou (Facebook)  * The allocator organizes chunks into lists according to free size and
403c7be18aSRoman Gushchin  * memcg-awareness.  To make a percpu allocation memcg-aware, the __GFP_ACCOUNT
413c7be18aSRoman Gushchin  * flag should be passed.  All memcg-aware allocations share one set of
423c7be18aSRoman Gushchin  * chunks, while all unaccounted allocations and allocations performed by
433c7be18aSRoman Gushchin  * processes belonging to the root memory cgroup use the second set.
443c7be18aSRoman Gushchin  *
453c7be18aSRoman Gushchin  * The allocator tries to allocate from the fullest chunk first. Each chunk
463c7be18aSRoman Gushchin  * is managed by a bitmap with metadata blocks.  The allocation map is updated
473c7be18aSRoman Gushchin  * on every allocation and free to reflect the current state while the boundary
485e81ee3eSDennis Zhou (Facebook)  * map is only updated on allocation.  Each metadata block contains
495e81ee3eSDennis Zhou (Facebook)  * information to help mitigate the need to iterate over large portions
505e81ee3eSDennis Zhou (Facebook)  * of the bitmap.  The reverse mapping from page to chunk is stored in
515e81ee3eSDennis Zhou (Facebook)  * the page's index.  Lastly, units are lazily backed and grow in unison.
52fbf59bc9STejun Heo  *
535e81ee3eSDennis Zhou (Facebook)  * There is a unique conversion that goes on here between bytes and bits.
545e81ee3eSDennis Zhou (Facebook)  * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
555e81ee3eSDennis Zhou (Facebook)  * tracks the number of pages it is responsible for in nr_pages.  Helper
565e81ee3eSDennis Zhou (Facebook)  * functions are used to convert between bytes, bits, and blocks.
575e81ee3eSDennis Zhou (Facebook)  * All hints are managed in bits unless explicitly stated.
589c015162SDennis Zhou (Facebook)  *
594091fb95SMasahiro Yamada  * To use this allocator, arch code should do the following:
60fbf59bc9STejun Heo  *
61fbf59bc9STejun Heo  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
62e0100983STejun Heo  *   regular address to percpu pointer and back if they need to be
63e0100983STejun Heo  *   different from the default
64fbf59bc9STejun Heo  *
658d408b4bSTejun Heo  * - use pcpu_setup_first_chunk() during percpu area initialization to
668d408b4bSTejun Heo  *   set up the first chunk containing the kernel static percpu area
67fbf59bc9STejun Heo  */
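
/*
 * A worked example of the conversions described above, assuming 4K pages
 * and a PCPU_MIN_ALLOC_SIZE of 4 bytes (a common configuration; other
 * configurations scale the numbers accordingly):
 *
 * - a 512 byte allocation consumes 512 / PCPU_MIN_ALLOC_SIZE = 128 bits
 *   of the chunk's allocation map, at the same offset in every unit;
 * - one page of a unit corresponds to PAGE_SIZE / PCPU_MIN_ALLOC_SIZE =
 *   1024 bits, which is also the span of one metadata block
 *   (PCPU_BITMAP_BLOCK_BITS);
 * - a given cpu reaches its copy of the allocation at the chunk's base
 *   address plus that cpu's unit offset plus the allocation's byte offset.
 */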
68fbf59bc9STejun Heo 
69870d4b12SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70870d4b12SJoe Perches 
71fbf59bc9STejun Heo #include <linux/bitmap.h>
72d7d29ac7SWonhyuk Yang #include <linux/cpumask.h>
7357c8a661SMike Rapoport #include <linux/memblock.h>
74fd1e8a1fSTejun Heo #include <linux/err.h>
75ca460b3cSDennis Zhou (Facebook) #include <linux/lcm.h>
76fbf59bc9STejun Heo #include <linux/list.h>
77a530b795STejun Heo #include <linux/log2.h>
78fbf59bc9STejun Heo #include <linux/mm.h>
79fbf59bc9STejun Heo #include <linux/module.h>
80fbf59bc9STejun Heo #include <linux/mutex.h>
81fbf59bc9STejun Heo #include <linux/percpu.h>
82fbf59bc9STejun Heo #include <linux/pfn.h>
83fbf59bc9STejun Heo #include <linux/slab.h>
84ccea34b5STejun Heo #include <linux/spinlock.h>
85fbf59bc9STejun Heo #include <linux/vmalloc.h>
86a56dbddfSTejun Heo #include <linux/workqueue.h>
87f528f0b8SCatalin Marinas #include <linux/kmemleak.h>
8871546d10STejun Heo #include <linux/sched.h>
8928307d93SFilipe Manana #include <linux/sched/mm.h>
903c7be18aSRoman Gushchin #include <linux/memcontrol.h>
91fbf59bc9STejun Heo 
92fbf59bc9STejun Heo #include <asm/cacheflush.h>
93e0100983STejun Heo #include <asm/sections.h>
94fbf59bc9STejun Heo #include <asm/tlbflush.h>
953b034b0dSVivek Goyal #include <asm/io.h>
96fbf59bc9STejun Heo 
97df95e795SDennis Zhou #define CREATE_TRACE_POINTS
98df95e795SDennis Zhou #include <trace/events/percpu.h>
99df95e795SDennis Zhou 
1008fa3ed80SDennis Zhou #include "percpu-internal.h"
1018fa3ed80SDennis Zhou 
10240064aecSDennis Zhou (Facebook) /* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
10340064aecSDennis Zhou (Facebook) #define PCPU_SLOT_BASE_SHIFT		5
1048744d859SDennis Zhou /* chunks in slots below this are subject to being sidelined on failed alloc */
1058744d859SDennis Zhou #define PCPU_SLOT_FAIL_THRESHOLD	3
10640064aecSDennis Zhou (Facebook) 
1071a4d7607STejun Heo #define PCPU_EMPTY_POP_PAGES_LOW	2
1081a4d7607STejun Heo #define PCPU_EMPTY_POP_PAGES_HIGH	4
109fbf59bc9STejun Heo 
110bbddff05STejun Heo #ifdef CONFIG_SMP
111e0100983STejun Heo /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
112e0100983STejun Heo #ifndef __addr_to_pcpu_ptr
113e0100983STejun Heo #define __addr_to_pcpu_ptr(addr)					\
11443cf38ebSTejun Heo 	(void __percpu *)((unsigned long)(addr) -			\
11543cf38ebSTejun Heo 			  (unsigned long)pcpu_base_addr	+		\
11643cf38ebSTejun Heo 			  (unsigned long)__per_cpu_start)
117e0100983STejun Heo #endif
118e0100983STejun Heo #ifndef __pcpu_ptr_to_addr
119e0100983STejun Heo #define __pcpu_ptr_to_addr(ptr)						\
12043cf38ebSTejun Heo 	(void __force *)((unsigned long)(ptr) +				\
12143cf38ebSTejun Heo 			 (unsigned long)pcpu_base_addr -		\
12243cf38ebSTejun Heo 			 (unsigned long)__per_cpu_start)
123e0100983STejun Heo #endif
124bbddff05STejun Heo #else	/* CONFIG_SMP */
125bbddff05STejun Heo /* on UP, it's always identity mapped */
126bbddff05STejun Heo #define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
127bbddff05STejun Heo #define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
128bbddff05STejun Heo #endif	/* CONFIG_SMP */
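
/*
 * A rough sketch of the default mapping above: for an allocation at byte
 * offset off within unit 0 of the first chunk,
 *
 *	addr = pcpu_base_addr + off;
 *	ptr  = __addr_to_pcpu_ptr(addr);	(== __per_cpu_start + off)
 *	__pcpu_ptr_to_addr(ptr) == addr;	(exact round trip)
 *
 * i.e. a percpu pointer is the chunk offset rebased onto __per_cpu_start;
 * per_cpu_ptr() later adds a cpu's percpu offset to reach that cpu's copy.
 */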
129e0100983STejun Heo 
1301328710bSDaniel Micay static int pcpu_unit_pages __ro_after_init;
1311328710bSDaniel Micay static int pcpu_unit_size __ro_after_init;
1321328710bSDaniel Micay static int pcpu_nr_units __ro_after_init;
1331328710bSDaniel Micay static int pcpu_atom_size __ro_after_init;
1348fa3ed80SDennis Zhou int pcpu_nr_slots __ro_after_init;
1351328710bSDaniel Micay static size_t pcpu_chunk_struct_size __ro_after_init;
136fbf59bc9STejun Heo 
137a855b84cSTejun Heo /* cpus with the lowest and highest unit addresses */
1381328710bSDaniel Micay static unsigned int pcpu_low_unit_cpu __ro_after_init;
1391328710bSDaniel Micay static unsigned int pcpu_high_unit_cpu __ro_after_init;
1402f39e637STejun Heo 
141fbf59bc9STejun Heo /* the address of the first chunk which starts with the kernel static area */
1421328710bSDaniel Micay void *pcpu_base_addr __ro_after_init;
143fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(pcpu_base_addr);
144fbf59bc9STejun Heo 
1451328710bSDaniel Micay static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
1461328710bSDaniel Micay const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */
1472f39e637STejun Heo 
1486563297cSTejun Heo /* group information, used for vm allocation */
1491328710bSDaniel Micay static int pcpu_nr_groups __ro_after_init;
1501328710bSDaniel Micay static const unsigned long *pcpu_group_offsets __ro_after_init;
1511328710bSDaniel Micay static const size_t *pcpu_group_sizes __ro_after_init;
1526563297cSTejun Heo 
153ae9e6bc9STejun Heo /*
154ae9e6bc9STejun Heo  * The first chunk which always exists.  Note that unlike other
155ae9e6bc9STejun Heo  * chunks, this one can be allocated and mapped in several different
156ae9e6bc9STejun Heo  * ways and thus often doesn't live in the vmalloc area.
157ae9e6bc9STejun Heo  */
1588fa3ed80SDennis Zhou struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
159ae9e6bc9STejun Heo 
160ae9e6bc9STejun Heo /*
161ae9e6bc9STejun Heo  * Optional reserved chunk.  This chunk reserves part of the first
162e2266705SDennis Zhou (Facebook)  * chunk and serves it for reserved allocations.  When the reserved
163e2266705SDennis Zhou (Facebook)  * region doesn't exist, the following variable is NULL.
164ae9e6bc9STejun Heo  */
1658fa3ed80SDennis Zhou struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
166edcb4639STejun Heo 
1678fa3ed80SDennis Zhou DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
1686710e594STejun Heo static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
169fbf59bc9STejun Heo 
1703c7be18aSRoman Gushchin struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
171fbf59bc9STejun Heo 
1724f996e23STejun Heo /* chunks which need their map areas extended, protected by pcpu_lock */
1734f996e23STejun Heo static LIST_HEAD(pcpu_map_extend_chunks);
1744f996e23STejun Heo 
175b539b87fSTejun Heo /*
1760760fa3dSRoman Gushchin  * The number of empty populated pages by chunk type, protected by pcpu_lock.
1770760fa3dSRoman Gushchin  * The reserved chunk doesn't contribute to the count.
178b539b87fSTejun Heo  */
1790760fa3dSRoman Gushchin int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];
180b539b87fSTejun Heo 
1811a4d7607STejun Heo /*
1827e8a6304SDennis Zhou (Facebook)  * The number of populated pages in use by the allocator, protected by
1837e8a6304SDennis Zhou (Facebook)  * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
1847e8a6304SDennis Zhou (Facebook)  * allocated/deallocated, it is allocated/deallocated in all units of a chunk
1857e8a6304SDennis Zhou (Facebook)  * and increments/decrements this count by 1).
1867e8a6304SDennis Zhou (Facebook)  */
1877e8a6304SDennis Zhou (Facebook) static unsigned long pcpu_nr_populated;
1887e8a6304SDennis Zhou (Facebook) 
1897e8a6304SDennis Zhou (Facebook) /*
1901a4d7607STejun Heo  * Balance work is used to populate or destroy chunks asynchronously.  We
1911a4d7607STejun Heo  * try to keep the number of populated free pages between
1921a4d7607STejun Heo  * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations, and to keep at
1931a4d7607STejun Heo  * most one empty chunk around.
1941a4d7607STejun Heo  */
195fe6bd8c3STejun Heo static void pcpu_balance_workfn(struct work_struct *work);
196fe6bd8c3STejun Heo static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
1971a4d7607STejun Heo static bool pcpu_async_enabled __read_mostly;
1981a4d7607STejun Heo static bool pcpu_atomic_alloc_failed;
1991a4d7607STejun Heo 
2001a4d7607STejun Heo static void pcpu_schedule_balance_work(void)
2011a4d7607STejun Heo {
2021a4d7607STejun Heo 	if (pcpu_async_enabled)
2031a4d7607STejun Heo 		schedule_work(&pcpu_balance_work);
2041a4d7607STejun Heo }
205a56dbddfSTejun Heo 
206c0ebfdc3SDennis Zhou (Facebook) /**
207560f2c23SDennis Zhou (Facebook)  * pcpu_addr_in_chunk - check if the address is served from this chunk
208560f2c23SDennis Zhou (Facebook)  * @chunk: chunk of interest
209560f2c23SDennis Zhou (Facebook)  * @addr: percpu address
210c0ebfdc3SDennis Zhou (Facebook)  *
211c0ebfdc3SDennis Zhou (Facebook)  * RETURNS:
212560f2c23SDennis Zhou (Facebook)  * True if the address is served from this chunk.
213c0ebfdc3SDennis Zhou (Facebook)  */
214560f2c23SDennis Zhou (Facebook) static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
215020ec653STejun Heo {
216c0ebfdc3SDennis Zhou (Facebook) 	void *start_addr, *end_addr;
217020ec653STejun Heo 
218560f2c23SDennis Zhou (Facebook) 	if (!chunk)
219c0ebfdc3SDennis Zhou (Facebook) 		return false;
220c0ebfdc3SDennis Zhou (Facebook) 
221560f2c23SDennis Zhou (Facebook) 	start_addr = chunk->base_addr + chunk->start_offset;
222560f2c23SDennis Zhou (Facebook) 	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
223560f2c23SDennis Zhou (Facebook) 		   chunk->end_offset;
224c0ebfdc3SDennis Zhou (Facebook) 
225c0ebfdc3SDennis Zhou (Facebook) 	return addr >= start_addr && addr < end_addr;
226020ec653STejun Heo }
227020ec653STejun Heo 
228d9b55eebSTejun Heo static int __pcpu_size_to_slot(int size)
229fbf59bc9STejun Heo {
230cae3aeb8STejun Heo 	int highbit = fls(size);	/* size is in bytes */
231fbf59bc9STejun Heo 	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
232fbf59bc9STejun Heo }
233fbf59bc9STejun Heo 
234d9b55eebSTejun Heo static int pcpu_size_to_slot(int size)
235d9b55eebSTejun Heo {
236d9b55eebSTejun Heo 	if (size == pcpu_unit_size)
237d9b55eebSTejun Heo 		return pcpu_nr_slots - 1;
238d9b55eebSTejun Heo 	return __pcpu_size_to_slot(size);
239d9b55eebSTejun Heo }
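
/*
 * For example, with PCPU_SLOT_BASE_SHIFT == 5, a chunk whose largest
 * contiguous free area is 1024 bytes gives fls(1024) == 11 and is kept in
 * slot max(11 - 5 + 2, 1) == 8, while a fully free chunk (free size ==
 * pcpu_unit_size) is special-cased into the last slot.
 */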
240d9b55eebSTejun Heo 
241fbf59bc9STejun Heo static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
242fbf59bc9STejun Heo {
24392c14cabSDennis Zhou 	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
24492c14cabSDennis Zhou 
24592c14cabSDennis Zhou 	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
24692c14cabSDennis Zhou 	    chunk_md->contig_hint == 0)
247fbf59bc9STejun Heo 		return 0;
248fbf59bc9STejun Heo 
24992c14cabSDennis Zhou 	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
250fbf59bc9STejun Heo }
251fbf59bc9STejun Heo 
25288999a89STejun Heo /* set the pointer to a chunk in a page struct */
25388999a89STejun Heo static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
25488999a89STejun Heo {
25588999a89STejun Heo 	page->index = (unsigned long)pcpu;
25688999a89STejun Heo }
25788999a89STejun Heo 
25888999a89STejun Heo /* obtain pointer to a chunk from a page struct */
25988999a89STejun Heo static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
26088999a89STejun Heo {
26188999a89STejun Heo 	return (struct pcpu_chunk *)page->index;
26288999a89STejun Heo }
26388999a89STejun Heo 
26488999a89STejun Heo static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
265fbf59bc9STejun Heo {
2662f39e637STejun Heo 	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
267fbf59bc9STejun Heo }
268fbf59bc9STejun Heo 
269c0ebfdc3SDennis Zhou (Facebook) static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
270c0ebfdc3SDennis Zhou (Facebook) {
271c0ebfdc3SDennis Zhou (Facebook) 	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
272c0ebfdc3SDennis Zhou (Facebook) }
273c0ebfdc3SDennis Zhou (Facebook) 
2749983b6f0STejun Heo static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
275fbf59bc9STejun Heo 				     unsigned int cpu, int page_idx)
276fbf59bc9STejun Heo {
277c0ebfdc3SDennis Zhou (Facebook) 	return (unsigned long)chunk->base_addr +
278c0ebfdc3SDennis Zhou (Facebook) 	       pcpu_unit_page_offset(cpu, page_idx);
279fbf59bc9STejun Heo }
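
/*
 * For example, with 4K pages and an illustrative unit offset of 0x40000
 * for some cpu, page 3 of that cpu's unit starts at
 * chunk->base_addr + 0x40000 + (3 << PAGE_SHIFT) == chunk->base_addr + 0x43000.
 */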
280fbf59bc9STejun Heo 
281ca460b3cSDennis Zhou (Facebook) /*
282ca460b3cSDennis Zhou (Facebook)  * The following are helper functions to help access bitmaps and convert
283ca460b3cSDennis Zhou (Facebook)  * between bitmap offsets to address offsets.
284ca460b3cSDennis Zhou (Facebook)  */
285ca460b3cSDennis Zhou (Facebook) static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
286ca460b3cSDennis Zhou (Facebook) {
287ca460b3cSDennis Zhou (Facebook) 	return chunk->alloc_map +
288ca460b3cSDennis Zhou (Facebook) 	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
289ca460b3cSDennis Zhou (Facebook) }
290ca460b3cSDennis Zhou (Facebook) 
291ca460b3cSDennis Zhou (Facebook) static unsigned long pcpu_off_to_block_index(int off)
292ca460b3cSDennis Zhou (Facebook) {
293ca460b3cSDennis Zhou (Facebook) 	return off / PCPU_BITMAP_BLOCK_BITS;
294ca460b3cSDennis Zhou (Facebook) }
295ca460b3cSDennis Zhou (Facebook) 
296ca460b3cSDennis Zhou (Facebook) static unsigned long pcpu_off_to_block_off(int off)
297ca460b3cSDennis Zhou (Facebook) {
298ca460b3cSDennis Zhou (Facebook) 	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
299ca460b3cSDennis Zhou (Facebook) }
300ca460b3cSDennis Zhou (Facebook) 
301b185cd0dSDennis Zhou (Facebook) static unsigned long pcpu_block_off_to_off(int index, int off)
302b185cd0dSDennis Zhou (Facebook) {
303b185cd0dSDennis Zhou (Facebook) 	return index * PCPU_BITMAP_BLOCK_BITS + off;
304b185cd0dSDennis Zhou (Facebook) }
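
/*
 * For example, with PCPU_BITMAP_BLOCK_BITS == 1024 (4K pages and a
 * PCPU_MIN_ALLOC_SIZE of 4 bytes), chunk bit offset 3000 maps to block
 * index 3000 / 1024 == 2 and block offset 3000 & 1023 == 952, and
 * pcpu_block_off_to_off(2, 952) converts back to 3000.
 */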
305b185cd0dSDennis Zhou (Facebook) 
306382b88e9SDennis Zhou /*
307382b88e9SDennis Zhou  * pcpu_next_hint - determine which hint to use
308382b88e9SDennis Zhou  * @block: block of interest
309382b88e9SDennis Zhou  * @alloc_bits: size of allocation
310382b88e9SDennis Zhou  *
311382b88e9SDennis Zhou  * This determines if we should scan based on the scan_hint or first_free.
312382b88e9SDennis Zhou  * In general, we want to scan from first_free to fulfill allocations by
313382b88e9SDennis Zhou  * first fit.  However, if we know a scan_hint at position scan_hint_start
314382b88e9SDennis Zhou  * cannot fulfill an allocation, we can begin scanning from there knowing
315382b88e9SDennis Zhou  * the contig_hint will be our fallback.
316382b88e9SDennis Zhou  */
317382b88e9SDennis Zhou static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
318382b88e9SDennis Zhou {
319382b88e9SDennis Zhou 	/*
320382b88e9SDennis Zhou 	 * The three conditions below determine if we can skip past the
321382b88e9SDennis Zhou 	 * scan_hint.  First, does the scan hint exist.  Second, is the
322382b88e9SDennis Zhou 	 * contig_hint after the scan_hint (possibly not true iff
323382b88e9SDennis Zhou 	 * contig_hint == scan_hint).  Third, is the allocation request
324382b88e9SDennis Zhou 	 * larger than the scan_hint.
325382b88e9SDennis Zhou 	 */
326382b88e9SDennis Zhou 	if (block->scan_hint &&
327382b88e9SDennis Zhou 	    block->contig_hint_start > block->scan_hint_start &&
328382b88e9SDennis Zhou 	    alloc_bits > block->scan_hint)
329382b88e9SDennis Zhou 		return block->scan_hint_start + block->scan_hint;
330382b88e9SDennis Zhou 
331382b88e9SDennis Zhou 	return block->first_free;
332382b88e9SDennis Zhou }
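
/*
 * For instance, a block with a 4-bit scan_hint at offset 200 whose
 * contig_hint starts later in the block: an 8-bit request cannot fit in
 * the scan_hint, so scanning starts at 200 + 4 = 204 rather than at
 * first_free.
 */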
333382b88e9SDennis Zhou 
334fbf59bc9STejun Heo /**
335525ca84dSDennis Zhou (Facebook)  * pcpu_next_md_free_region - finds the next hint free area
336525ca84dSDennis Zhou (Facebook)  * @chunk: chunk of interest
337525ca84dSDennis Zhou (Facebook)  * @bit_off: chunk offset
338525ca84dSDennis Zhou (Facebook)  * @bits: size of free area
339525ca84dSDennis Zhou (Facebook)  *
340525ca84dSDennis Zhou (Facebook)  * Helper function for pcpu_for_each_md_free_region.  It checks
341525ca84dSDennis Zhou (Facebook)  * block->contig_hint and performs aggregation across blocks to find the
342525ca84dSDennis Zhou (Facebook)  * next hint.  It modifies bit_off and bits in-place to be consumed in the
343525ca84dSDennis Zhou (Facebook)  * loop.
344525ca84dSDennis Zhou (Facebook)  */
345525ca84dSDennis Zhou (Facebook) static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
346525ca84dSDennis Zhou (Facebook) 				     int *bits)
347525ca84dSDennis Zhou (Facebook) {
348525ca84dSDennis Zhou (Facebook) 	int i = pcpu_off_to_block_index(*bit_off);
349525ca84dSDennis Zhou (Facebook) 	int block_off = pcpu_off_to_block_off(*bit_off);
350525ca84dSDennis Zhou (Facebook) 	struct pcpu_block_md *block;
351525ca84dSDennis Zhou (Facebook) 
352525ca84dSDennis Zhou (Facebook) 	*bits = 0;
353525ca84dSDennis Zhou (Facebook) 	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
354525ca84dSDennis Zhou (Facebook) 	     block++, i++) {
355525ca84dSDennis Zhou (Facebook) 		/* handles contig area across blocks */
356525ca84dSDennis Zhou (Facebook) 		if (*bits) {
357525ca84dSDennis Zhou (Facebook) 			*bits += block->left_free;
358525ca84dSDennis Zhou (Facebook) 			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
359525ca84dSDennis Zhou (Facebook) 				continue;
360525ca84dSDennis Zhou (Facebook) 			return;
361525ca84dSDennis Zhou (Facebook) 		}
362525ca84dSDennis Zhou (Facebook) 
363525ca84dSDennis Zhou (Facebook) 		/*
364525ca84dSDennis Zhou (Facebook) 		 * This checks three things.  First, is there a contig_hint to
365525ca84dSDennis Zhou (Facebook) 		 * check.  Second, have we checked this hint before by
366525ca84dSDennis Zhou (Facebook) 		 * comparing the block_off.  Third, is this the same as the
367525ca84dSDennis Zhou (Facebook) 		 * right contig hint.  In the last case, it spills over into
368525ca84dSDennis Zhou (Facebook) 		 * the next block and should be handled by the contig area
369525ca84dSDennis Zhou (Facebook) 		 * across blocks code.
370525ca84dSDennis Zhou (Facebook) 		 */
371525ca84dSDennis Zhou (Facebook) 		*bits = block->contig_hint;
372525ca84dSDennis Zhou (Facebook) 		if (*bits && block->contig_hint_start >= block_off &&
373525ca84dSDennis Zhou (Facebook) 		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
374525ca84dSDennis Zhou (Facebook) 			*bit_off = pcpu_block_off_to_off(i,
375525ca84dSDennis Zhou (Facebook) 					block->contig_hint_start);
376525ca84dSDennis Zhou (Facebook) 			return;
377525ca84dSDennis Zhou (Facebook) 		}
3781fa4df3eSDennis Zhou 		/* reset to satisfy the second predicate above */
3791fa4df3eSDennis Zhou 		block_off = 0;
380525ca84dSDennis Zhou (Facebook) 
381525ca84dSDennis Zhou (Facebook) 		*bits = block->right_free;
382525ca84dSDennis Zhou (Facebook) 		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
383525ca84dSDennis Zhou (Facebook) 	}
384525ca84dSDennis Zhou (Facebook) }
385525ca84dSDennis Zhou (Facebook) 
386b4c2116cSDennis Zhou (Facebook) /**
387b4c2116cSDennis Zhou (Facebook)  * pcpu_next_fit_region - finds fit areas for a given allocation request
388b4c2116cSDennis Zhou (Facebook)  * @chunk: chunk of interest
389b4c2116cSDennis Zhou (Facebook)  * @alloc_bits: size of allocation
390b4c2116cSDennis Zhou (Facebook)  * @align: alignment of area (max PAGE_SIZE)
391b4c2116cSDennis Zhou (Facebook)  * @bit_off: chunk offset
392b4c2116cSDennis Zhou (Facebook)  * @bits: size of free area
393b4c2116cSDennis Zhou (Facebook)  *
394b4c2116cSDennis Zhou (Facebook)  * Finds the next free region that is viable for use with a given size and
395b4c2116cSDennis Zhou (Facebook)  * alignment.  This only returns if there is a valid area to be used for this
396b4c2116cSDennis Zhou (Facebook)  * allocation.  block->first_free is returned if the allocation request fits
397b4c2116cSDennis Zhou (Facebook)  * within the block to see if the request can be fulfilled prior to the contig
398b4c2116cSDennis Zhou (Facebook)  * hint.
399b4c2116cSDennis Zhou (Facebook)  */
400b4c2116cSDennis Zhou (Facebook) static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
401b4c2116cSDennis Zhou (Facebook) 				 int align, int *bit_off, int *bits)
402b4c2116cSDennis Zhou (Facebook) {
403b4c2116cSDennis Zhou (Facebook) 	int i = pcpu_off_to_block_index(*bit_off);
404b4c2116cSDennis Zhou (Facebook) 	int block_off = pcpu_off_to_block_off(*bit_off);
405b4c2116cSDennis Zhou (Facebook) 	struct pcpu_block_md *block;
406b4c2116cSDennis Zhou (Facebook) 
407b4c2116cSDennis Zhou (Facebook) 	*bits = 0;
408b4c2116cSDennis Zhou (Facebook) 	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
409b4c2116cSDennis Zhou (Facebook) 	     block++, i++) {
410b4c2116cSDennis Zhou (Facebook) 		/* handles contig area across blocks */
411b4c2116cSDennis Zhou (Facebook) 		if (*bits) {
412b4c2116cSDennis Zhou (Facebook) 			*bits += block->left_free;
413b4c2116cSDennis Zhou (Facebook) 			if (*bits >= alloc_bits)
414b4c2116cSDennis Zhou (Facebook) 				return;
415b4c2116cSDennis Zhou (Facebook) 			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
416b4c2116cSDennis Zhou (Facebook) 				continue;
417b4c2116cSDennis Zhou (Facebook) 		}
418b4c2116cSDennis Zhou (Facebook) 
419b4c2116cSDennis Zhou (Facebook) 		/* check block->contig_hint */
420b4c2116cSDennis Zhou (Facebook) 		*bits = ALIGN(block->contig_hint_start, align) -
421b4c2116cSDennis Zhou (Facebook) 			block->contig_hint_start;
422b4c2116cSDennis Zhou (Facebook) 		/*
423b4c2116cSDennis Zhou (Facebook) 		 * This uses the block offset to determine if this has been
424b4c2116cSDennis Zhou (Facebook) 		 * checked in the prior iteration.
425b4c2116cSDennis Zhou (Facebook) 		 */
426b4c2116cSDennis Zhou (Facebook) 		if (block->contig_hint &&
427b4c2116cSDennis Zhou (Facebook) 		    block->contig_hint_start >= block_off &&
428b4c2116cSDennis Zhou (Facebook) 		    block->contig_hint >= *bits + alloc_bits) {
429382b88e9SDennis Zhou 			int start = pcpu_next_hint(block, alloc_bits);
430382b88e9SDennis Zhou 
431b4c2116cSDennis Zhou (Facebook) 			*bits += alloc_bits + block->contig_hint_start -
432382b88e9SDennis Zhou 				 start;
433382b88e9SDennis Zhou 			*bit_off = pcpu_block_off_to_off(i, start);
434b4c2116cSDennis Zhou (Facebook) 			return;
435b4c2116cSDennis Zhou (Facebook) 		}
4361fa4df3eSDennis Zhou 		/* reset to satisfy the second predicate above */
4371fa4df3eSDennis Zhou 		block_off = 0;
438b4c2116cSDennis Zhou (Facebook) 
439b4c2116cSDennis Zhou (Facebook) 		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
440b4c2116cSDennis Zhou (Facebook) 				 align);
441b4c2116cSDennis Zhou (Facebook) 		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
442b4c2116cSDennis Zhou (Facebook) 		*bit_off = pcpu_block_off_to_off(i, *bit_off);
443b4c2116cSDennis Zhou (Facebook) 		if (*bits >= alloc_bits)
444b4c2116cSDennis Zhou (Facebook) 			return;
445b4c2116cSDennis Zhou (Facebook) 	}
446b4c2116cSDennis Zhou (Facebook) 
447b4c2116cSDennis Zhou (Facebook) 	/* no valid offsets were found - fail condition */
448b4c2116cSDennis Zhou (Facebook) 	*bit_off = pcpu_chunk_map_bits(chunk);
449b4c2116cSDennis Zhou (Facebook) }
450b4c2116cSDennis Zhou (Facebook) 
451525ca84dSDennis Zhou (Facebook) /*
452525ca84dSDennis Zhou (Facebook)  * Metadata free area iterators.  These perform aggregation of free areas
453525ca84dSDennis Zhou (Facebook)  * based on the metadata blocks and return the offset @bit_off and size in
454b4c2116cSDennis Zhou (Facebook)  * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
455b4c2116cSDennis Zhou (Facebook)  * a fit is found for the allocation request.
456525ca84dSDennis Zhou (Facebook)  */
457525ca84dSDennis Zhou (Facebook) #define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
458525ca84dSDennis Zhou (Facebook) 	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
459525ca84dSDennis Zhou (Facebook) 	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
460525ca84dSDennis Zhou (Facebook) 	     (bit_off) += (bits) + 1,					\
461525ca84dSDennis Zhou (Facebook) 	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
462525ca84dSDennis Zhou (Facebook) 
463b4c2116cSDennis Zhou (Facebook) #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
464b4c2116cSDennis Zhou (Facebook) 	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
465b4c2116cSDennis Zhou (Facebook) 				  &(bits));				      \
466b4c2116cSDennis Zhou (Facebook) 	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
467b4c2116cSDennis Zhou (Facebook) 	     (bit_off) += (bits),					      \
468b4c2116cSDennis Zhou (Facebook) 	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
469b4c2116cSDennis Zhou (Facebook) 				  &(bits)))
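
/*
 * A simplified usage sketch of the fit iterator (the real user in this
 * file, pcpu_find_block_fit(), additionally checks whether the candidate
 * region is populated):
 *
 *	int bit_off, bits;
 *
 *	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)
 *		break;					take the first fit
 *
 *	if (bit_off == pcpu_chunk_map_bits(chunk))
 *		return -1;				no fit in this chunk
 */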
470b4c2116cSDennis Zhou (Facebook) 
471525ca84dSDennis Zhou (Facebook) /**
47290459ce0SBob Liu  * pcpu_mem_zalloc - allocate memory
4731880d93bSTejun Heo  * @size: bytes to allocate
47447504ee0SDennis Zhou  * @gfp: allocation flags
475fbf59bc9STejun Heo  *
4761880d93bSTejun Heo  * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
47747504ee0SDennis Zhou  * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
47847504ee0SDennis Zhou  * This is to facilitate passing through whitelisted flags.  The
47947504ee0SDennis Zhou  * returned memory is always zeroed.
480fbf59bc9STejun Heo  *
481fbf59bc9STejun Heo  * RETURNS:
4821880d93bSTejun Heo  * Pointer to the allocated area on success, NULL on failure.
483fbf59bc9STejun Heo  */
48447504ee0SDennis Zhou static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
485fbf59bc9STejun Heo {
486099a19d9STejun Heo 	if (WARN_ON_ONCE(!slab_is_available()))
487099a19d9STejun Heo 		return NULL;
488099a19d9STejun Heo 
489fbf59bc9STejun Heo 	if (size <= PAGE_SIZE)
490554fef1cSDennis Zhou 		return kzalloc(size, gfp);
4917af4c093SJesper Juhl 	else
49288dca4caSChristoph Hellwig 		return __vmalloc(size, gfp | __GFP_ZERO);
4931880d93bSTejun Heo }
494fbf59bc9STejun Heo 
4951880d93bSTejun Heo /**
4961880d93bSTejun Heo  * pcpu_mem_free - free memory
4971880d93bSTejun Heo  * @ptr: memory to free
4981880d93bSTejun Heo  *
49990459ce0SBob Liu  * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
5001880d93bSTejun Heo  */
5011d5cfdb0STetsuo Handa static void pcpu_mem_free(void *ptr)
5021880d93bSTejun Heo {
5031d5cfdb0STetsuo Handa 	kvfree(ptr);
504fbf59bc9STejun Heo }
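
/*
 * A minimal usage sketch; the GFP flags depend on the caller's context
 * and GFP_KERNEL is only an example here:
 *
 *	void *p = pcpu_mem_zalloc(size, GFP_KERNEL);
 *
 *	if (p)
 *		...
 *	pcpu_mem_free(p);	(kvfree() handles both the kzalloc and
 *				 vmalloc cases)
 */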
505fbf59bc9STejun Heo 
5068744d859SDennis Zhou static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
5078744d859SDennis Zhou 			      bool move_front)
5088744d859SDennis Zhou {
5098744d859SDennis Zhou 	if (chunk != pcpu_reserved_chunk) {
5103c7be18aSRoman Gushchin 		struct list_head *pcpu_slot;
5113c7be18aSRoman Gushchin 
5123c7be18aSRoman Gushchin 		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
5138744d859SDennis Zhou 		if (move_front)
5148744d859SDennis Zhou 			list_move(&chunk->list, &pcpu_slot[slot]);
5158744d859SDennis Zhou 		else
5168744d859SDennis Zhou 			list_move_tail(&chunk->list, &pcpu_slot[slot]);
5178744d859SDennis Zhou 	}
5188744d859SDennis Zhou }
5198744d859SDennis Zhou 
5208744d859SDennis Zhou static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
5218744d859SDennis Zhou {
5228744d859SDennis Zhou 	__pcpu_chunk_move(chunk, slot, true);
5238744d859SDennis Zhou }
5248744d859SDennis Zhou 
525fbf59bc9STejun Heo /**
526fbf59bc9STejun Heo  * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
527fbf59bc9STejun Heo  * @chunk: chunk of interest
528fbf59bc9STejun Heo  * @oslot: the previous slot it was on
529fbf59bc9STejun Heo  *
530fbf59bc9STejun Heo  * This function is called after an allocation or free changed @chunk.
531fbf59bc9STejun Heo  * New slot according to the changed state is determined and @chunk is
532edcb4639STejun Heo  * moved to the slot.  Note that the reserved chunk is never put on
533edcb4639STejun Heo  * chunk slots.
534ccea34b5STejun Heo  *
535ccea34b5STejun Heo  * CONTEXT:
536ccea34b5STejun Heo  * pcpu_lock.
537fbf59bc9STejun Heo  */
538fbf59bc9STejun Heo static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
539fbf59bc9STejun Heo {
540fbf59bc9STejun Heo 	int nslot = pcpu_chunk_slot(chunk);
541fbf59bc9STejun Heo 
5428744d859SDennis Zhou 	if (oslot != nslot)
5438744d859SDennis Zhou 		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
54440064aecSDennis Zhou (Facebook) }
54540064aecSDennis Zhou (Facebook) 
54640064aecSDennis Zhou (Facebook) /*
547b239f7daSDennis Zhou  * pcpu_update_empty_pages - update empty page counters
548b239f7daSDennis Zhou  * @chunk: chunk of interest
549b239f7daSDennis Zhou  * @nr: nr of empty pages
55040064aecSDennis Zhou (Facebook)  *
551b239f7daSDennis Zhou  * This is used to keep track of the empty pages now based on the premise
552b239f7daSDennis Zhou  * This is used to keep track of empty pages based on the premise that
553b239f7daSDennis Zhou  * an md_block covers a page.  The hint update functions recognize if a block
55440064aecSDennis Zhou (Facebook)  */
555b239f7daSDennis Zhou static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
556b239f7daSDennis Zhou {
557b239f7daSDennis Zhou 	chunk->nr_empty_pop_pages += nr;
55840064aecSDennis Zhou (Facebook) 	if (chunk != pcpu_reserved_chunk)
5590760fa3dSRoman Gushchin 		pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr;
56040064aecSDennis Zhou (Facebook) }
56140064aecSDennis Zhou (Facebook) 
562d9f3a01eSDennis Zhou /*
563d9f3a01eSDennis Zhou  * pcpu_region_overlap - determines if two regions overlap
564d9f3a01eSDennis Zhou  * @a: start of first region, inclusive
565d9f3a01eSDennis Zhou  * @b: end of first region, exclusive
566d9f3a01eSDennis Zhou  * @x: start of second region, inclusive
567d9f3a01eSDennis Zhou  * @y: end of second region, exclusive
568d9f3a01eSDennis Zhou  *
569d9f3a01eSDennis Zhou  * This is used to determine if the hint region [a, b) overlaps with the
570d9f3a01eSDennis Zhou  * allocated region [x, y).
571d9f3a01eSDennis Zhou  */
572d9f3a01eSDennis Zhou static inline bool pcpu_region_overlap(int a, int b, int x, int y)
573d9f3a01eSDennis Zhou {
574d9f3a01eSDennis Zhou 	return (a < y) && (x < b);
57540064aecSDennis Zhou (Facebook) }
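
/*
 * E.g. a hint [a, b) == [100, 160) and an allocation [x, y) == [144, 176)
 * overlap because 100 < 176 && 144 < 160, so the hint has to be dropped or
 * recomputed, while [100, 144) and [144, 176) merely touch and do not.
 */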
57640064aecSDennis Zhou (Facebook) 
57740064aecSDennis Zhou (Facebook) /**
578ca460b3cSDennis Zhou (Facebook)  * pcpu_block_update - updates a block given a free area
579ca460b3cSDennis Zhou (Facebook)  * @block: block of interest
580ca460b3cSDennis Zhou (Facebook)  * @start: start offset in block
581ca460b3cSDennis Zhou (Facebook)  * @end: end offset in block
582ca460b3cSDennis Zhou (Facebook)  *
583ca460b3cSDennis Zhou (Facebook)  * Updates a block given a known free area.  The region [start, end) is
584268625a6SDennis Zhou (Facebook)  * expected to be the entirety of the free area within a block.  Chooses
585268625a6SDennis Zhou (Facebook)  * the best starting offset if the contig hints are equal.
586ca460b3cSDennis Zhou (Facebook)  */
587ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
588ca460b3cSDennis Zhou (Facebook) {
589ca460b3cSDennis Zhou (Facebook) 	int contig = end - start;
590ca460b3cSDennis Zhou (Facebook) 
591ca460b3cSDennis Zhou (Facebook) 	block->first_free = min(block->first_free, start);
592ca460b3cSDennis Zhou (Facebook) 	if (start == 0)
593ca460b3cSDennis Zhou (Facebook) 		block->left_free = contig;
594ca460b3cSDennis Zhou (Facebook) 
595047924c9SDennis Zhou 	if (end == block->nr_bits)
596ca460b3cSDennis Zhou (Facebook) 		block->right_free = contig;
597ca460b3cSDennis Zhou (Facebook) 
598ca460b3cSDennis Zhou (Facebook) 	if (contig > block->contig_hint) {
599382b88e9SDennis Zhou 		/* promote the old contig_hint to be the new scan_hint */
600382b88e9SDennis Zhou 		if (start > block->contig_hint_start) {
601382b88e9SDennis Zhou 			if (block->contig_hint > block->scan_hint) {
602382b88e9SDennis Zhou 				block->scan_hint_start =
603382b88e9SDennis Zhou 					block->contig_hint_start;
604382b88e9SDennis Zhou 				block->scan_hint = block->contig_hint;
605382b88e9SDennis Zhou 			} else if (start < block->scan_hint_start) {
606382b88e9SDennis Zhou 				/*
607382b88e9SDennis Zhou 				 * The old contig_hint == scan_hint.  But, the
608382b88e9SDennis Zhou 				 * new contig is larger so hold the invariant
609382b88e9SDennis Zhou 				 * scan_hint_start < contig_hint_start.
610382b88e9SDennis Zhou 				 */
611382b88e9SDennis Zhou 				block->scan_hint = 0;
612382b88e9SDennis Zhou 			}
613382b88e9SDennis Zhou 		} else {
614382b88e9SDennis Zhou 			block->scan_hint = 0;
615382b88e9SDennis Zhou 		}
616ca460b3cSDennis Zhou (Facebook) 		block->contig_hint_start = start;
617ca460b3cSDennis Zhou (Facebook) 		block->contig_hint = contig;
618382b88e9SDennis Zhou 	} else if (contig == block->contig_hint) {
619382b88e9SDennis Zhou 		if (block->contig_hint_start &&
620382b88e9SDennis Zhou 		    (!start ||
621382b88e9SDennis Zhou 		     __ffs(start) > __ffs(block->contig_hint_start))) {
622382b88e9SDennis Zhou 			/* start has a better alignment so use it */
623268625a6SDennis Zhou (Facebook) 			block->contig_hint_start = start;
624382b88e9SDennis Zhou 			if (start < block->scan_hint_start &&
625382b88e9SDennis Zhou 			    block->contig_hint > block->scan_hint)
626382b88e9SDennis Zhou 				block->scan_hint = 0;
627382b88e9SDennis Zhou 		} else if (start > block->scan_hint_start ||
628382b88e9SDennis Zhou 			   block->contig_hint > block->scan_hint) {
629382b88e9SDennis Zhou 			/*
630382b88e9SDennis Zhou 			 * Knowing contig == contig_hint, update the scan_hint
631382b88e9SDennis Zhou 			 * if it is farther than or larger than the current
632382b88e9SDennis Zhou 			 * scan_hint.
633382b88e9SDennis Zhou 			 */
634382b88e9SDennis Zhou 			block->scan_hint_start = start;
635382b88e9SDennis Zhou 			block->scan_hint = contig;
636382b88e9SDennis Zhou 		}
637382b88e9SDennis Zhou 	} else {
638382b88e9SDennis Zhou 		/*
639382b88e9SDennis Zhou 		 * The region is smaller than the contig_hint.  So only update
640382b88e9SDennis Zhou 		 * the scan_hint if it is larger than, or equal to and farther
641382b88e9SDennis Zhou 		 * than, the current scan_hint.
642382b88e9SDennis Zhou 		 */
643382b88e9SDennis Zhou 		if ((start < block->contig_hint_start &&
644382b88e9SDennis Zhou 		     (contig > block->scan_hint ||
645382b88e9SDennis Zhou 		      (contig == block->scan_hint &&
646382b88e9SDennis Zhou 		       start > block->scan_hint_start)))) {
647382b88e9SDennis Zhou 			block->scan_hint_start = start;
648382b88e9SDennis Zhou 			block->scan_hint = contig;
649382b88e9SDennis Zhou 		}
650ca460b3cSDennis Zhou (Facebook) 	}
651ca460b3cSDennis Zhou (Facebook) }
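
/*
 * As an illustration, assuming no larger scan_hint was already recorded:
 * a block whose contig_hint is 10 bits at offset 50 that is handed a newly
 * free region [100, 120) sees contig == 20 > 10, so the old hint is
 * demoted to the scan_hint (start 50, size 10) and [100, 120) becomes the
 * new contig_hint.
 */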
652ca460b3cSDennis Zhou (Facebook) 
653b89462a9SDennis Zhou /*
654b89462a9SDennis Zhou  * pcpu_block_update_scan - update a block given a free area from a scan
655b89462a9SDennis Zhou  * @chunk: chunk of interest
656b89462a9SDennis Zhou  * @bit_off: chunk offset
657b89462a9SDennis Zhou  * @bits: size of free area
658b89462a9SDennis Zhou  *
659b89462a9SDennis Zhou  * Finding the final allocation spot first goes through pcpu_find_block_fit()
660b89462a9SDennis Zhou  * to find a block that can hold the allocation and then pcpu_alloc_area()
661b89462a9SDennis Zhou  * where a scan is used.  When allocations require specific alignments,
662b89462a9SDennis Zhou  * we can inadvertently create holes which will not be seen in the alloc
663b89462a9SDennis Zhou  * or free paths.
664b89462a9SDennis Zhou  *
665b89462a9SDennis Zhou  * This takes a given free area hole and updates a block as it may change the
666b89462a9SDennis Zhou  * scan_hint.  We need to scan backwards to ensure we don't miss free bits
667b89462a9SDennis Zhou  * from alignment.
668b89462a9SDennis Zhou  */
669b89462a9SDennis Zhou static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
670b89462a9SDennis Zhou 				   int bits)
671b89462a9SDennis Zhou {
672b89462a9SDennis Zhou 	int s_off = pcpu_off_to_block_off(bit_off);
673b89462a9SDennis Zhou 	int e_off = s_off + bits;
674b89462a9SDennis Zhou 	int s_index, l_bit;
675b89462a9SDennis Zhou 	struct pcpu_block_md *block;
676b89462a9SDennis Zhou 
677b89462a9SDennis Zhou 	if (e_off > PCPU_BITMAP_BLOCK_BITS)
678b89462a9SDennis Zhou 		return;
679b89462a9SDennis Zhou 
680b89462a9SDennis Zhou 	s_index = pcpu_off_to_block_index(bit_off);
681b89462a9SDennis Zhou 	block = chunk->md_blocks + s_index;
682b89462a9SDennis Zhou 
683b89462a9SDennis Zhou 	/* scan backwards in case of alignment skipping free bits */
684b89462a9SDennis Zhou 	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
685b89462a9SDennis Zhou 	s_off = (s_off == l_bit) ? 0 : l_bit + 1;
686b89462a9SDennis Zhou 
687b89462a9SDennis Zhou 	pcpu_block_update(block, s_off, e_off);
688b89462a9SDennis Zhou }
689b89462a9SDennis Zhou 
690ca460b3cSDennis Zhou (Facebook) /**
69192c14cabSDennis Zhou  * pcpu_chunk_refresh_hint - updates metadata about a chunk
69292c14cabSDennis Zhou  * @chunk: chunk of interest
693d33d9f3dSDennis Zhou  * @full_scan: if we should scan from the beginning
69492c14cabSDennis Zhou  *
69592c14cabSDennis Zhou  * Iterates over the metadata blocks to find the largest contig area.
696d33d9f3dSDennis Zhou  * A full scan can be avoided on the allocation path as this is triggered
697d33d9f3dSDennis Zhou  * if we broke the contig_hint.  In doing so, the scan_hint will be before
698d33d9f3dSDennis Zhou  * the contig_hint or after if the scan_hint == contig_hint.  This cannot
699d33d9f3dSDennis Zhou  * be prevented on freeing as we want to find the largest area possibly
700d33d9f3dSDennis Zhou  * spanning blocks.
70192c14cabSDennis Zhou  */
702d33d9f3dSDennis Zhou static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
70392c14cabSDennis Zhou {
70492c14cabSDennis Zhou 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
70592c14cabSDennis Zhou 	int bit_off, bits;
70692c14cabSDennis Zhou 
707d33d9f3dSDennis Zhou 	/* promote scan_hint to contig_hint */
708d33d9f3dSDennis Zhou 	if (!full_scan && chunk_md->scan_hint) {
709d33d9f3dSDennis Zhou 		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
710d33d9f3dSDennis Zhou 		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
711d33d9f3dSDennis Zhou 		chunk_md->contig_hint = chunk_md->scan_hint;
712d33d9f3dSDennis Zhou 		chunk_md->scan_hint = 0;
713d33d9f3dSDennis Zhou 	} else {
71492c14cabSDennis Zhou 		bit_off = chunk_md->first_free;
715d33d9f3dSDennis Zhou 		chunk_md->contig_hint = 0;
716d33d9f3dSDennis Zhou 	}
717d33d9f3dSDennis Zhou 
71892c14cabSDennis Zhou 	bits = 0;
719e837dfdeSDennis Zhou 	pcpu_for_each_md_free_region(chunk, bit_off, bits)
72092c14cabSDennis Zhou 		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
721ca460b3cSDennis Zhou (Facebook) }
722ca460b3cSDennis Zhou (Facebook) 
723ca460b3cSDennis Zhou (Facebook) /**
724ca460b3cSDennis Zhou (Facebook)  * pcpu_block_refresh_hint - rescan a block and update its hints
725ca460b3cSDennis Zhou (Facebook)  * @chunk: chunk of interest
726ca460b3cSDennis Zhou (Facebook)  * @index: index of the metadata block
727ca460b3cSDennis Zhou (Facebook)  *
728ca460b3cSDennis Zhou (Facebook)  * Scans over the block beginning at first_free and updates the block
729ca460b3cSDennis Zhou (Facebook)  * metadata accordingly.
730ca460b3cSDennis Zhou (Facebook)  */
731ca460b3cSDennis Zhou (Facebook) static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
732ca460b3cSDennis Zhou (Facebook) {
733ca460b3cSDennis Zhou (Facebook) 	struct pcpu_block_md *block = chunk->md_blocks + index;
734ca460b3cSDennis Zhou (Facebook) 	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
735e837dfdeSDennis Zhou 	unsigned int rs, re, start;	/* region start, region end */
736ca460b3cSDennis Zhou (Facebook) 
737da3afdd5SDennis Zhou 	/* promote scan_hint to contig_hint */
738da3afdd5SDennis Zhou 	if (block->scan_hint) {
739da3afdd5SDennis Zhou 		start = block->scan_hint_start + block->scan_hint;
740da3afdd5SDennis Zhou 		block->contig_hint_start = block->scan_hint_start;
741da3afdd5SDennis Zhou 		block->contig_hint = block->scan_hint;
742da3afdd5SDennis Zhou 		block->scan_hint = 0;
743da3afdd5SDennis Zhou 	} else {
744da3afdd5SDennis Zhou 		start = block->first_free;
745ca460b3cSDennis Zhou (Facebook) 		block->contig_hint = 0;
746da3afdd5SDennis Zhou 	}
747da3afdd5SDennis Zhou 
748da3afdd5SDennis Zhou 	block->right_free = 0;
749ca460b3cSDennis Zhou (Facebook) 
750ca460b3cSDennis Zhou (Facebook) 	/* iterate over free areas and update the contig hints */
751e837dfdeSDennis Zhou 	bitmap_for_each_clear_region(alloc_map, rs, re, start,
752e837dfdeSDennis Zhou 				     PCPU_BITMAP_BLOCK_BITS)
753ca460b3cSDennis Zhou (Facebook) 		pcpu_block_update(block, rs, re);
754ca460b3cSDennis Zhou (Facebook) }
755ca460b3cSDennis Zhou (Facebook) 
756ca460b3cSDennis Zhou (Facebook) /**
757ca460b3cSDennis Zhou (Facebook)  * pcpu_block_update_hint_alloc - update hint on allocation path
758ca460b3cSDennis Zhou (Facebook)  * @chunk: chunk of interest
759ca460b3cSDennis Zhou (Facebook)  * @bit_off: chunk offset
760ca460b3cSDennis Zhou (Facebook)  * @bits: size of request
761fc304334SDennis Zhou (Facebook)  *
762fc304334SDennis Zhou (Facebook)  * Updates metadata for the allocation path.  The metadata only has to be
763fc304334SDennis Zhou (Facebook)  * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
764fc304334SDennis Zhou (Facebook)  * scans are required if the block's contig hint is broken.
765ca460b3cSDennis Zhou (Facebook)  */
766ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
767ca460b3cSDennis Zhou (Facebook) 					 int bits)
768ca460b3cSDennis Zhou (Facebook) {
76992c14cabSDennis Zhou 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
770b239f7daSDennis Zhou 	int nr_empty_pages = 0;
771ca460b3cSDennis Zhou (Facebook) 	struct pcpu_block_md *s_block, *e_block, *block;
772ca460b3cSDennis Zhou (Facebook) 	int s_index, e_index;	/* block indexes of the allocation */
773ca460b3cSDennis Zhou (Facebook) 	int s_off, e_off;	/* block offsets of the allocation */
774ca460b3cSDennis Zhou (Facebook) 
775ca460b3cSDennis Zhou (Facebook) 	/*
776ca460b3cSDennis Zhou (Facebook) 	 * Calculate per block offsets.
777ca460b3cSDennis Zhou (Facebook) 	 * The calculation uses an inclusive range, but the resulting offsets
778ca460b3cSDennis Zhou (Facebook) 	 * are [start, end).  e_index always points to the last block in the
779ca460b3cSDennis Zhou (Facebook) 	 * range.
780ca460b3cSDennis Zhou (Facebook) 	 */
781ca460b3cSDennis Zhou (Facebook) 	s_index = pcpu_off_to_block_index(bit_off);
782ca460b3cSDennis Zhou (Facebook) 	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
783ca460b3cSDennis Zhou (Facebook) 	s_off = pcpu_off_to_block_off(bit_off);
784ca460b3cSDennis Zhou (Facebook) 	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
785ca460b3cSDennis Zhou (Facebook) 
786ca460b3cSDennis Zhou (Facebook) 	s_block = chunk->md_blocks + s_index;
787ca460b3cSDennis Zhou (Facebook) 	e_block = chunk->md_blocks + e_index;
788ca460b3cSDennis Zhou (Facebook) 
789ca460b3cSDennis Zhou (Facebook) 	/*
790ca460b3cSDennis Zhou (Facebook) 	 * Update s_block.
791fc304334SDennis Zhou (Facebook) 	 * block->first_free must be updated if the allocation takes its place.
792fc304334SDennis Zhou (Facebook) 	 * If the allocation breaks the contig_hint, a scan is required to
793fc304334SDennis Zhou (Facebook) 	 * restore this hint.
794ca460b3cSDennis Zhou (Facebook) 	 */
795b239f7daSDennis Zhou 	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
796b239f7daSDennis Zhou 		nr_empty_pages++;
797b239f7daSDennis Zhou 
798fc304334SDennis Zhou (Facebook) 	if (s_off == s_block->first_free)
799fc304334SDennis Zhou (Facebook) 		s_block->first_free = find_next_zero_bit(
800fc304334SDennis Zhou (Facebook) 					pcpu_index_alloc_map(chunk, s_index),
801fc304334SDennis Zhou (Facebook) 					PCPU_BITMAP_BLOCK_BITS,
802fc304334SDennis Zhou (Facebook) 					s_off + bits);
803fc304334SDennis Zhou (Facebook) 
804382b88e9SDennis Zhou 	if (pcpu_region_overlap(s_block->scan_hint_start,
805382b88e9SDennis Zhou 				s_block->scan_hint_start + s_block->scan_hint,
806382b88e9SDennis Zhou 				s_off,
807382b88e9SDennis Zhou 				s_off + bits))
808382b88e9SDennis Zhou 		s_block->scan_hint = 0;
809382b88e9SDennis Zhou 
810d9f3a01eSDennis Zhou 	if (pcpu_region_overlap(s_block->contig_hint_start,
811d9f3a01eSDennis Zhou 				s_block->contig_hint_start +
812d9f3a01eSDennis Zhou 				s_block->contig_hint,
813d9f3a01eSDennis Zhou 				s_off,
814d9f3a01eSDennis Zhou 				s_off + bits)) {
815fc304334SDennis Zhou (Facebook) 		/* block contig hint is broken - scan to fix it */
816da3afdd5SDennis Zhou 		if (!s_off)
817da3afdd5SDennis Zhou 			s_block->left_free = 0;
818ca460b3cSDennis Zhou (Facebook) 		pcpu_block_refresh_hint(chunk, s_index);
819fc304334SDennis Zhou (Facebook) 	} else {
820fc304334SDennis Zhou (Facebook) 		/* update left and right contig manually */
821fc304334SDennis Zhou (Facebook) 		s_block->left_free = min(s_block->left_free, s_off);
822fc304334SDennis Zhou (Facebook) 		if (s_index == e_index)
823fc304334SDennis Zhou (Facebook) 			s_block->right_free = min_t(int, s_block->right_free,
824fc304334SDennis Zhou (Facebook) 					PCPU_BITMAP_BLOCK_BITS - e_off);
825fc304334SDennis Zhou (Facebook) 		else
826fc304334SDennis Zhou (Facebook) 			s_block->right_free = 0;
827fc304334SDennis Zhou (Facebook) 	}
828ca460b3cSDennis Zhou (Facebook) 
829ca460b3cSDennis Zhou (Facebook) 	/*
830ca460b3cSDennis Zhou (Facebook) 	 * Update e_block.
831ca460b3cSDennis Zhou (Facebook) 	 */
832ca460b3cSDennis Zhou (Facebook) 	if (s_index != e_index) {
833b239f7daSDennis Zhou 		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
834b239f7daSDennis Zhou 			nr_empty_pages++;
835b239f7daSDennis Zhou 
836fc304334SDennis Zhou (Facebook) 		/*
837fc304334SDennis Zhou (Facebook) 		 * When the allocation is across blocks, the end is along
838fc304334SDennis Zhou (Facebook) 		 * the left part of the e_block.
839fc304334SDennis Zhou (Facebook) 		 */
840fc304334SDennis Zhou (Facebook) 		e_block->first_free = find_next_zero_bit(
841fc304334SDennis Zhou (Facebook) 				pcpu_index_alloc_map(chunk, e_index),
842fc304334SDennis Zhou (Facebook) 				PCPU_BITMAP_BLOCK_BITS, e_off);
843fc304334SDennis Zhou (Facebook) 
844fc304334SDennis Zhou (Facebook) 		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
845fc304334SDennis Zhou (Facebook) 			/* the whole block is allocated; reset it below */
846fc304334SDennis Zhou (Facebook) 			e_block++;
847fc304334SDennis Zhou (Facebook) 		} else {
848382b88e9SDennis Zhou 			if (e_off > e_block->scan_hint_start)
849382b88e9SDennis Zhou 				e_block->scan_hint = 0;
850382b88e9SDennis Zhou 
851da3afdd5SDennis Zhou 			e_block->left_free = 0;
852fc304334SDennis Zhou (Facebook) 			if (e_off > e_block->contig_hint_start) {
853fc304334SDennis Zhou (Facebook) 				/* contig hint is broken - scan to fix it */
854ca460b3cSDennis Zhou (Facebook) 				pcpu_block_refresh_hint(chunk, e_index);
855fc304334SDennis Zhou (Facebook) 			} else {
856fc304334SDennis Zhou (Facebook) 				e_block->right_free =
857fc304334SDennis Zhou (Facebook) 					min_t(int, e_block->right_free,
858fc304334SDennis Zhou (Facebook) 					      PCPU_BITMAP_BLOCK_BITS - e_off);
859fc304334SDennis Zhou (Facebook) 			}
860fc304334SDennis Zhou (Facebook) 		}
861ca460b3cSDennis Zhou (Facebook) 
862ca460b3cSDennis Zhou (Facebook) 		/* update in-between md_blocks */
863b239f7daSDennis Zhou 		nr_empty_pages += (e_index - s_index - 1);
864ca460b3cSDennis Zhou (Facebook) 		for (block = s_block + 1; block < e_block; block++) {
865382b88e9SDennis Zhou 			block->scan_hint = 0;
866ca460b3cSDennis Zhou (Facebook) 			block->contig_hint = 0;
867ca460b3cSDennis Zhou (Facebook) 			block->left_free = 0;
868ca460b3cSDennis Zhou (Facebook) 			block->right_free = 0;
869ca460b3cSDennis Zhou (Facebook) 		}
870ca460b3cSDennis Zhou (Facebook) 	}
871ca460b3cSDennis Zhou (Facebook) 
872b239f7daSDennis Zhou 	if (nr_empty_pages)
873b239f7daSDennis Zhou 		pcpu_update_empty_pages(chunk, -nr_empty_pages);
874b239f7daSDennis Zhou 
875d33d9f3dSDennis Zhou 	if (pcpu_region_overlap(chunk_md->scan_hint_start,
876d33d9f3dSDennis Zhou 				chunk_md->scan_hint_start +
877d33d9f3dSDennis Zhou 				chunk_md->scan_hint,
878d33d9f3dSDennis Zhou 				bit_off,
879d33d9f3dSDennis Zhou 				bit_off + bits))
880d33d9f3dSDennis Zhou 		chunk_md->scan_hint = 0;
881d33d9f3dSDennis Zhou 
882fc304334SDennis Zhou (Facebook) 	/*
883fc304334SDennis Zhou (Facebook) 	 * The only time a full chunk scan is required is if the chunk
884fc304334SDennis Zhou (Facebook) 	 * contig hint is broken.  Otherwise, it means a smaller space
885fc304334SDennis Zhou (Facebook) 	 * was used and therefore the chunk contig hint is still correct.
886fc304334SDennis Zhou (Facebook) 	 */
88792c14cabSDennis Zhou 	if (pcpu_region_overlap(chunk_md->contig_hint_start,
88892c14cabSDennis Zhou 				chunk_md->contig_hint_start +
88992c14cabSDennis Zhou 				chunk_md->contig_hint,
890d9f3a01eSDennis Zhou 				bit_off,
891d9f3a01eSDennis Zhou 				bit_off + bits))
892d33d9f3dSDennis Zhou 		pcpu_chunk_refresh_hint(chunk, false);
893ca460b3cSDennis Zhou (Facebook) }
894ca460b3cSDennis Zhou (Facebook) 
895ca460b3cSDennis Zhou (Facebook) /**
896ca460b3cSDennis Zhou (Facebook)  * pcpu_block_update_hint_free - updates the block hints on the free path
897ca460b3cSDennis Zhou (Facebook)  * @chunk: chunk of interest
898ca460b3cSDennis Zhou (Facebook)  * @bit_off: chunk offset
899ca460b3cSDennis Zhou (Facebook)  * @bits: size of request
900b185cd0dSDennis Zhou (Facebook)  *
901b185cd0dSDennis Zhou (Facebook)  * Updates metadata for the free path.  This avoids a blind block
902b185cd0dSDennis Zhou (Facebook)  * refresh by making use of the block contig hints.  If this fails, it scans
903b185cd0dSDennis Zhou (Facebook)  * forward and backward to determine the extent of the free area.  This is
904b185cd0dSDennis Zhou (Facebook)  * capped at the boundary of blocks.
905b185cd0dSDennis Zhou (Facebook)  *
906b185cd0dSDennis Zhou (Facebook)  * A chunk update is triggered if a page becomes free, a block becomes free,
907b185cd0dSDennis Zhou (Facebook)  * or the free spans across blocks.  This tradeoff is to minimize iterating
90892c14cabSDennis Zhou  * over the block metadata to update chunk_md->contig_hint.
90992c14cabSDennis Zhou  * chunk_md->contig_hint may be off by up to a page, but it will never be more
91092c14cabSDennis Zhou  * than the available space.  If the contig hint is contained in one block, it
91192c14cabSDennis Zhou  * will be accurate.
912ca460b3cSDennis Zhou (Facebook)  */
913ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
914ca460b3cSDennis Zhou (Facebook) 					int bits)
915ca460b3cSDennis Zhou (Facebook) {
916b239f7daSDennis Zhou 	int nr_empty_pages = 0;
917ca460b3cSDennis Zhou (Facebook) 	struct pcpu_block_md *s_block, *e_block, *block;
918ca460b3cSDennis Zhou (Facebook) 	int s_index, e_index;	/* block indexes of the freed allocation */
919ca460b3cSDennis Zhou (Facebook) 	int s_off, e_off;	/* block offsets of the freed allocation */
920b185cd0dSDennis Zhou (Facebook) 	int start, end;		/* start and end of the whole free area */
921ca460b3cSDennis Zhou (Facebook) 
922ca460b3cSDennis Zhou (Facebook) 	/*
923ca460b3cSDennis Zhou (Facebook) 	 * Calculate per block offsets.
924ca460b3cSDennis Zhou (Facebook) 	 * The calculation uses an inclusive range, but the resulting offsets
925ca460b3cSDennis Zhou (Facebook) 	 * are [start, end).  e_index always points to the last block in the
926ca460b3cSDennis Zhou (Facebook) 	 * range.
927ca460b3cSDennis Zhou (Facebook) 	 */
928ca460b3cSDennis Zhou (Facebook) 	s_index = pcpu_off_to_block_index(bit_off);
929ca460b3cSDennis Zhou (Facebook) 	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
930ca460b3cSDennis Zhou (Facebook) 	s_off = pcpu_off_to_block_off(bit_off);
931ca460b3cSDennis Zhou (Facebook) 	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
932ca460b3cSDennis Zhou (Facebook) 
933ca460b3cSDennis Zhou (Facebook) 	s_block = chunk->md_blocks + s_index;
934ca460b3cSDennis Zhou (Facebook) 	e_block = chunk->md_blocks + e_index;
935ca460b3cSDennis Zhou (Facebook) 
936b185cd0dSDennis Zhou (Facebook) 	/*
937b185cd0dSDennis Zhou (Facebook) 	 * Check if the freed area aligns with the block->contig_hint.
938b185cd0dSDennis Zhou (Facebook) 	 * If it does, then the scan to find the beginning/end of the
939b185cd0dSDennis Zhou (Facebook) 	 * larger free area can be avoided.
940b185cd0dSDennis Zhou (Facebook) 	 *
941b185cd0dSDennis Zhou (Facebook) 	 * start and end refer to the beginning and end of the free area
942b185cd0dSDennis Zhou (Facebook) 	 * within their respective blocks.  This is not necessarily the
943b185cd0dSDennis Zhou (Facebook) 	 * entire free area, as the free area may extend past the beginning
944b185cd0dSDennis Zhou (Facebook) 	 * or end of its block.
945b185cd0dSDennis Zhou (Facebook) 	 */
946b185cd0dSDennis Zhou (Facebook) 	start = s_off;
947b185cd0dSDennis Zhou (Facebook) 	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
948b185cd0dSDennis Zhou (Facebook) 		start = s_block->contig_hint_start;
949b185cd0dSDennis Zhou (Facebook) 	} else {
950b185cd0dSDennis Zhou (Facebook) 		/*
951b185cd0dSDennis Zhou (Facebook) 		 * Scan backwards to find the extent of the free area.
952b185cd0dSDennis Zhou (Facebook) 		 * find_last_bit() returns its size argument (here the start
953b185cd0dSDennis Zhou (Facebook) 		 * bit) when no set bit is found, which means everything before
954b185cd0dSDennis Zhou (Facebook) 		 * the freed area in this block is free.
955b185cd0dSDennis Zhou (Facebook) 		 */
956b185cd0dSDennis Zhou (Facebook) 		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
957b185cd0dSDennis Zhou (Facebook) 					  start);
958b185cd0dSDennis Zhou (Facebook) 		start = (start == l_bit) ? 0 : l_bit + 1;
959b185cd0dSDennis Zhou (Facebook) 	}
960b185cd0dSDennis Zhou (Facebook) 
961b185cd0dSDennis Zhou (Facebook) 	end = e_off;
962b185cd0dSDennis Zhou (Facebook) 	if (e_off == e_block->contig_hint_start)
963b185cd0dSDennis Zhou (Facebook) 		end = e_block->contig_hint_start + e_block->contig_hint;
964b185cd0dSDennis Zhou (Facebook) 	else
965b185cd0dSDennis Zhou (Facebook) 		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
966b185cd0dSDennis Zhou (Facebook) 				    PCPU_BITMAP_BLOCK_BITS, end);
967b185cd0dSDennis Zhou (Facebook) 
968ca460b3cSDennis Zhou (Facebook) 	/* update s_block */
969b185cd0dSDennis Zhou (Facebook) 	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
970b239f7daSDennis Zhou 	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
971b239f7daSDennis Zhou 		nr_empty_pages++;
972b185cd0dSDennis Zhou (Facebook) 	pcpu_block_update(s_block, start, e_off);
973ca460b3cSDennis Zhou (Facebook) 
974ca460b3cSDennis Zhou (Facebook) 	/* the freed area spans more than one block */
975ca460b3cSDennis Zhou (Facebook) 	if (s_index != e_index) {
976ca460b3cSDennis Zhou (Facebook) 		/* update e_block */
977b239f7daSDennis Zhou 		if (end == PCPU_BITMAP_BLOCK_BITS)
978b239f7daSDennis Zhou 			nr_empty_pages++;
979b185cd0dSDennis Zhou (Facebook) 		pcpu_block_update(e_block, 0, end);
980ca460b3cSDennis Zhou (Facebook) 
981ca460b3cSDennis Zhou (Facebook) 		/* reset md_blocks in the middle */
982b239f7daSDennis Zhou 		nr_empty_pages += (e_index - s_index - 1);
983ca460b3cSDennis Zhou (Facebook) 		for (block = s_block + 1; block < e_block; block++) {
984ca460b3cSDennis Zhou (Facebook) 			block->first_free = 0;
985382b88e9SDennis Zhou 			block->scan_hint = 0;
986ca460b3cSDennis Zhou (Facebook) 			block->contig_hint_start = 0;
987ca460b3cSDennis Zhou (Facebook) 			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
988ca460b3cSDennis Zhou (Facebook) 			block->left_free = PCPU_BITMAP_BLOCK_BITS;
989ca460b3cSDennis Zhou (Facebook) 			block->right_free = PCPU_BITMAP_BLOCK_BITS;
990ca460b3cSDennis Zhou (Facebook) 		}
991ca460b3cSDennis Zhou (Facebook) 	}
992ca460b3cSDennis Zhou (Facebook) 
993b239f7daSDennis Zhou 	if (nr_empty_pages)
994b239f7daSDennis Zhou 		pcpu_update_empty_pages(chunk, nr_empty_pages);
995b239f7daSDennis Zhou 
996b185cd0dSDennis Zhou (Facebook) 	/*
997b239f7daSDennis Zhou 	 * Refresh chunk metadata when the free makes a block free or spans
998b239f7daSDennis Zhou 	 * across blocks.  The contig_hint may be off by up to a page, but if
999b239f7daSDennis Zhou 	 * the contig_hint is contained in a block, it will be accurate with
1000b239f7daSDennis Zhou 	 * the else condition below.
1001b185cd0dSDennis Zhou (Facebook) 	 */
1002b239f7daSDennis Zhou 	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1003d33d9f3dSDennis Zhou 		pcpu_chunk_refresh_hint(chunk, true);
1004b185cd0dSDennis Zhou (Facebook) 	else
100592c14cabSDennis Zhou 		pcpu_block_update(&chunk->chunk_md,
100692c14cabSDennis Zhou 				  pcpu_block_off_to_off(s_index, start),
100792c14cabSDennis Zhou 				  end);
1008ca460b3cSDennis Zhou (Facebook) }
1009ca460b3cSDennis Zhou (Facebook) 
1010ca460b3cSDennis Zhou (Facebook) /**
101140064aecSDennis Zhou (Facebook)  * pcpu_is_populated - determines if the region is populated
101240064aecSDennis Zhou (Facebook)  * @chunk: chunk of interest
101340064aecSDennis Zhou (Facebook)  * @bit_off: chunk offset
101440064aecSDennis Zhou (Facebook)  * @bits: size of area
101540064aecSDennis Zhou (Facebook)  * @next_off: return value for the next offset to start searching
101640064aecSDennis Zhou (Facebook)  *
101740064aecSDennis Zhou (Facebook)  * For atomic allocations, check if the backing pages are populated.
101840064aecSDennis Zhou (Facebook)  *
101940064aecSDennis Zhou (Facebook)  * RETURNS:
102040064aecSDennis Zhou (Facebook)  * true if the backing pages are populated, false otherwise.
102140064aecSDennis Zhou (Facebook)  * @next_off is set to skip over the unpopulated region in pcpu_find_block_fit().
102240064aecSDennis Zhou (Facebook)  */
102340064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
102440064aecSDennis Zhou (Facebook) 			      int *next_off)
102540064aecSDennis Zhou (Facebook) {
1026e837dfdeSDennis Zhou 	unsigned int page_start, page_end, rs, re;
102740064aecSDennis Zhou (Facebook) 
102840064aecSDennis Zhou (Facebook) 	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
102940064aecSDennis Zhou (Facebook) 	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
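	/*
	 * Illustrative example (assuming 4 byte allocation units and 4K
	 * pages): bit_off = 1000, bits = 100 covers bytes [4000, 4400), so
	 * page_start = 0 and page_end = 2, i.e. pages 0 and 1 must be checked.
	 */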
103040064aecSDennis Zhou (Facebook) 
103140064aecSDennis Zhou (Facebook) 	rs = page_start;
1032e837dfdeSDennis Zhou 	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
103340064aecSDennis Zhou (Facebook) 	if (rs >= page_end)
103440064aecSDennis Zhou (Facebook) 		return true;
103540064aecSDennis Zhou (Facebook) 
103640064aecSDennis Zhou (Facebook) 	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
103740064aecSDennis Zhou (Facebook) 	return false;
103840064aecSDennis Zhou (Facebook) }
103940064aecSDennis Zhou (Facebook) 
104040064aecSDennis Zhou (Facebook) /**
104140064aecSDennis Zhou (Facebook)  * pcpu_find_block_fit - finds the block index to start searching
104240064aecSDennis Zhou (Facebook)  * @chunk: chunk of interest
104340064aecSDennis Zhou (Facebook)  * @alloc_bits: size of request in allocation units
104440064aecSDennis Zhou (Facebook)  * @align: alignment of area (max PAGE_SIZE bytes)
104540064aecSDennis Zhou (Facebook)  * @pop_only: use populated regions only
104640064aecSDennis Zhou (Facebook)  *
1047b4c2116cSDennis Zhou (Facebook)  * Given a chunk and an allocation spec, find the offset to begin searching
1048b4c2116cSDennis Zhou (Facebook)  * for a free region.  This iterates over the bitmap metadata blocks to
1049b4c2116cSDennis Zhou (Facebook)  * find an offset that will be guaranteed to fit the requirements.  It is
1050b4c2116cSDennis Zhou (Facebook)  * not quite first fit: if the allocation does not fit in the contig hint
1051b4c2116cSDennis Zhou (Facebook)  * of a block or chunk, that block or chunk is skipped.  This errs on the
1052b4c2116cSDennis Zhou (Facebook)  * side of caution to prevent excess iteration.  Poor alignment can cause
1053b4c2116cSDennis Zhou (Facebook)  * the allocator to skip over blocks and chunks that have valid free areas.
1054b4c2116cSDennis Zhou (Facebook)  *
105540064aecSDennis Zhou (Facebook)  * RETURNS:
105640064aecSDennis Zhou (Facebook)  * The offset in the bitmap to begin searching.
105740064aecSDennis Zhou (Facebook)  * -1 if no offset is found.
105840064aecSDennis Zhou (Facebook)  */
105940064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
106040064aecSDennis Zhou (Facebook) 			       size_t align, bool pop_only)
106140064aecSDennis Zhou (Facebook) {
106292c14cabSDennis Zhou 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1063b4c2116cSDennis Zhou (Facebook) 	int bit_off, bits, next_off;
106440064aecSDennis Zhou (Facebook) 
106513f96637SDennis Zhou (Facebook) 	/*
106613f96637SDennis Zhou (Facebook) 	 * Check to see if the allocation can fit in the chunk's contig hint.
106713f96637SDennis Zhou (Facebook) 	 * This is an optimization to prevent scanning by assuming that if the
106813f96637SDennis Zhou (Facebook) 	 * allocation cannot fit in the global hint, there is memory pressure
106913f96637SDennis Zhou (Facebook) 	 * and a new chunk will be created soon.
107013f96637SDennis Zhou (Facebook) 	 */
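	/*
	 * Illustrative numbers for the check below: with contig_hint_start =
	 * 10 and align = 4, ALIGN(10, 4) = 12, so bit_off = 2; a request of
	 * alloc_bits = 8 then needs 10 bits and is rejected if contig_hint is
	 * only 9.
	 */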
107192c14cabSDennis Zhou 	bit_off = ALIGN(chunk_md->contig_hint_start, align) -
107292c14cabSDennis Zhou 		  chunk_md->contig_hint_start;
107392c14cabSDennis Zhou 	if (bit_off + alloc_bits > chunk_md->contig_hint)
107413f96637SDennis Zhou (Facebook) 		return -1;
107513f96637SDennis Zhou (Facebook) 
1076d33d9f3dSDennis Zhou 	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1077b4c2116cSDennis Zhou (Facebook) 	bits = 0;
1078b4c2116cSDennis Zhou (Facebook) 	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
107940064aecSDennis Zhou (Facebook) 		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1080b4c2116cSDennis Zhou (Facebook) 						   &next_off))
108140064aecSDennis Zhou (Facebook) 			break;
108240064aecSDennis Zhou (Facebook) 
1083b4c2116cSDennis Zhou (Facebook) 		bit_off = next_off;
108440064aecSDennis Zhou (Facebook) 		bits = 0;
108540064aecSDennis Zhou (Facebook) 	}
108640064aecSDennis Zhou (Facebook) 
108740064aecSDennis Zhou (Facebook) 	if (bit_off == pcpu_chunk_map_bits(chunk))
108840064aecSDennis Zhou (Facebook) 		return -1;
108940064aecSDennis Zhou (Facebook) 
109040064aecSDennis Zhou (Facebook) 	return bit_off;
109140064aecSDennis Zhou (Facebook) }
109240064aecSDennis Zhou (Facebook) 
1093b89462a9SDennis Zhou /*
1094b89462a9SDennis Zhou  * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1095b89462a9SDennis Zhou  * @map: the address to base the search on
1096b89462a9SDennis Zhou  * @size: the bitmap size in bits
1097b89462a9SDennis Zhou  * @start: the bitnumber to start searching at
1098b89462a9SDennis Zhou  * @nr: the number of zeroed bits we're looking for
1099b89462a9SDennis Zhou  * @align_mask: alignment mask for zero area
1100b89462a9SDennis Zhou  * @largest_off: offset of the largest area skipped
1101b89462a9SDennis Zhou  * @largest_bits: size of the largest area skipped
1102b89462a9SDennis Zhou  *
1103b89462a9SDennis Zhou  * The @align_mask should be one less than a power of 2.
1104b89462a9SDennis Zhou  *
1105b89462a9SDennis Zhou  * This is a modified version of bitmap_find_next_zero_area_off() to remember
1106b89462a9SDennis Zhou  * the largest area that was skipped.  This is imperfect, but in general is
1107b89462a9SDennis Zhou  * good enough.  The largest remembered region is the largest failed region
1108b89462a9SDennis Zhou  * seen.  This does not include anything we possibly skipped due to alignment.
1109b89462a9SDennis Zhou  * pcpu_block_update_scan() does scan backwards to try and recover what was
1110b89462a9SDennis Zhou  * lost to alignment.  While this can cause scanning to miss earlier possible
1111b89462a9SDennis Zhou  * free areas, smaller allocations will eventually fill those holes.
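 *
 * Illustrative example of the size tie-break below: if two equally sized
 * areas are skipped at offsets 24 and 16, __ffs(16) == 4 beats
 * __ffs(24) == 3, so the area at offset 16 is remembered since it is the
 * more naturally aligned of the two.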
1112b89462a9SDennis Zhou  */
1113b89462a9SDennis Zhou static unsigned long pcpu_find_zero_area(unsigned long *map,
1114b89462a9SDennis Zhou 					 unsigned long size,
1115b89462a9SDennis Zhou 					 unsigned long start,
1116b89462a9SDennis Zhou 					 unsigned long nr,
1117b89462a9SDennis Zhou 					 unsigned long align_mask,
1118b89462a9SDennis Zhou 					 unsigned long *largest_off,
1119b89462a9SDennis Zhou 					 unsigned long *largest_bits)
1120b89462a9SDennis Zhou {
1121b89462a9SDennis Zhou 	unsigned long index, end, i, area_off, area_bits;
1122b89462a9SDennis Zhou again:
1123b89462a9SDennis Zhou 	index = find_next_zero_bit(map, size, start);
1124b89462a9SDennis Zhou 
1125b89462a9SDennis Zhou 	/* Align allocation */
1126b89462a9SDennis Zhou 	index = __ALIGN_MASK(index, align_mask);
1127b89462a9SDennis Zhou 	area_off = index;
1128b89462a9SDennis Zhou 
1129b89462a9SDennis Zhou 	end = index + nr;
1130b89462a9SDennis Zhou 	if (end > size)
1131b89462a9SDennis Zhou 		return end;
1132b89462a9SDennis Zhou 	i = find_next_bit(map, end, index);
1133b89462a9SDennis Zhou 	if (i < end) {
1134b89462a9SDennis Zhou 		area_bits = i - area_off;
1135b89462a9SDennis Zhou 		/* remember the largest area; on a size tie prefer the better-aligned offset */
1136b89462a9SDennis Zhou 		if (area_bits > *largest_bits ||
1137b89462a9SDennis Zhou 		    (area_bits == *largest_bits && *largest_off &&
1138b89462a9SDennis Zhou 		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1139b89462a9SDennis Zhou 			*largest_off = area_off;
1140b89462a9SDennis Zhou 			*largest_bits = area_bits;
1141b89462a9SDennis Zhou 		}
1142b89462a9SDennis Zhou 
1143b89462a9SDennis Zhou 		start = i + 1;
1144b89462a9SDennis Zhou 		goto again;
1145b89462a9SDennis Zhou 	}
1146b89462a9SDennis Zhou 	return index;
1147b89462a9SDennis Zhou }
1148b89462a9SDennis Zhou 
114940064aecSDennis Zhou (Facebook) /**
115040064aecSDennis Zhou (Facebook)  * pcpu_alloc_area - allocates an area from a pcpu_chunk
115140064aecSDennis Zhou (Facebook)  * @chunk: chunk of interest
115240064aecSDennis Zhou (Facebook)  * @alloc_bits: size of request in allocation units
115340064aecSDennis Zhou (Facebook)  * @align: alignment of area (max PAGE_SIZE)
115440064aecSDennis Zhou (Facebook)  * @start: bit_off to start searching
115540064aecSDennis Zhou (Facebook)  *
115640064aecSDennis Zhou (Facebook)  * This function takes in a @start offset to begin searching to fit an
1157b4c2116cSDennis Zhou (Facebook)  * allocation of @alloc_bits with alignment @align.  It still needs to scan
1158b4c2116cSDennis Zhou (Facebook)  * the allocation map because, if the allocation fits within the block's
1159b4c2116cSDennis Zhou (Facebook)  * contig hint, @start will be block->first_free rather than the exact
1160b4c2116cSDennis Zhou (Facebook)  * offset.  This is an attempt to fill the allocation prior to breaking
1161b4c2116cSDennis Zhou (Facebook)  * the contig hint.  The allocation and boundary maps are updated
1162b4c2116cSDennis Zhou (Facebook)  * accordingly if a valid free area is confirmed.
116340064aecSDennis Zhou (Facebook)  *
116440064aecSDennis Zhou (Facebook)  * RETURNS:
116540064aecSDennis Zhou (Facebook)  * Allocated addr offset in @chunk on success.
116640064aecSDennis Zhou (Facebook)  * -1 if no matching area is found.
116740064aecSDennis Zhou (Facebook)  */
116840064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
116940064aecSDennis Zhou (Facebook) 			   size_t align, int start)
117040064aecSDennis Zhou (Facebook) {
117192c14cabSDennis Zhou 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
117240064aecSDennis Zhou (Facebook) 	size_t align_mask = (align) ? (align - 1) : 0;
1173b89462a9SDennis Zhou 	unsigned long area_off = 0, area_bits = 0;
117440064aecSDennis Zhou (Facebook) 	int bit_off, end, oslot;
11759f7dcf22STejun Heo 
11764f996e23STejun Heo 	lockdep_assert_held(&pcpu_lock);
11774f996e23STejun Heo 
117840064aecSDennis Zhou (Facebook) 	oslot = pcpu_chunk_slot(chunk);
1179833af842STejun Heo 
1180833af842STejun Heo 	/*
118140064aecSDennis Zhou (Facebook) 	 * Search to find a fit.
1182833af842STejun Heo 	 */
11838c43004aSDennis Zhou 	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
11848c43004aSDennis Zhou 		    pcpu_chunk_map_bits(chunk));
1185b89462a9SDennis Zhou 	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1186b89462a9SDennis Zhou 				      align_mask, &area_off, &area_bits);
118740064aecSDennis Zhou (Facebook) 	if (bit_off >= end)
1188a16037c8STejun Heo 		return -1;
1189a16037c8STejun Heo 
1190b89462a9SDennis Zhou 	if (area_bits)
1191b89462a9SDennis Zhou 		pcpu_block_update_scan(chunk, area_off, area_bits);
1192b89462a9SDennis Zhou 
119340064aecSDennis Zhou (Facebook) 	/* update alloc map */
119440064aecSDennis Zhou (Facebook) 	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1195a16037c8STejun Heo 
119640064aecSDennis Zhou (Facebook) 	/* update boundary map */
119740064aecSDennis Zhou (Facebook) 	set_bit(bit_off, chunk->bound_map);
119840064aecSDennis Zhou (Facebook) 	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
119940064aecSDennis Zhou (Facebook) 	set_bit(bit_off + alloc_bits, chunk->bound_map);
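	/*
	 * Illustrative example of the boundary map convention: allocating
	 * alloc_bits = 3 at bit_off = 8 sets alloc_map bits 8..10, sets
	 * bound_map bits 8 and 11 and clears bits 9..10.  pcpu_free_area()
	 * later recovers the size by finding the next bound_map bit after 8.
	 */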
1200a16037c8STejun Heo 
120140064aecSDennis Zhou (Facebook) 	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
120240064aecSDennis Zhou (Facebook) 
120386b442fbSDennis Zhou (Facebook) 	/* update first free bit */
120492c14cabSDennis Zhou 	if (bit_off == chunk_md->first_free)
120592c14cabSDennis Zhou 		chunk_md->first_free = find_next_zero_bit(
120686b442fbSDennis Zhou (Facebook) 					chunk->alloc_map,
120786b442fbSDennis Zhou (Facebook) 					pcpu_chunk_map_bits(chunk),
120886b442fbSDennis Zhou (Facebook) 					bit_off + alloc_bits);
120986b442fbSDennis Zhou (Facebook) 
1210ca460b3cSDennis Zhou (Facebook) 	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
121140064aecSDennis Zhou (Facebook) 
121240064aecSDennis Zhou (Facebook) 	pcpu_chunk_relocate(chunk, oslot);
121340064aecSDennis Zhou (Facebook) 
121440064aecSDennis Zhou (Facebook) 	return bit_off * PCPU_MIN_ALLOC_SIZE;
1215a16037c8STejun Heo }
1216a16037c8STejun Heo 
1217a16037c8STejun Heo /**
121840064aecSDennis Zhou (Facebook)  * pcpu_free_area - frees the corresponding offset
1219fbf59bc9STejun Heo  * @chunk: chunk of interest
122040064aecSDennis Zhou (Facebook)  * @off: addr offset into chunk
1221fbf59bc9STejun Heo  *
122240064aecSDennis Zhou (Facebook)  * This function determines the size of an allocation to free using
122340064aecSDennis Zhou (Facebook)  * the boundary bitmap and clears the allocation map.
12245b32af91SRoman Gushchin  *
12255b32af91SRoman Gushchin  * RETURNS:
12265b32af91SRoman Gushchin  * Number of freed bytes.
1227fbf59bc9STejun Heo  */
12285b32af91SRoman Gushchin static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1229fbf59bc9STejun Heo {
123092c14cabSDennis Zhou 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
12315b32af91SRoman Gushchin 	int bit_off, bits, end, oslot, freed;
1232fbf59bc9STejun Heo 
12335ccd30e4SDennis Zhou 	lockdep_assert_held(&pcpu_lock);
123430a5b536SDennis Zhou 	pcpu_stats_area_dealloc(chunk);
12355ccd30e4SDennis Zhou 
123640064aecSDennis Zhou (Facebook) 	oslot = pcpu_chunk_slot(chunk);
1237723ad1d9SAl Viro 
123840064aecSDennis Zhou (Facebook) 	bit_off = off / PCPU_MIN_ALLOC_SIZE;
1239fbf59bc9STejun Heo 
124040064aecSDennis Zhou (Facebook) 	/* find end index */
124140064aecSDennis Zhou (Facebook) 	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
124240064aecSDennis Zhou (Facebook) 			    bit_off + 1);
124340064aecSDennis Zhou (Facebook) 	bits = end - bit_off;
124440064aecSDennis Zhou (Facebook) 	bitmap_clear(chunk->alloc_map, bit_off, bits);
12453d331ad7SAl Viro 
12465b32af91SRoman Gushchin 	freed = bits * PCPU_MIN_ALLOC_SIZE;
12475b32af91SRoman Gushchin 
124840064aecSDennis Zhou (Facebook) 	/* update metadata */
12495b32af91SRoman Gushchin 	chunk->free_bytes += freed;
1250fbf59bc9STejun Heo 
125186b442fbSDennis Zhou (Facebook) 	/* update first free bit */
125292c14cabSDennis Zhou 	chunk_md->first_free = min(chunk_md->first_free, bit_off);
125386b442fbSDennis Zhou (Facebook) 
1254ca460b3cSDennis Zhou (Facebook) 	pcpu_block_update_hint_free(chunk, bit_off, bits);
1255b539b87fSTejun Heo 
1256fbf59bc9STejun Heo 	pcpu_chunk_relocate(chunk, oslot);
12575b32af91SRoman Gushchin 
12585b32af91SRoman Gushchin 	return freed;
1259fbf59bc9STejun Heo }
1260fbf59bc9STejun Heo 
1261047924c9SDennis Zhou static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1262047924c9SDennis Zhou {
1263047924c9SDennis Zhou 	block->scan_hint = 0;
1264047924c9SDennis Zhou 	block->contig_hint = nr_bits;
1265047924c9SDennis Zhou 	block->left_free = nr_bits;
1266047924c9SDennis Zhou 	block->right_free = nr_bits;
1267047924c9SDennis Zhou 	block->first_free = 0;
1268047924c9SDennis Zhou 	block->nr_bits = nr_bits;
1269047924c9SDennis Zhou }
1270047924c9SDennis Zhou 
1271ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1272ca460b3cSDennis Zhou (Facebook) {
1273ca460b3cSDennis Zhou (Facebook) 	struct pcpu_block_md *md_block;
1274ca460b3cSDennis Zhou (Facebook) 
127592c14cabSDennis Zhou 	/* init the chunk's block */
127692c14cabSDennis Zhou 	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
127792c14cabSDennis Zhou 
1278ca460b3cSDennis Zhou (Facebook) 	for (md_block = chunk->md_blocks;
1279ca460b3cSDennis Zhou (Facebook) 	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1280047924c9SDennis Zhou 	     md_block++)
1281047924c9SDennis Zhou 		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1282ca460b3cSDennis Zhou (Facebook) }
1283ca460b3cSDennis Zhou (Facebook) 
128440064aecSDennis Zhou (Facebook) /**
128540064aecSDennis Zhou (Facebook)  * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
128640064aecSDennis Zhou (Facebook)  * @tmp_addr: the start of the region served
128740064aecSDennis Zhou (Facebook)  * @map_size: size of the region served
128840064aecSDennis Zhou (Facebook)  *
128940064aecSDennis Zhou (Facebook)  * This is responsible for creating the chunks that serve the first chunk.  The
129040064aecSDennis Zhou (Facebook)  * base_addr is @tmp_addr rounded down to the nearest page while the region
129140064aecSDennis Zhou (Facebook)  * end is rounded up.  Start and end offsets are tracked to determine the
129240064aecSDennis Zhou (Facebook)  * region actually served, so the bitmap allocator never sees partial blocks.
129340064aecSDennis Zhou (Facebook)  *
129440064aecSDennis Zhou (Facebook)  * RETURNS:
129540064aecSDennis Zhou (Facebook)  * Chunk serving the region at @tmp_addr of @map_size.
129640064aecSDennis Zhou (Facebook)  */
1297c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
129840064aecSDennis Zhou (Facebook) 							 int map_size)
129910edf5b0SDennis Zhou (Facebook) {
130010edf5b0SDennis Zhou (Facebook) 	struct pcpu_chunk *chunk;
1301ca460b3cSDennis Zhou (Facebook) 	unsigned long aligned_addr, lcm_align;
130240064aecSDennis Zhou (Facebook) 	int start_offset, offset_bits, region_size, region_bits;
1303f655f405SMike Rapoport 	size_t alloc_size;
1304c0ebfdc3SDennis Zhou (Facebook) 
1305c0ebfdc3SDennis Zhou (Facebook) 	/* region calculations */
1306c0ebfdc3SDennis Zhou (Facebook) 	aligned_addr = tmp_addr & PAGE_MASK;
1307c0ebfdc3SDennis Zhou (Facebook) 
1308c0ebfdc3SDennis Zhou (Facebook) 	start_offset = tmp_addr - aligned_addr;
13096b9d7c8eSDennis Zhou (Facebook) 
1310ca460b3cSDennis Zhou (Facebook) 	/*
1311ca460b3cSDennis Zhou (Facebook) 	 * Align the end of the region with the LCM of PAGE_SIZE and
1312ca460b3cSDennis Zhou (Facebook) 	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
1313ca460b3cSDennis Zhou (Facebook) 	 * the other.
1314ca460b3cSDennis Zhou (Facebook) 	 */
1315ca460b3cSDennis Zhou (Facebook) 	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1316ca460b3cSDennis Zhou (Facebook) 	region_size = ALIGN(start_offset + map_size, lcm_align);
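	/*
	 * Illustrative example (assuming 4K pages and PCPU_BITMAP_BLOCK_SIZE
	 * == PAGE_SIZE, so lcm_align == 4K): a tmp_addr at page offset 0x234
	 * with map_size = 0x3000 gives start_offset = 0x234 and region_size =
	 * ALIGN(0x3234, 0x1000) = 0x4000, leaving an end_offset of 0xdcc.
	 */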
131710edf5b0SDennis Zhou (Facebook) 
1318c0ebfdc3SDennis Zhou (Facebook) 	/* allocate chunk */
131961cf93d3SDennis Zhou 	alloc_size = struct_size(chunk, populated,
132061cf93d3SDennis Zhou 				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1321f655f405SMike Rapoport 	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1322f655f405SMike Rapoport 	if (!chunk)
1323f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1324f655f405SMike Rapoport 		      alloc_size);
1325c0ebfdc3SDennis Zhou (Facebook) 
132610edf5b0SDennis Zhou (Facebook) 	INIT_LIST_HEAD(&chunk->list);
1327c0ebfdc3SDennis Zhou (Facebook) 
1328c0ebfdc3SDennis Zhou (Facebook) 	chunk->base_addr = (void *)aligned_addr;
132910edf5b0SDennis Zhou (Facebook) 	chunk->start_offset = start_offset;
13306b9d7c8eSDennis Zhou (Facebook) 	chunk->end_offset = region_size - chunk->start_offset - map_size;
1331c0ebfdc3SDennis Zhou (Facebook) 
13328ab16c43SDennis Zhou (Facebook) 	chunk->nr_pages = region_size >> PAGE_SHIFT;
133340064aecSDennis Zhou (Facebook) 	region_bits = pcpu_chunk_map_bits(chunk);
1334c0ebfdc3SDennis Zhou (Facebook) 
1335f655f405SMike Rapoport 	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1336f655f405SMike Rapoport 	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1337f655f405SMike Rapoport 	if (!chunk->alloc_map)
1338f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1339f655f405SMike Rapoport 		      alloc_size);
1340f655f405SMike Rapoport 
1341f655f405SMike Rapoport 	alloc_size =
1342f655f405SMike Rapoport 		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1343f655f405SMike Rapoport 	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1344f655f405SMike Rapoport 	if (!chunk->bound_map)
1345f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1346f655f405SMike Rapoport 		      alloc_size);
1347f655f405SMike Rapoport 
1348f655f405SMike Rapoport 	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1349f655f405SMike Rapoport 	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1350f655f405SMike Rapoport 	if (!chunk->md_blocks)
1351f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1352f655f405SMike Rapoport 		      alloc_size);
1353f655f405SMike Rapoport 
13543c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
13553c7be18aSRoman Gushchin 	/* first chunk isn't memcg-aware */
13563c7be18aSRoman Gushchin 	chunk->obj_cgroups = NULL;
13573c7be18aSRoman Gushchin #endif
1358ca460b3cSDennis Zhou (Facebook) 	pcpu_init_md_blocks(chunk);
135910edf5b0SDennis Zhou (Facebook) 
136010edf5b0SDennis Zhou (Facebook) 	/* manage populated page bitmap */
136110edf5b0SDennis Zhou (Facebook) 	chunk->immutable = true;
13628ab16c43SDennis Zhou (Facebook) 	bitmap_fill(chunk->populated, chunk->nr_pages);
13638ab16c43SDennis Zhou (Facebook) 	chunk->nr_populated = chunk->nr_pages;
1364b239f7daSDennis Zhou 	chunk->nr_empty_pop_pages = chunk->nr_pages;
136510edf5b0SDennis Zhou (Facebook) 
136640064aecSDennis Zhou (Facebook) 	chunk->free_bytes = map_size;
1367c0ebfdc3SDennis Zhou (Facebook) 
1368c0ebfdc3SDennis Zhou (Facebook) 	if (chunk->start_offset) {
1369c0ebfdc3SDennis Zhou (Facebook) 		/* hide the beginning of the bitmap */
137040064aecSDennis Zhou (Facebook) 		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
137140064aecSDennis Zhou (Facebook) 		bitmap_set(chunk->alloc_map, 0, offset_bits);
137240064aecSDennis Zhou (Facebook) 		set_bit(0, chunk->bound_map);
137340064aecSDennis Zhou (Facebook) 		set_bit(offset_bits, chunk->bound_map);
1374ca460b3cSDennis Zhou (Facebook) 
137592c14cabSDennis Zhou 		chunk->chunk_md.first_free = offset_bits;
137686b442fbSDennis Zhou (Facebook) 
1377ca460b3cSDennis Zhou (Facebook) 		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1378c0ebfdc3SDennis Zhou (Facebook) 	}
1379c0ebfdc3SDennis Zhou (Facebook) 
13806b9d7c8eSDennis Zhou (Facebook) 	if (chunk->end_offset) {
13816b9d7c8eSDennis Zhou (Facebook) 		/* hide the end of the bitmap */
138240064aecSDennis Zhou (Facebook) 		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
138340064aecSDennis Zhou (Facebook) 		bitmap_set(chunk->alloc_map,
138440064aecSDennis Zhou (Facebook) 			   pcpu_chunk_map_bits(chunk) - offset_bits,
138540064aecSDennis Zhou (Facebook) 			   offset_bits);
138640064aecSDennis Zhou (Facebook) 		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
138740064aecSDennis Zhou (Facebook) 			chunk->bound_map);
138840064aecSDennis Zhou (Facebook) 		set_bit(region_bits, chunk->bound_map);
13896b9d7c8eSDennis Zhou (Facebook) 
1390ca460b3cSDennis Zhou (Facebook) 		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1391ca460b3cSDennis Zhou (Facebook) 					     - offset_bits, offset_bits);
1392ca460b3cSDennis Zhou (Facebook) 	}
139340064aecSDennis Zhou (Facebook) 
139410edf5b0SDennis Zhou (Facebook) 	return chunk;
139510edf5b0SDennis Zhou (Facebook) }
139610edf5b0SDennis Zhou (Facebook) 
13973c7be18aSRoman Gushchin static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
13986081089fSTejun Heo {
13996081089fSTejun Heo 	struct pcpu_chunk *chunk;
140040064aecSDennis Zhou (Facebook) 	int region_bits;
14016081089fSTejun Heo 
140247504ee0SDennis Zhou 	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
14036081089fSTejun Heo 	if (!chunk)
14046081089fSTejun Heo 		return NULL;
14056081089fSTejun Heo 
14066081089fSTejun Heo 	INIT_LIST_HEAD(&chunk->list);
1407c0ebfdc3SDennis Zhou (Facebook) 	chunk->nr_pages = pcpu_unit_pages;
140840064aecSDennis Zhou (Facebook) 	region_bits = pcpu_chunk_map_bits(chunk);
140940064aecSDennis Zhou (Facebook) 
141040064aecSDennis Zhou (Facebook) 	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
141147504ee0SDennis Zhou 					   sizeof(chunk->alloc_map[0]), gfp);
141240064aecSDennis Zhou (Facebook) 	if (!chunk->alloc_map)
141340064aecSDennis Zhou (Facebook) 		goto alloc_map_fail;
141440064aecSDennis Zhou (Facebook) 
141540064aecSDennis Zhou (Facebook) 	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
141647504ee0SDennis Zhou 					   sizeof(chunk->bound_map[0]), gfp);
141740064aecSDennis Zhou (Facebook) 	if (!chunk->bound_map)
141840064aecSDennis Zhou (Facebook) 		goto bound_map_fail;
141940064aecSDennis Zhou (Facebook) 
1420ca460b3cSDennis Zhou (Facebook) 	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
142147504ee0SDennis Zhou 					   sizeof(chunk->md_blocks[0]), gfp);
1422ca460b3cSDennis Zhou (Facebook) 	if (!chunk->md_blocks)
1423ca460b3cSDennis Zhou (Facebook) 		goto md_blocks_fail;
1424ca460b3cSDennis Zhou (Facebook) 
14253c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
14263c7be18aSRoman Gushchin 	if (pcpu_is_memcg_chunk(type)) {
14273c7be18aSRoman Gushchin 		chunk->obj_cgroups =
14283c7be18aSRoman Gushchin 			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
14293c7be18aSRoman Gushchin 					sizeof(struct obj_cgroup *), gfp);
14303c7be18aSRoman Gushchin 		if (!chunk->obj_cgroups)
14313c7be18aSRoman Gushchin 			goto objcg_fail;
14323c7be18aSRoman Gushchin 	}
14333c7be18aSRoman Gushchin #endif
14343c7be18aSRoman Gushchin 
1435ca460b3cSDennis Zhou (Facebook) 	pcpu_init_md_blocks(chunk);
1436ca460b3cSDennis Zhou (Facebook) 
143740064aecSDennis Zhou (Facebook) 	/* init metadata */
143840064aecSDennis Zhou (Facebook) 	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1439c0ebfdc3SDennis Zhou (Facebook) 
14406081089fSTejun Heo 	return chunk;
144140064aecSDennis Zhou (Facebook) 
14423c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
14433c7be18aSRoman Gushchin objcg_fail:
14443c7be18aSRoman Gushchin 	pcpu_mem_free(chunk->md_blocks);
14453c7be18aSRoman Gushchin #endif
1446ca460b3cSDennis Zhou (Facebook) md_blocks_fail:
1447ca460b3cSDennis Zhou (Facebook) 	pcpu_mem_free(chunk->bound_map);
144840064aecSDennis Zhou (Facebook) bound_map_fail:
144940064aecSDennis Zhou (Facebook) 	pcpu_mem_free(chunk->alloc_map);
145040064aecSDennis Zhou (Facebook) alloc_map_fail:
145140064aecSDennis Zhou (Facebook) 	pcpu_mem_free(chunk);
145240064aecSDennis Zhou (Facebook) 
145340064aecSDennis Zhou (Facebook) 	return NULL;
14546081089fSTejun Heo }
14556081089fSTejun Heo 
14566081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk)
14576081089fSTejun Heo {
14586081089fSTejun Heo 	if (!chunk)
14596081089fSTejun Heo 		return;
14603c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
14613c7be18aSRoman Gushchin 	pcpu_mem_free(chunk->obj_cgroups);
14623c7be18aSRoman Gushchin #endif
14636685b357SMike Rapoport 	pcpu_mem_free(chunk->md_blocks);
146440064aecSDennis Zhou (Facebook) 	pcpu_mem_free(chunk->bound_map);
146540064aecSDennis Zhou (Facebook) 	pcpu_mem_free(chunk->alloc_map);
14661d5cfdb0STetsuo Handa 	pcpu_mem_free(chunk);
14676081089fSTejun Heo }
14686081089fSTejun Heo 
1469b539b87fSTejun Heo /**
1470b539b87fSTejun Heo  * pcpu_chunk_populated - post-population bookkeeping
1471b539b87fSTejun Heo  * @chunk: pcpu_chunk which got populated
1472b539b87fSTejun Heo  * @page_start: the start page
1473b539b87fSTejun Heo  * @page_end: the end page
1474b539b87fSTejun Heo  *
1475b539b87fSTejun Heo  * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1476b539b87fSTejun Heo  * the bookkeeping information accordingly.  Must be called after each
1477b539b87fSTejun Heo  * successful population.
147840064aecSDennis Zhou (Facebook)  *
147940064aecSDennis Zhou (Facebook)  * Empty populated page accounting is updated via pcpu_update_empty_pages().
1481b539b87fSTejun Heo  */
148240064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1483b239f7daSDennis Zhou 				 int page_end)
1484b539b87fSTejun Heo {
1485b539b87fSTejun Heo 	int nr = page_end - page_start;
1486b539b87fSTejun Heo 
1487b539b87fSTejun Heo 	lockdep_assert_held(&pcpu_lock);
1488b539b87fSTejun Heo 
1489b539b87fSTejun Heo 	bitmap_set(chunk->populated, page_start, nr);
1490b539b87fSTejun Heo 	chunk->nr_populated += nr;
14917e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated += nr;
149240064aecSDennis Zhou (Facebook) 
1493b239f7daSDennis Zhou 	pcpu_update_empty_pages(chunk, nr);
149440064aecSDennis Zhou (Facebook) }
1495b539b87fSTejun Heo 
1496b539b87fSTejun Heo /**
1497b539b87fSTejun Heo  * pcpu_chunk_depopulated - post-depopulation bookkeeping
1498b539b87fSTejun Heo  * @chunk: pcpu_chunk which got depopulated
1499b539b87fSTejun Heo  * @page_start: the start page
1500b539b87fSTejun Heo  * @page_end: the end page
1501b539b87fSTejun Heo  *
1502b539b87fSTejun Heo  * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1503b539b87fSTejun Heo  * Update the bookkeeping information accordingly.  Must be called after
1504b539b87fSTejun Heo  * each successful depopulation.
1505b539b87fSTejun Heo  */
1506b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1507b539b87fSTejun Heo 				   int page_start, int page_end)
1508b539b87fSTejun Heo {
1509b539b87fSTejun Heo 	int nr = page_end - page_start;
1510b539b87fSTejun Heo 
1511b539b87fSTejun Heo 	lockdep_assert_held(&pcpu_lock);
1512b539b87fSTejun Heo 
1513b539b87fSTejun Heo 	bitmap_clear(chunk->populated, page_start, nr);
1514b539b87fSTejun Heo 	chunk->nr_populated -= nr;
15157e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated -= nr;
1516b239f7daSDennis Zhou 
1517b239f7daSDennis Zhou 	pcpu_update_empty_pages(chunk, -nr);
1518b539b87fSTejun Heo }
1519b539b87fSTejun Heo 
1520fbf59bc9STejun Heo /*
15219f645532STejun Heo  * Chunk management implementation.
1522fbf59bc9STejun Heo  *
15239f645532STejun Heo  * To allow different implementations, chunk alloc/free and
15249f645532STejun Heo  * [de]population are implemented in a separate file which is pulled
15259f645532STejun Heo  * into this file and compiled together.  The following functions
15269f645532STejun Heo  * should be implemented.
1527ccea34b5STejun Heo  *
15289f645532STejun Heo  * pcpu_populate_chunk		- populate the specified range of a chunk
15299f645532STejun Heo  * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
15309f645532STejun Heo  * pcpu_create_chunk		- create a new chunk
15319f645532STejun Heo  * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
15329f645532STejun Heo  * pcpu_addr_to_page		- translate address to physical address
15339f645532STejun Heo  * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
1534fbf59bc9STejun Heo  */
153515d9f3d1SDennis Zhou static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
153647504ee0SDennis Zhou 			       int page_start, int page_end, gfp_t gfp);
153715d9f3d1SDennis Zhou static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
153815d9f3d1SDennis Zhou 				  int page_start, int page_end);
15393c7be18aSRoman Gushchin static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
15403c7be18aSRoman Gushchin 					    gfp_t gfp);
15419f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
15429f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr);
15439f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1544fbf59bc9STejun Heo 
1545b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM
1546b0c9778bSTejun Heo #include "percpu-km.c"
1547b0c9778bSTejun Heo #else
15489f645532STejun Heo #include "percpu-vm.c"
1549b0c9778bSTejun Heo #endif
1550fbf59bc9STejun Heo 
1551fbf59bc9STejun Heo /**
155288999a89STejun Heo  * pcpu_chunk_addr_search - determine chunk containing specified address
155388999a89STejun Heo  * @addr: address for which the chunk needs to be determined.
155488999a89STejun Heo  *
1555c0ebfdc3SDennis Zhou (Facebook)  * This is an internal function that handles all but static allocations.
1556c0ebfdc3SDennis Zhou (Facebook)  * Static percpu address values should never be passed into the allocator.
1557c0ebfdc3SDennis Zhou (Facebook)  *
155888999a89STejun Heo  * RETURNS:
155988999a89STejun Heo  * The address of the found chunk.
156088999a89STejun Heo  */
156188999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
156288999a89STejun Heo {
1563c0ebfdc3SDennis Zhou (Facebook) 	/* is it in the dynamic region (first chunk)? */
1564560f2c23SDennis Zhou (Facebook) 	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1565c0ebfdc3SDennis Zhou (Facebook) 		return pcpu_first_chunk;
1566c0ebfdc3SDennis Zhou (Facebook) 
1567c0ebfdc3SDennis Zhou (Facebook) 	/* is it in the reserved region? */
1568560f2c23SDennis Zhou (Facebook) 	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
156988999a89STejun Heo 		return pcpu_reserved_chunk;
157088999a89STejun Heo 
157188999a89STejun Heo 	/*
157288999a89STejun Heo 	 * The address is relative to unit0 which might be unused and
157388999a89STejun Heo 	 * thus unmapped.  Offset the address to the unit space of the
157488999a89STejun Heo 	 * current processor before looking it up in the vmalloc
157588999a89STejun Heo 	 * space.  Note that any possible cpu id can be used here, so
157688999a89STejun Heo 	 * there's no need to worry about preemption or cpu hotplug.
157788999a89STejun Heo 	 */
157888999a89STejun Heo 	addr += pcpu_unit_offsets[raw_smp_processor_id()];
15799f645532STejun Heo 	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
158088999a89STejun Heo }
158188999a89STejun Heo 
15823c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
15833c7be18aSRoman Gushchin static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
15843c7be18aSRoman Gushchin 						     struct obj_cgroup **objcgp)
15853c7be18aSRoman Gushchin {
15863c7be18aSRoman Gushchin 	struct obj_cgroup *objcg;
15873c7be18aSRoman Gushchin 
1588279c3393SRoman Gushchin 	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
15893c7be18aSRoman Gushchin 		return PCPU_CHUNK_ROOT;
15903c7be18aSRoman Gushchin 
15913c7be18aSRoman Gushchin 	objcg = get_obj_cgroup_from_current();
15923c7be18aSRoman Gushchin 	if (!objcg)
15933c7be18aSRoman Gushchin 		return PCPU_CHUNK_ROOT;
15943c7be18aSRoman Gushchin 
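	/*
	 * The charge below covers all possible CPUs.  Illustrative example: a
	 * 64 byte percpu allocation on a machine with 8 possible CPUs charges
	 * 64 * 8 = 512 bytes to the obj_cgroup.
	 */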
15953c7be18aSRoman Gushchin 	if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
15963c7be18aSRoman Gushchin 		obj_cgroup_put(objcg);
15973c7be18aSRoman Gushchin 		return PCPU_FAIL_ALLOC;
15983c7be18aSRoman Gushchin 	}
15993c7be18aSRoman Gushchin 
16003c7be18aSRoman Gushchin 	*objcgp = objcg;
16013c7be18aSRoman Gushchin 	return PCPU_CHUNK_MEMCG;
16023c7be18aSRoman Gushchin }
16033c7be18aSRoman Gushchin 
16043c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
16053c7be18aSRoman Gushchin 				       struct pcpu_chunk *chunk, int off,
16063c7be18aSRoman Gushchin 				       size_t size)
16073c7be18aSRoman Gushchin {
16083c7be18aSRoman Gushchin 	if (!objcg)
16093c7be18aSRoman Gushchin 		return;
16103c7be18aSRoman Gushchin 
16113c7be18aSRoman Gushchin 	if (chunk) {
16123c7be18aSRoman Gushchin 		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1613772616b0SRoman Gushchin 
1614772616b0SRoman Gushchin 		rcu_read_lock();
1615772616b0SRoman Gushchin 		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1616772616b0SRoman Gushchin 				size * num_possible_cpus());
1617772616b0SRoman Gushchin 		rcu_read_unlock();
16183c7be18aSRoman Gushchin 	} else {
16193c7be18aSRoman Gushchin 		obj_cgroup_uncharge(objcg, size * num_possible_cpus());
16203c7be18aSRoman Gushchin 		obj_cgroup_put(objcg);
16213c7be18aSRoman Gushchin 	}
16223c7be18aSRoman Gushchin }
16233c7be18aSRoman Gushchin 
16243c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
16253c7be18aSRoman Gushchin {
16263c7be18aSRoman Gushchin 	struct obj_cgroup *objcg;
16273c7be18aSRoman Gushchin 
16283c7be18aSRoman Gushchin 	if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)))
16293c7be18aSRoman Gushchin 		return;
16303c7be18aSRoman Gushchin 
16313c7be18aSRoman Gushchin 	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
16323c7be18aSRoman Gushchin 	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
16333c7be18aSRoman Gushchin 
16343c7be18aSRoman Gushchin 	obj_cgroup_uncharge(objcg, size * num_possible_cpus());
16353c7be18aSRoman Gushchin 
1636772616b0SRoman Gushchin 	rcu_read_lock();
1637772616b0SRoman Gushchin 	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1638772616b0SRoman Gushchin 			-(size * num_possible_cpus()));
1639772616b0SRoman Gushchin 	rcu_read_unlock();
1640772616b0SRoman Gushchin 
16413c7be18aSRoman Gushchin 	obj_cgroup_put(objcg);
16423c7be18aSRoman Gushchin }
16433c7be18aSRoman Gushchin 
16443c7be18aSRoman Gushchin #else /* CONFIG_MEMCG_KMEM */
16453c7be18aSRoman Gushchin static enum pcpu_chunk_type
16463c7be18aSRoman Gushchin pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
16473c7be18aSRoman Gushchin {
16483c7be18aSRoman Gushchin 	return PCPU_CHUNK_ROOT;
16493c7be18aSRoman Gushchin }
16503c7be18aSRoman Gushchin 
16513c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
16523c7be18aSRoman Gushchin 				       struct pcpu_chunk *chunk, int off,
16533c7be18aSRoman Gushchin 				       size_t size)
16543c7be18aSRoman Gushchin {
16553c7be18aSRoman Gushchin }
16563c7be18aSRoman Gushchin 
16573c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
16583c7be18aSRoman Gushchin {
16593c7be18aSRoman Gushchin }
16603c7be18aSRoman Gushchin #endif /* CONFIG_MEMCG_KMEM */
16613c7be18aSRoman Gushchin 
166288999a89STejun Heo /**
1663edcb4639STejun Heo  * pcpu_alloc - the percpu allocator
1664cae3aeb8STejun Heo  * @size: size of area to allocate in bytes
1665fbf59bc9STejun Heo  * @align: alignment of area (max PAGE_SIZE)
1666edcb4639STejun Heo  * @reserved: allocate from the reserved chunk if available
16675835d96eSTejun Heo  * @gfp: allocation flags
1668fbf59bc9STejun Heo  *
16695835d96eSTejun Heo  * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
16700ea7eeecSDaniel Borkmann  * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
16710ea7eeecSDaniel Borkmann  * then no warning will be triggered on invalid or failed allocation
16720ea7eeecSDaniel Borkmann  * requests.
1673fbf59bc9STejun Heo  *
1674fbf59bc9STejun Heo  * RETURNS:
1675fbf59bc9STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
1676fbf59bc9STejun Heo  */
16775835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
16785835d96eSTejun Heo 				 gfp_t gfp)
1679fbf59bc9STejun Heo {
168028307d93SFilipe Manana 	gfp_t pcpu_gfp;
168128307d93SFilipe Manana 	bool is_atomic;
168228307d93SFilipe Manana 	bool do_warn;
16833c7be18aSRoman Gushchin 	enum pcpu_chunk_type type;
16843c7be18aSRoman Gushchin 	struct list_head *pcpu_slot;
16853c7be18aSRoman Gushchin 	struct obj_cgroup *objcg = NULL;
1686f2badb0cSTejun Heo 	static int warn_limit = 10;
16878744d859SDennis Zhou 	struct pcpu_chunk *chunk, *next;
1688f2badb0cSTejun Heo 	const char *err;
168940064aecSDennis Zhou (Facebook) 	int slot, off, cpu, ret;
1690403a91b1SJiri Kosina 	unsigned long flags;
1691f528f0b8SCatalin Marinas 	void __percpu *ptr;
169240064aecSDennis Zhou (Facebook) 	size_t bits, bit_align;
1693fbf59bc9STejun Heo 
169428307d93SFilipe Manana 	gfp = current_gfp_context(gfp);
169528307d93SFilipe Manana 	/* whitelisted flags that can be passed to the backing allocators */
169628307d93SFilipe Manana 	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
169728307d93SFilipe Manana 	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
169828307d93SFilipe Manana 	do_warn = !(gfp & __GFP_NOWARN);
169928307d93SFilipe Manana 
1700723ad1d9SAl Viro 	/*
170140064aecSDennis Zhou (Facebook) 	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
170240064aecSDennis Zhou (Facebook) 	 * therefore alignment must be a minimum of that many bytes.
170340064aecSDennis Zhou (Facebook) 	 * An allocation may have internal fragmentation of up to
170440064aecSDennis Zhou (Facebook) 	 * PCPU_MIN_ALLOC_SIZE - 1 bytes from rounding up.
1705723ad1d9SAl Viro 	 */
1706d2f3c384SDennis Zhou (Facebook) 	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1707d2f3c384SDennis Zhou (Facebook) 		align = PCPU_MIN_ALLOC_SIZE;
1708723ad1d9SAl Viro 
1709d2f3c384SDennis Zhou (Facebook) 	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
171040064aecSDennis Zhou (Facebook) 	bits = size >> PCPU_MIN_ALLOC_SHIFT;
171140064aecSDennis Zhou (Facebook) 	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
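	/*
	 * Illustrative example (PCPU_MIN_ALLOC_SIZE == 4 bytes): a request of
	 * size = 100, align = 8 stays at 100 bytes after rounding and is
	 * converted to bits = 25 allocation units with bit_align = 2.
	 */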
17122f69fa82SViro 
17133ca45a46Szijun_hu 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
17143ca45a46Szijun_hu 		     !is_power_of_2(align))) {
17150ea7eeecSDaniel Borkmann 		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1716756a025fSJoe Perches 		     size, align);
1717fbf59bc9STejun Heo 		return NULL;
1718fbf59bc9STejun Heo 	}
1719fbf59bc9STejun Heo 
17203c7be18aSRoman Gushchin 	type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg);
17213c7be18aSRoman Gushchin 	if (unlikely(type == PCPU_FAIL_ALLOC))
17223c7be18aSRoman Gushchin 		return NULL;
17233c7be18aSRoman Gushchin 	pcpu_slot = pcpu_chunk_list(type);
17243c7be18aSRoman Gushchin 
1725f52ba1feSKirill Tkhai 	if (!is_atomic) {
1726f52ba1feSKirill Tkhai 		/*
1727f52ba1feSKirill Tkhai 		 * pcpu_balance_workfn() allocates memory under this mutex,
1728f52ba1feSKirill Tkhai 		 * and it may wait for memory reclaim. Allow current task
1729f52ba1feSKirill Tkhai 		 * to become OOM victim, in case of memory pressure.
1730f52ba1feSKirill Tkhai 		 */
17313c7be18aSRoman Gushchin 		if (gfp & __GFP_NOFAIL) {
17326710e594STejun Heo 			mutex_lock(&pcpu_alloc_mutex);
17333c7be18aSRoman Gushchin 		} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
17343c7be18aSRoman Gushchin 			pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1735f52ba1feSKirill Tkhai 			return NULL;
1736f52ba1feSKirill Tkhai 		}
17373c7be18aSRoman Gushchin 	}
17386710e594STejun Heo 
1739403a91b1SJiri Kosina 	spin_lock_irqsave(&pcpu_lock, flags);
1740fbf59bc9STejun Heo 
1741edcb4639STejun Heo 	/* serve reserved allocations from the reserved chunk if available */
1742edcb4639STejun Heo 	if (reserved && pcpu_reserved_chunk) {
1743edcb4639STejun Heo 		chunk = pcpu_reserved_chunk;
1744833af842STejun Heo 
174540064aecSDennis Zhou (Facebook) 		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
174640064aecSDennis Zhou (Facebook) 		if (off < 0) {
1747833af842STejun Heo 			err = "alloc from reserved chunk failed";
1748ccea34b5STejun Heo 			goto fail_unlock;
1749f2badb0cSTejun Heo 		}
1750833af842STejun Heo 
175140064aecSDennis Zhou (Facebook) 		off = pcpu_alloc_area(chunk, bits, bit_align, off);
1752edcb4639STejun Heo 		if (off >= 0)
1753edcb4639STejun Heo 			goto area_found;
1754833af842STejun Heo 
1755f2badb0cSTejun Heo 		err = "alloc from reserved chunk failed";
1756ccea34b5STejun Heo 		goto fail_unlock;
1757edcb4639STejun Heo 	}
1758edcb4639STejun Heo 
1759ccea34b5STejun Heo restart:
1760edcb4639STejun Heo 	/* search through normal chunks */
1761fbf59bc9STejun Heo 	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
17628744d859SDennis Zhou 		list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
176340064aecSDennis Zhou (Facebook) 			off = pcpu_find_block_fit(chunk, bits, bit_align,
176440064aecSDennis Zhou (Facebook) 						  is_atomic);
17658744d859SDennis Zhou 			if (off < 0) {
17668744d859SDennis Zhou 				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
17678744d859SDennis Zhou 					pcpu_chunk_move(chunk, 0);
1768fbf59bc9STejun Heo 				continue;
17698744d859SDennis Zhou 			}
1770ccea34b5STejun Heo 
177140064aecSDennis Zhou (Facebook) 			off = pcpu_alloc_area(chunk, bits, bit_align, off);
1772fbf59bc9STejun Heo 			if (off >= 0)
1773fbf59bc9STejun Heo 				goto area_found;
177440064aecSDennis Zhou (Facebook) 
1775fbf59bc9STejun Heo 		}
1776fbf59bc9STejun Heo 	}
1777fbf59bc9STejun Heo 
1778403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
1779ccea34b5STejun Heo 
1780b38d08f3STejun Heo 	/*
1781b38d08f3STejun Heo 	 * No space left.  Create a new chunk.  We don't want multiple
1782b38d08f3STejun Heo 	 * tasks to create chunks simultaneously.  Serialize and create iff
1783b38d08f3STejun Heo 	 * there's still no empty chunk after grabbing the mutex.
1784b38d08f3STejun Heo 	 */
178511df02bfSDennis Zhou 	if (is_atomic) {
178611df02bfSDennis Zhou 		err = "atomic alloc failed, no space left";
17875835d96eSTejun Heo 		goto fail;
178811df02bfSDennis Zhou 	}
17895835d96eSTejun Heo 
1790b38d08f3STejun Heo 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
17913c7be18aSRoman Gushchin 		chunk = pcpu_create_chunk(type, pcpu_gfp);
1792f2badb0cSTejun Heo 		if (!chunk) {
1793f2badb0cSTejun Heo 			err = "failed to allocate new chunk";
1794b38d08f3STejun Heo 			goto fail;
1795f2badb0cSTejun Heo 		}
1796ccea34b5STejun Heo 
1797403a91b1SJiri Kosina 		spin_lock_irqsave(&pcpu_lock, flags);
1798fbf59bc9STejun Heo 		pcpu_chunk_relocate(chunk, -1);
1799b38d08f3STejun Heo 	} else {
1800b38d08f3STejun Heo 		spin_lock_irqsave(&pcpu_lock, flags);
1801b38d08f3STejun Heo 	}
1802b38d08f3STejun Heo 
1803ccea34b5STejun Heo 	goto restart;
1804fbf59bc9STejun Heo 
1805fbf59bc9STejun Heo area_found:
180630a5b536SDennis Zhou 	pcpu_stats_area_alloc(chunk, size);
1807403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
1808ccea34b5STejun Heo 
1809dca49645STejun Heo 	/* populate if not all pages are already there */
18105835d96eSTejun Heo 	if (!is_atomic) {
1811e837dfdeSDennis Zhou 		unsigned int page_start, page_end, rs, re;
1812e04d3208STejun Heo 
1813dca49645STejun Heo 		page_start = PFN_DOWN(off);
1814dca49645STejun Heo 		page_end = PFN_UP(off + size);
1815dca49645STejun Heo 
1816e837dfdeSDennis Zhou 		bitmap_for_each_clear_region(chunk->populated, rs, re,
181791e914c5SDennis Zhou (Facebook) 					     page_start, page_end) {
1818dca49645STejun Heo 			WARN_ON(chunk->immutable);
1819dca49645STejun Heo 
1820554fef1cSDennis Zhou 			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1821b38d08f3STejun Heo 
1822403a91b1SJiri Kosina 			spin_lock_irqsave(&pcpu_lock, flags);
1823b38d08f3STejun Heo 			if (ret) {
182440064aecSDennis Zhou (Facebook) 				pcpu_free_area(chunk, off);
1825f2badb0cSTejun Heo 				err = "failed to populate";
1826ccea34b5STejun Heo 				goto fail_unlock;
1827fbf59bc9STejun Heo 			}
1828b239f7daSDennis Zhou 			pcpu_chunk_populated(chunk, rs, re);
1829b38d08f3STejun Heo 			spin_unlock_irqrestore(&pcpu_lock, flags);
1830dca49645STejun Heo 		}
1831dca49645STejun Heo 
1832ccea34b5STejun Heo 		mutex_unlock(&pcpu_alloc_mutex);
1833e04d3208STejun Heo 	}
1834ccea34b5STejun Heo 
18350760fa3dSRoman Gushchin 	if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW)
18361a4d7607STejun Heo 		pcpu_schedule_balance_work();
18371a4d7607STejun Heo 
1838dca49645STejun Heo 	/* clear the areas and return address relative to base address */
1839dca49645STejun Heo 	for_each_possible_cpu(cpu)
1840dca49645STejun Heo 		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1841dca49645STejun Heo 
1842f528f0b8SCatalin Marinas 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
18438a8c35faSLarry Finger 	kmemleak_alloc_percpu(ptr, size, gfp);
1844df95e795SDennis Zhou 
1845df95e795SDennis Zhou 	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1846df95e795SDennis Zhou 			chunk->base_addr, off, ptr);
1847df95e795SDennis Zhou 
18483c7be18aSRoman Gushchin 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
18493c7be18aSRoman Gushchin 
1850f528f0b8SCatalin Marinas 	return ptr;
1851ccea34b5STejun Heo 
1852ccea34b5STejun Heo fail_unlock:
1853403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
1854b38d08f3STejun Heo fail:
1855df95e795SDennis Zhou 	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1856df95e795SDennis Zhou 
18570ea7eeecSDaniel Borkmann 	if (!is_atomic && do_warn && warn_limit) {
1858870d4b12SJoe Perches 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
18595835d96eSTejun Heo 			size, align, is_atomic, err);
1860f2badb0cSTejun Heo 		dump_stack();
1861f2badb0cSTejun Heo 		if (!--warn_limit)
1862870d4b12SJoe Perches 			pr_info("limit reached, disable warning\n");
1863f2badb0cSTejun Heo 	}
18641a4d7607STejun Heo 	if (is_atomic) {
1865*f0953a1bSIngo Molnar 		/* see the flag handling in pcpu_balance_workfn() */
18661a4d7607STejun Heo 		pcpu_atomic_alloc_failed = true;
18671a4d7607STejun Heo 		pcpu_schedule_balance_work();
18686710e594STejun Heo 	} else {
18696710e594STejun Heo 		mutex_unlock(&pcpu_alloc_mutex);
18701a4d7607STejun Heo 	}
18713c7be18aSRoman Gushchin 
18723c7be18aSRoman Gushchin 	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
18733c7be18aSRoman Gushchin 
1874ccea34b5STejun Heo 	return NULL;
1875fbf59bc9STejun Heo }
1876edcb4639STejun Heo 
1877edcb4639STejun Heo /**
18785835d96eSTejun Heo  * __alloc_percpu_gfp - allocate dynamic percpu area
1879edcb4639STejun Heo  * @size: size of area to allocate in bytes
1880edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
18815835d96eSTejun Heo  * @gfp: allocation flags
1882edcb4639STejun Heo  *
18835835d96eSTejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
18845835d96eSTejun Heo  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
18850ea7eeecSDaniel Borkmann  * be called from any context but is a lot more likely to fail. If @gfp
18860ea7eeecSDaniel Borkmann  * has __GFP_NOWARN then no warning will be triggered on invalid or failed
18870ea7eeecSDaniel Borkmann  * allocation requests.
1888ccea34b5STejun Heo  *
1889edcb4639STejun Heo  * RETURNS:
1890edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
1891edcb4639STejun Heo  */
18925835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
18935835d96eSTejun Heo {
18945835d96eSTejun Heo 	return pcpu_alloc(size, align, false, gfp);
18955835d96eSTejun Heo }
18965835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
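/*
 * Illustrative usage (not part of this file's code): a typical caller does
 *
 *	int __percpu *cnt = __alloc_percpu_gfp(sizeof(int), sizeof(int),
 *					       GFP_KERNEL);
 *	if (cnt)
 *		this_cpu_inc(*cnt);
 *	...
 *	free_percpu(cnt);
 */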
18975835d96eSTejun Heo 
18985835d96eSTejun Heo /**
18995835d96eSTejun Heo  * __alloc_percpu - allocate dynamic percpu area
19005835d96eSTejun Heo  * @size: size of area to allocate in bytes
19015835d96eSTejun Heo  * @align: alignment of area (max PAGE_SIZE)
19025835d96eSTejun Heo  *
19035835d96eSTejun Heo  * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
19045835d96eSTejun Heo  */
190543cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align)
1906edcb4639STejun Heo {
19075835d96eSTejun Heo 	return pcpu_alloc(size, align, false, GFP_KERNEL);
1908edcb4639STejun Heo }
1909fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu);
1910fbf59bc9STejun Heo 
1911edcb4639STejun Heo /**
1912edcb4639STejun Heo  * __alloc_reserved_percpu - allocate reserved percpu area
1913edcb4639STejun Heo  * @size: size of area to allocate in bytes
1914edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
1915edcb4639STejun Heo  *
19169329ba97STejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align
19179329ba97STejun Heo  * from reserved percpu area if arch has set it up; otherwise,
19189329ba97STejun Heo  * allocation is served from the same dynamic area.  Might sleep.
19199329ba97STejun Heo  * Might trigger writeouts.
1920edcb4639STejun Heo  *
1921ccea34b5STejun Heo  * CONTEXT:
1922ccea34b5STejun Heo  * Does GFP_KERNEL allocation.
1923ccea34b5STejun Heo  *
1924edcb4639STejun Heo  * RETURNS:
1925edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
1926edcb4639STejun Heo  */
192743cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1928edcb4639STejun Heo {
19295835d96eSTejun Heo 	return pcpu_alloc(size, align, true, GFP_KERNEL);
1930edcb4639STejun Heo }
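/*
 * Illustrative caller: the module loader is the main user of the reserved
 * region.  Simplified sketch (field and variable names approximate):
 *
 *	mod->percpu = __alloc_reserved_percpu(pcpu_size, align);
 *	if (!mod->percpu)
 *		return -ENOMEM;
 */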
1931edcb4639STejun Heo 
1932a56dbddfSTejun Heo /**
19333c7be18aSRoman Gushchin  * __pcpu_balance_workfn - manage the number of free chunks and populated pages
19343c7be18aSRoman Gushchin  * @type: chunk type
1935a56dbddfSTejun Heo  *
193647504ee0SDennis Zhou  * Reclaim all fully free chunks except for the first one.  This is also
193747504ee0SDennis Zhou  * responsible for maintaining the pool of empty populated pages.  However,
193847504ee0SDennis Zhou  * it is possible that this is called when physical memory is scarce, causing
193947504ee0SDennis Zhou  * the OOM killer to be triggered.  We should avoid doing so until an actual
194047504ee0SDennis Zhou  * allocation causes the failure as it is possible that requests can be
194147504ee0SDennis Zhou  * serviced from already backed regions.
1942a56dbddfSTejun Heo  */
19433c7be18aSRoman Gushchin static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
1944fbf59bc9STejun Heo {
194547504ee0SDennis Zhou 	/* gfp flags passed to underlying allocators */
1946554fef1cSDennis Zhou 	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
1947fe6bd8c3STejun Heo 	LIST_HEAD(to_free);
19483c7be18aSRoman Gushchin 	struct list_head *pcpu_slot = pcpu_chunk_list(type);
1949fe6bd8c3STejun Heo 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1950a56dbddfSTejun Heo 	struct pcpu_chunk *chunk, *next;
19511a4d7607STejun Heo 	int slot, nr_to_pop, ret;
1952a56dbddfSTejun Heo 
19531a4d7607STejun Heo 	/*
19541a4d7607STejun Heo 	 * There's no reason to keep around multiple unused chunks and VM
19551a4d7607STejun Heo 	 * areas can be scarce.  Destroy all free chunks except for one.
19561a4d7607STejun Heo 	 */
1957ccea34b5STejun Heo 	mutex_lock(&pcpu_alloc_mutex);
1958ccea34b5STejun Heo 	spin_lock_irq(&pcpu_lock);
1959a56dbddfSTejun Heo 
1960fe6bd8c3STejun Heo 	list_for_each_entry_safe(chunk, next, free_head, list) {
19618d408b4bSTejun Heo 		WARN_ON(chunk->immutable);
1962a56dbddfSTejun Heo 
1963a56dbddfSTejun Heo 		/* spare the first one */
1964fe6bd8c3STejun Heo 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1965a56dbddfSTejun Heo 			continue;
1966a56dbddfSTejun Heo 
1967fe6bd8c3STejun Heo 		list_move(&chunk->list, &to_free);
1968a56dbddfSTejun Heo 	}
1969a56dbddfSTejun Heo 
1970ccea34b5STejun Heo 	spin_unlock_irq(&pcpu_lock);
1971a56dbddfSTejun Heo 
1972fe6bd8c3STejun Heo 	list_for_each_entry_safe(chunk, next, &to_free, list) {
1973e837dfdeSDennis Zhou 		unsigned int rs, re;
1974dca49645STejun Heo 
1975e837dfdeSDennis Zhou 		bitmap_for_each_set_region(chunk->populated, rs, re, 0,
197691e914c5SDennis Zhou (Facebook) 					   chunk->nr_pages) {
1977a93ace48STejun Heo 			pcpu_depopulate_chunk(chunk, rs, re);
1978b539b87fSTejun Heo 			spin_lock_irq(&pcpu_lock);
1979b539b87fSTejun Heo 			pcpu_chunk_depopulated(chunk, rs, re);
1980b539b87fSTejun Heo 			spin_unlock_irq(&pcpu_lock);
1981a93ace48STejun Heo 		}
19826081089fSTejun Heo 		pcpu_destroy_chunk(chunk);
1983accd4f36SEric Dumazet 		cond_resched();
1984fbf59bc9STejun Heo 	}
1985971f3918STejun Heo 
19861a4d7607STejun Heo 	/*
19871a4d7607STejun Heo 	 * Ensure there are certain number of free populated pages for
19881a4d7607STejun Heo 	 * atomic allocs.  Fill up from the most packed so that atomic
19891a4d7607STejun Heo 	 * allocs don't increase fragmentation.  If atomic allocation
19901a4d7607STejun Heo 	 * failed previously, always populate the maximum amount.  This
19911a4d7607STejun Heo 	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
19921a4d7607STejun Heo 	 * failing indefinitely; however, large atomic allocs are not
19931a4d7607STejun Heo 	 * something we support properly and can be highly unreliable and
19941a4d7607STejun Heo 	 * inefficient.
19951a4d7607STejun Heo 	 */
19961a4d7607STejun Heo retry_pop:
19971a4d7607STejun Heo 	if (pcpu_atomic_alloc_failed) {
19981a4d7607STejun Heo 		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
19991a4d7607STejun Heo 		/* best effort anyway, don't worry about synchronization */
20001a4d7607STejun Heo 		pcpu_atomic_alloc_failed = false;
20011a4d7607STejun Heo 	} else {
20021a4d7607STejun Heo 		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
20030760fa3dSRoman Gushchin 				  pcpu_nr_empty_pop_pages[type],
20041a4d7607STejun Heo 				  0, PCPU_EMPTY_POP_PAGES_HIGH);
20051a4d7607STejun Heo 	}
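	/*
	 * Illustration (assuming PCPU_EMPTY_POP_PAGES_HIGH is 4): if this
	 * chunk type already has three empty populated pages and no atomic
	 * allocation has failed, nr_to_pop = clamp(4 - 3, 0, 4) = 1 and
	 * only a single page is populated below.
	 */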
20061a4d7607STejun Heo 
20071a4d7607STejun Heo 	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
2008e837dfdeSDennis Zhou 		unsigned int nr_unpop = 0, rs, re;
20091a4d7607STejun Heo 
20101a4d7607STejun Heo 		if (!nr_to_pop)
20111a4d7607STejun Heo 			break;
20121a4d7607STejun Heo 
20131a4d7607STejun Heo 		spin_lock_irq(&pcpu_lock);
20141a4d7607STejun Heo 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
20158ab16c43SDennis Zhou (Facebook) 			nr_unpop = chunk->nr_pages - chunk->nr_populated;
20161a4d7607STejun Heo 			if (nr_unpop)
20171a4d7607STejun Heo 				break;
20181a4d7607STejun Heo 		}
20191a4d7607STejun Heo 		spin_unlock_irq(&pcpu_lock);
20201a4d7607STejun Heo 
20211a4d7607STejun Heo 		if (!nr_unpop)
20221a4d7607STejun Heo 			continue;
20231a4d7607STejun Heo 
20241a4d7607STejun Heo 		/* @chunk can't go away while pcpu_alloc_mutex is held */
2025e837dfdeSDennis Zhou 		bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
202691e914c5SDennis Zhou (Facebook) 					     chunk->nr_pages) {
2027e837dfdeSDennis Zhou 			int nr = min_t(int, re - rs, nr_to_pop);
20281a4d7607STejun Heo 
202947504ee0SDennis Zhou 			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
20301a4d7607STejun Heo 			if (!ret) {
20311a4d7607STejun Heo 				nr_to_pop -= nr;
20321a4d7607STejun Heo 				spin_lock_irq(&pcpu_lock);
2033b239f7daSDennis Zhou 				pcpu_chunk_populated(chunk, rs, rs + nr);
20341a4d7607STejun Heo 				spin_unlock_irq(&pcpu_lock);
20351a4d7607STejun Heo 			} else {
20361a4d7607STejun Heo 				nr_to_pop = 0;
20371a4d7607STejun Heo 			}
20381a4d7607STejun Heo 
20391a4d7607STejun Heo 			if (!nr_to_pop)
20401a4d7607STejun Heo 				break;
20411a4d7607STejun Heo 		}
20421a4d7607STejun Heo 	}
20431a4d7607STejun Heo 
20441a4d7607STejun Heo 	if (nr_to_pop) {
20451a4d7607STejun Heo 		/* ran out of chunks to populate, create a new one and retry */
20463c7be18aSRoman Gushchin 		chunk = pcpu_create_chunk(type, gfp);
20471a4d7607STejun Heo 		if (chunk) {
20481a4d7607STejun Heo 			spin_lock_irq(&pcpu_lock);
20491a4d7607STejun Heo 			pcpu_chunk_relocate(chunk, -1);
20501a4d7607STejun Heo 			spin_unlock_irq(&pcpu_lock);
20511a4d7607STejun Heo 			goto retry_pop;
20521a4d7607STejun Heo 		}
20531a4d7607STejun Heo 	}
20541a4d7607STejun Heo 
2055971f3918STejun Heo 	mutex_unlock(&pcpu_alloc_mutex);
2056a56dbddfSTejun Heo }
2057fbf59bc9STejun Heo 
2058fbf59bc9STejun Heo /**
20593c7be18aSRoman Gushchin  * pcpu_balance_workfn - manage the number of free chunks and populated pages
20603c7be18aSRoman Gushchin  * @work: unused
20613c7be18aSRoman Gushchin  *
20623c7be18aSRoman Gushchin  * Call __pcpu_balance_workfn() for each chunk type.
20633c7be18aSRoman Gushchin  */
20643c7be18aSRoman Gushchin static void pcpu_balance_workfn(struct work_struct *work)
20653c7be18aSRoman Gushchin {
20663c7be18aSRoman Gushchin 	enum pcpu_chunk_type type;
20673c7be18aSRoman Gushchin 
20683c7be18aSRoman Gushchin 	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
20693c7be18aSRoman Gushchin 		__pcpu_balance_workfn(type);
20703c7be18aSRoman Gushchin }
20713c7be18aSRoman Gushchin 
20723c7be18aSRoman Gushchin /**
2073fbf59bc9STejun Heo  * free_percpu - free percpu area
2074fbf59bc9STejun Heo  * @ptr: pointer to area to free
2075fbf59bc9STejun Heo  *
2076ccea34b5STejun Heo  * Free percpu area @ptr.
2077ccea34b5STejun Heo  *
2078ccea34b5STejun Heo  * CONTEXT:
2079ccea34b5STejun Heo  * Can be called from atomic context.
2080fbf59bc9STejun Heo  */
208143cf38ebSTejun Heo void free_percpu(void __percpu *ptr)
2082fbf59bc9STejun Heo {
2083129182e5SAndrew Morton 	void *addr;
2084fbf59bc9STejun Heo 	struct pcpu_chunk *chunk;
2085ccea34b5STejun Heo 	unsigned long flags;
20863c7be18aSRoman Gushchin 	int size, off;
2087198790d9SJohn Sperbeck 	bool need_balance = false;
20883c7be18aSRoman Gushchin 	struct list_head *pcpu_slot;
2089fbf59bc9STejun Heo 
2090fbf59bc9STejun Heo 	if (!ptr)
2091fbf59bc9STejun Heo 		return;
2092fbf59bc9STejun Heo 
2093f528f0b8SCatalin Marinas 	kmemleak_free_percpu(ptr);
2094f528f0b8SCatalin Marinas 
2095129182e5SAndrew Morton 	addr = __pcpu_ptr_to_addr(ptr);
2096129182e5SAndrew Morton 
2097ccea34b5STejun Heo 	spin_lock_irqsave(&pcpu_lock, flags);
2098fbf59bc9STejun Heo 
2099fbf59bc9STejun Heo 	chunk = pcpu_chunk_addr_search(addr);
2100bba174f5STejun Heo 	off = addr - chunk->base_addr;
2101fbf59bc9STejun Heo 
21023c7be18aSRoman Gushchin 	size = pcpu_free_area(chunk, off);
21033c7be18aSRoman Gushchin 
21043c7be18aSRoman Gushchin 	pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
21053c7be18aSRoman Gushchin 
21063c7be18aSRoman Gushchin 	pcpu_memcg_free_hook(chunk, off, size);
2107fbf59bc9STejun Heo 
2108a56dbddfSTejun Heo 	/* if there is more than one fully free chunk, wake up grim reaper */
210940064aecSDennis Zhou (Facebook) 	if (chunk->free_bytes == pcpu_unit_size) {
2110fbf59bc9STejun Heo 		struct pcpu_chunk *pos;
2111fbf59bc9STejun Heo 
2112a56dbddfSTejun Heo 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
2113fbf59bc9STejun Heo 			if (pos != chunk) {
2114198790d9SJohn Sperbeck 				need_balance = true;
2115fbf59bc9STejun Heo 				break;
2116fbf59bc9STejun Heo 			}
2117fbf59bc9STejun Heo 	}
2118fbf59bc9STejun Heo 
2119df95e795SDennis Zhou 	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2120df95e795SDennis Zhou 
2121ccea34b5STejun Heo 	spin_unlock_irqrestore(&pcpu_lock, flags);
2122198790d9SJohn Sperbeck 
2123198790d9SJohn Sperbeck 	if (need_balance)
2124198790d9SJohn Sperbeck 		pcpu_schedule_balance_work();
2125fbf59bc9STejun Heo }
2126fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu);
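/*
 * Lifecycle sketch (illustrative type and names): a dynamic percpu area is
 * normally obtained with alloc_percpu(), accessed via per_cpu_ptr() or the
 * this_cpu_*() accessors, and released here:
 *
 *	struct foo_counter __percpu *cnt = alloc_percpu(struct foo_counter);
 *	u64 sum = 0;
 *	int cpu;
 *
 *	if (!cnt)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		sum += per_cpu_ptr(cnt, cpu)->hits;
 *	free_percpu(cnt);
 */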
2127fbf59bc9STejun Heo 
2128383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2129383776faSThomas Gleixner {
2130383776faSThomas Gleixner #ifdef CONFIG_SMP
2131383776faSThomas Gleixner 	const size_t static_size = __per_cpu_end - __per_cpu_start;
2132383776faSThomas Gleixner 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2133383776faSThomas Gleixner 	unsigned int cpu;
2134383776faSThomas Gleixner 
2135383776faSThomas Gleixner 	for_each_possible_cpu(cpu) {
2136383776faSThomas Gleixner 		void *start = per_cpu_ptr(base, cpu);
2137383776faSThomas Gleixner 		void *va = (void *)addr;
2138383776faSThomas Gleixner 
2139383776faSThomas Gleixner 		if (va >= start && va < start + static_size) {
21408ce371f9SPeter Zijlstra 			if (can_addr) {
2141383776faSThomas Gleixner 				*can_addr = (unsigned long) (va - start);
21428ce371f9SPeter Zijlstra 				*can_addr += (unsigned long)
21438ce371f9SPeter Zijlstra 					per_cpu_ptr(base, get_boot_cpu_id());
21448ce371f9SPeter Zijlstra 			}
2145383776faSThomas Gleixner 			return true;
2146383776faSThomas Gleixner 		}
2147383776faSThomas Gleixner 	}
2148383776faSThomas Gleixner #endif
2149383776faSThomas Gleixner 	/* on UP, can't distinguish from other static vars, always false */
2150383776faSThomas Gleixner 	return false;
2151383776faSThomas Gleixner }
2152383776faSThomas Gleixner 
21533b034b0dSVivek Goyal /**
215410fad5e4STejun Heo  * is_kernel_percpu_address - test whether address is from static percpu area
215510fad5e4STejun Heo  * @addr: address to test
215610fad5e4STejun Heo  *
215510fad5e4STejun Heo  * Test whether @addr belongs to the in-kernel static percpu area.  Module
215810fad5e4STejun Heo  * static percpu areas are not considered.  For those, use
215910fad5e4STejun Heo  * is_module_percpu_address().
216010fad5e4STejun Heo  *
216110fad5e4STejun Heo  * RETURNS:
216210fad5e4STejun Heo  * %true if @addr is from in-kernel static percpu area, %false otherwise.
216310fad5e4STejun Heo  */
216410fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr)
216510fad5e4STejun Heo {
2166383776faSThomas Gleixner 	return __is_kernel_percpu_address(addr, NULL);
216710fad5e4STejun Heo }
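/*
 * Illustrative check (hypothetical variable): lockdep-style code can use
 * this to tell whether an object lives in the kernel's static percpu
 * region:
 *
 *	static DEFINE_PER_CPU(spinlock_t, foo_lock);
 *	...
 *	if (is_kernel_percpu_address((unsigned long)per_cpu_ptr(&foo_lock, cpu)))
 *		... the address is a static (non-module) percpu address ...
 */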
216810fad5e4STejun Heo 
216910fad5e4STejun Heo /**
21703b034b0dSVivek Goyal  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
21713b034b0dSVivek Goyal  * @addr: the address to be converted to physical address
21723b034b0dSVivek Goyal  *
21733b034b0dSVivek Goyal  * Given @addr which is a dereferenceable address obtained via one of
21743b034b0dSVivek Goyal  * percpu access macros, this function translates it into its physical
21753b034b0dSVivek Goyal  * address.  The caller is responsible for ensuring @addr stays valid
21763b034b0dSVivek Goyal  * until this function finishes.
21773b034b0dSVivek Goyal  *
217867589c71SDave Young  * The percpu allocator has a special setup for the first chunk, which
217967589c71SDave Young  * currently supports either embedding in the linear address space or
218067589c71SDave Young  * vmalloc mapping.  From the second chunk on, the backing allocator
218167589c71SDave Young  * (currently either vm or km) provides the translation.
218267589c71SDave Young  *
2183bffc4375SYannick Guerrini  * The addr could be translated simply without checking whether it falls
218467589c71SDave Young  * into the first chunk.  But the current code better reflects how the
218567589c71SDave Young  * percpu allocator actually works, and the verification can discover bugs
218667589c71SDave Young  * both in the percpu allocator itself and in per_cpu_ptr_to_phys()
218767589c71SDave Young  * callers.  So we keep the current code.
218867589c71SDave Young  *
21893b034b0dSVivek Goyal  * RETURNS:
21903b034b0dSVivek Goyal  * The physical address for @addr.
21913b034b0dSVivek Goyal  */
21923b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr)
21933b034b0dSVivek Goyal {
21949983b6f0STejun Heo 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
21959983b6f0STejun Heo 	bool in_first_chunk = false;
2196a855b84cSTejun Heo 	unsigned long first_low, first_high;
21979983b6f0STejun Heo 	unsigned int cpu;
21989983b6f0STejun Heo 
21999983b6f0STejun Heo 	/*
2200a855b84cSTejun Heo 	 * The following test on first_low/high isn't strictly
22019983b6f0STejun Heo 	 * necessary but will speed up lookups of addresses which
22029983b6f0STejun Heo 	 * aren't in the first chunk.
2203c0ebfdc3SDennis Zhou (Facebook) 	 *
2204c0ebfdc3SDennis Zhou (Facebook) 	 * The address check is against full chunk sizes.  pcpu_base_addr
2205c0ebfdc3SDennis Zhou (Facebook) 	 * points to the beginning of the first chunk including the
2206c0ebfdc3SDennis Zhou (Facebook) 	 * static region.  Assumes good intent as the first chunk may
2207c0ebfdc3SDennis Zhou (Facebook) 	 * not be full (ie. < pcpu_unit_pages in size).
22089983b6f0STejun Heo 	 */
2209c0ebfdc3SDennis Zhou (Facebook) 	first_low = (unsigned long)pcpu_base_addr +
2210c0ebfdc3SDennis Zhou (Facebook) 		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2211c0ebfdc3SDennis Zhou (Facebook) 	first_high = (unsigned long)pcpu_base_addr +
2212c0ebfdc3SDennis Zhou (Facebook) 		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2213a855b84cSTejun Heo 	if ((unsigned long)addr >= first_low &&
2214a855b84cSTejun Heo 	    (unsigned long)addr < first_high) {
22159983b6f0STejun Heo 		for_each_possible_cpu(cpu) {
22169983b6f0STejun Heo 			void *start = per_cpu_ptr(base, cpu);
22179983b6f0STejun Heo 
22189983b6f0STejun Heo 			if (addr >= start && addr < start + pcpu_unit_size) {
22199983b6f0STejun Heo 				in_first_chunk = true;
22209983b6f0STejun Heo 				break;
22219983b6f0STejun Heo 			}
22229983b6f0STejun Heo 		}
22239983b6f0STejun Heo 	}
22249983b6f0STejun Heo 
22259983b6f0STejun Heo 	if (in_first_chunk) {
2226eac522efSDavid Howells 		if (!is_vmalloc_addr(addr))
22273b034b0dSVivek Goyal 			return __pa(addr);
22283b034b0dSVivek Goyal 		else
22299f57bd4dSEugene Surovegin 			return page_to_phys(vmalloc_to_page(addr)) +
22309f57bd4dSEugene Surovegin 			       offset_in_page(addr);
2231020ec653STejun Heo 	} else
22329f57bd4dSEugene Surovegin 		return page_to_phys(pcpu_addr_to_page(addr)) +
22339f57bd4dSEugene Surovegin 		       offset_in_page(addr);
22343b034b0dSVivek Goyal }
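/*
 * Usage sketch (hypothetical names): obtaining the physical address of each
 * CPU's copy of a percpu object, e.g. when a buffer must be handed to
 * firmware or to crash/kexec code:
 *
 *	static DEFINE_PER_CPU(struct note_buf, crash_notes);
 *	...
 *	for_each_possible_cpu(cpu)
 *		phys[cpu] = per_cpu_ptr_to_phys(per_cpu_ptr(&crash_notes, cpu));
 */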
22353b034b0dSVivek Goyal 
2236fbf59bc9STejun Heo /**
2237fd1e8a1fSTejun Heo  * pcpu_alloc_alloc_info - allocate percpu allocation info
2238fd1e8a1fSTejun Heo  * @nr_groups: the number of groups
2239fd1e8a1fSTejun Heo  * @nr_units: the number of units
2240033e48fbSTejun Heo  *
2241fd1e8a1fSTejun Heo  * Allocate ai which is large enough for @nr_groups groups containing
2242fd1e8a1fSTejun Heo  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
2243fd1e8a1fSTejun Heo  * cpu_map array which is long enough for @nr_units and filled with
2244fd1e8a1fSTejun Heo  * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
2245fd1e8a1fSTejun Heo  * pointer of other groups.
2246033e48fbSTejun Heo  *
2247033e48fbSTejun Heo  * RETURNS:
2248fd1e8a1fSTejun Heo  * Pointer to the allocated pcpu_alloc_info on success, NULL on
2249fd1e8a1fSTejun Heo  * failure.
2250033e48fbSTejun Heo  */
2251fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2252fd1e8a1fSTejun Heo 						      int nr_units)
2253fd1e8a1fSTejun Heo {
2254fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
2255fd1e8a1fSTejun Heo 	size_t base_size, ai_size;
2256fd1e8a1fSTejun Heo 	void *ptr;
2257fd1e8a1fSTejun Heo 	int unit;
2258fd1e8a1fSTejun Heo 
225914d37612SGustavo A. R. Silva 	base_size = ALIGN(struct_size(ai, groups, nr_groups),
2260fd1e8a1fSTejun Heo 			  __alignof__(ai->groups[0].cpu_map[0]));
2261fd1e8a1fSTejun Heo 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2262fd1e8a1fSTejun Heo 
226326fb3daeSMike Rapoport 	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2264fd1e8a1fSTejun Heo 	if (!ptr)
2265fd1e8a1fSTejun Heo 		return NULL;
2266fd1e8a1fSTejun Heo 	ai = ptr;
2267fd1e8a1fSTejun Heo 	ptr += base_size;
2268fd1e8a1fSTejun Heo 
2269fd1e8a1fSTejun Heo 	ai->groups[0].cpu_map = ptr;
2270fd1e8a1fSTejun Heo 
2271fd1e8a1fSTejun Heo 	for (unit = 0; unit < nr_units; unit++)
2272fd1e8a1fSTejun Heo 		ai->groups[0].cpu_map[unit] = NR_CPUS;
2273fd1e8a1fSTejun Heo 
2274fd1e8a1fSTejun Heo 	ai->nr_groups = nr_groups;
2275fd1e8a1fSTejun Heo 	ai->__ai_size = PFN_ALIGN(ai_size);
2276fd1e8a1fSTejun Heo 
2277fd1e8a1fSTejun Heo 	return ai;
2278fd1e8a1fSTejun Heo }
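/*
 * Caller sketch (illustrative, single group): an arch building its own
 * alloc_info with one group and one unit per possible CPU would do roughly:
 *
 *	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
 *	if (!ai)
 *		return ERR_PTR(-ENOMEM);
 *	ai->groups[0].nr_units = num_possible_cpus();
 *	for_each_possible_cpu(cpu)
 *		ai->groups[0].cpu_map[unit++] = cpu;
 */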
2279fd1e8a1fSTejun Heo 
2280fd1e8a1fSTejun Heo /**
2281fd1e8a1fSTejun Heo  * pcpu_free_alloc_info - free percpu allocation info
2282fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info to free
2283fd1e8a1fSTejun Heo  *
2284fd1e8a1fSTejun Heo  * Free @ai which was allocated by pcpu_alloc_alloc_info().
2285fd1e8a1fSTejun Heo  */
2286fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2287fd1e8a1fSTejun Heo {
2288999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(ai), ai->__ai_size);
2289fd1e8a1fSTejun Heo }
2290fd1e8a1fSTejun Heo 
2291fd1e8a1fSTejun Heo /**
2292fd1e8a1fSTejun Heo  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2293fd1e8a1fSTejun Heo  * @lvl: loglevel
2294fd1e8a1fSTejun Heo  * @ai: allocation info to dump
2295fd1e8a1fSTejun Heo  *
2296fd1e8a1fSTejun Heo  * Print out information about @ai using loglevel @lvl.
2297fd1e8a1fSTejun Heo  */
2298fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
2299fd1e8a1fSTejun Heo 				 const struct pcpu_alloc_info *ai)
2300033e48fbSTejun Heo {
2301fd1e8a1fSTejun Heo 	int group_width = 1, cpu_width = 1, width;
2302033e48fbSTejun Heo 	char empty_str[] = "--------";
2303fd1e8a1fSTejun Heo 	int alloc = 0, alloc_end = 0;
2304fd1e8a1fSTejun Heo 	int group, v;
2305fd1e8a1fSTejun Heo 	int upa, apl;	/* units per alloc, allocs per line */
2306033e48fbSTejun Heo 
2307fd1e8a1fSTejun Heo 	v = ai->nr_groups;
2308033e48fbSTejun Heo 	while (v /= 10)
2309fd1e8a1fSTejun Heo 		group_width++;
2310033e48fbSTejun Heo 
2311fd1e8a1fSTejun Heo 	v = num_possible_cpus();
2312fd1e8a1fSTejun Heo 	while (v /= 10)
2313fd1e8a1fSTejun Heo 		cpu_width++;
2314fd1e8a1fSTejun Heo 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2315033e48fbSTejun Heo 
2316fd1e8a1fSTejun Heo 	upa = ai->alloc_size / ai->unit_size;
2317fd1e8a1fSTejun Heo 	width = upa * (cpu_width + 1) + group_width + 3;
2318fd1e8a1fSTejun Heo 	apl = rounddown_pow_of_two(max(60 / width, 1));
2319033e48fbSTejun Heo 
2320fd1e8a1fSTejun Heo 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2321fd1e8a1fSTejun Heo 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2322fd1e8a1fSTejun Heo 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2323fd1e8a1fSTejun Heo 
2324fd1e8a1fSTejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
2325fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
2326fd1e8a1fSTejun Heo 		int unit = 0, unit_end = 0;
2327fd1e8a1fSTejun Heo 
2328fd1e8a1fSTejun Heo 		BUG_ON(gi->nr_units % upa);
2329fd1e8a1fSTejun Heo 		for (alloc_end += gi->nr_units / upa;
2330fd1e8a1fSTejun Heo 		     alloc < alloc_end; alloc++) {
2331fd1e8a1fSTejun Heo 			if (!(alloc % apl)) {
23321170532bSJoe Perches 				pr_cont("\n");
2333fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl);
2334033e48fbSTejun Heo 			}
23351170532bSJoe Perches 			pr_cont("[%0*d] ", group_width, group);
2336fd1e8a1fSTejun Heo 
2337fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++)
2338fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS)
23391170532bSJoe Perches 					pr_cont("%0*d ",
23401170532bSJoe Perches 						cpu_width, gi->cpu_map[unit]);
2341033e48fbSTejun Heo 				else
23421170532bSJoe Perches 					pr_cont("%s ", empty_str);
2343033e48fbSTejun Heo 		}
2344fd1e8a1fSTejun Heo 	}
23451170532bSJoe Perches 	pr_cont("\n");
2346033e48fbSTejun Heo }
2347033e48fbSTejun Heo 
2348fbf59bc9STejun Heo /**
23498d408b4bSTejun Heo  * pcpu_setup_first_chunk - initialize the first percpu chunk
2350fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info describing how the percpu area is shaped
235138a6be52STejun Heo  * @base_addr: mapped address
2352fbf59bc9STejun Heo  *
23538d408b4bSTejun Heo  * Initialize the first percpu chunk which contains the kernel static
235469ab285bSChristophe JAILLET  * percpu area.  This function is to be called from the arch percpu area
235538a6be52STejun Heo  * setup path.
23568d408b4bSTejun Heo  *
2357fd1e8a1fSTejun Heo  * @ai contains all information necessary to initialize the first
2358fd1e8a1fSTejun Heo  * chunk and prime the dynamic percpu allocator.
23598d408b4bSTejun Heo  *
2360fd1e8a1fSTejun Heo  * @ai->static_size is the size of static percpu area.
2361fd1e8a1fSTejun Heo  *
2362fd1e8a1fSTejun Heo  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2363edcb4639STejun Heo  * reserve after the static area in the first chunk.  This reserves
2364edcb4639STejun Heo  * the first chunk such that it's available only through reserved
2365edcb4639STejun Heo  * percpu allocation.  This is primarily used to serve module percpu
2366edcb4639STejun Heo  * static areas on architectures where the addressing model has
2367edcb4639STejun Heo  * limited offset range for symbol relocations to guarantee module
2368edcb4639STejun Heo  * percpu symbols fall inside the relocatable range.
2369edcb4639STejun Heo  *
2370fd1e8a1fSTejun Heo  * @ai->dyn_size determines the number of bytes available for dynamic
2371fd1e8a1fSTejun Heo  * allocation in the first chunk.  The area between @ai->static_size +
2372fd1e8a1fSTejun Heo  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
23736074d5b0STejun Heo  *
2374fd1e8a1fSTejun Heo  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2375fd1e8a1fSTejun Heo  * and equal to or larger than @ai->static_size + @ai->reserved_size +
2376fd1e8a1fSTejun Heo  * @ai->dyn_size.
23778d408b4bSTejun Heo  *
2378fd1e8a1fSTejun Heo  * @ai->atom_size is the allocation atom size and used as alignment
2379fd1e8a1fSTejun Heo  * for vm areas.
23808d408b4bSTejun Heo  *
2381fd1e8a1fSTejun Heo  * @ai->alloc_size is the allocation size and always multiple of
2382fd1e8a1fSTejun Heo  * @ai->atom_size.  This is larger than @ai->atom_size if
2383fd1e8a1fSTejun Heo  * @ai->unit_size is larger than @ai->atom_size.
2384fd1e8a1fSTejun Heo  *
2385fd1e8a1fSTejun Heo  * @ai->nr_groups and @ai->groups describe virtual memory layout of
2386fd1e8a1fSTejun Heo  * percpu areas.  Units which should be colocated are put into the
2387fd1e8a1fSTejun Heo  * same group.  Dynamic VM areas will be allocated according to these
2388fd1e8a1fSTejun Heo  * groupings.  If @ai->nr_groups is zero, a single group containing
2389fd1e8a1fSTejun Heo  * all units is assumed.
23908d408b4bSTejun Heo  *
239138a6be52STejun Heo  * The caller should have mapped the first chunk at @base_addr and
239238a6be52STejun Heo  * copied static data to each unit.
2393fbf59bc9STejun Heo  *
2394c0ebfdc3SDennis Zhou (Facebook)  * The first chunk will always contain a static and a dynamic region.
2395c0ebfdc3SDennis Zhou (Facebook)  * However, the static region is not managed by any chunk.  If the first
2396c0ebfdc3SDennis Zhou (Facebook)  * chunk also contains a reserved region, it is served by two chunks -
2397c0ebfdc3SDennis Zhou (Facebook)  * one for the reserved region and one for the dynamic region.  They
2398c0ebfdc3SDennis Zhou (Facebook)  * share the same vm, but use offset regions in the area allocation map.
2399c0ebfdc3SDennis Zhou (Facebook)  * The chunk serving the dynamic region is circulated in the chunk slots
2400c0ebfdc3SDennis Zhou (Facebook)  * and available for dynamic allocation like any other chunk.
2401fbf59bc9STejun Heo  */
2402163fa234SKefeng Wang void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2403fd1e8a1fSTejun Heo 				   void *base_addr)
2404fbf59bc9STejun Heo {
2405b9c39442SDennis Zhou (Facebook) 	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2406d2f3c384SDennis Zhou (Facebook) 	size_t static_size, dyn_size;
24070c4169c3SDennis Zhou (Facebook) 	struct pcpu_chunk *chunk;
24086563297cSTejun Heo 	unsigned long *group_offsets;
24096563297cSTejun Heo 	size_t *group_sizes;
2410fb435d52STejun Heo 	unsigned long *unit_off;
2411fbf59bc9STejun Heo 	unsigned int cpu;
2412fd1e8a1fSTejun Heo 	int *unit_map;
2413fd1e8a1fSTejun Heo 	int group, unit, i;
2414c0ebfdc3SDennis Zhou (Facebook) 	int map_size;
2415c0ebfdc3SDennis Zhou (Facebook) 	unsigned long tmp_addr;
2416f655f405SMike Rapoport 	size_t alloc_size;
24173c7be18aSRoman Gushchin 	enum pcpu_chunk_type type;
2418fbf59bc9STejun Heo 
2419635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond)	do {					\
2420635b75fcSTejun Heo 	if (unlikely(cond)) {						\
2421870d4b12SJoe Perches 		pr_emerg("failed to initialize, %s\n", #cond);		\
2422870d4b12SJoe Perches 		pr_emerg("cpu_possible_mask=%*pb\n",			\
2423807de073STejun Heo 			 cpumask_pr_args(cpu_possible_mask));		\
2424635b75fcSTejun Heo 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
2425635b75fcSTejun Heo 		BUG();							\
2426635b75fcSTejun Heo 	}								\
2427635b75fcSTejun Heo } while (0)
2428635b75fcSTejun Heo 
24292f39e637STejun Heo 	/* sanity checks */
2430635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2431bbddff05STejun Heo #ifdef CONFIG_SMP
2432635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(!ai->static_size);
2433f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2434bbddff05STejun Heo #endif
2435635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(!base_addr);
2436f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2437635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2438f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2439635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2440ca460b3cSDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2441099a19d9STejun Heo 	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2442fb29a2ccSDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!ai->dyn_size);
2443d2f3c384SDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2444ca460b3cSDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2445ca460b3cSDennis Zhou (Facebook) 			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
24469f645532STejun Heo 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
24478d408b4bSTejun Heo 
24486563297cSTejun Heo 	/* process group information and build config tables accordingly */
2449f655f405SMike Rapoport 	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2450f655f405SMike Rapoport 	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2451f655f405SMike Rapoport 	if (!group_offsets)
2452f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2453f655f405SMike Rapoport 		      alloc_size);
2454f655f405SMike Rapoport 
2455f655f405SMike Rapoport 	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2456f655f405SMike Rapoport 	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2457f655f405SMike Rapoport 	if (!group_sizes)
2458f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2459f655f405SMike Rapoport 		      alloc_size);
2460f655f405SMike Rapoport 
2461f655f405SMike Rapoport 	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2462f655f405SMike Rapoport 	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2463f655f405SMike Rapoport 	if (!unit_map)
2464f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2465f655f405SMike Rapoport 		      alloc_size);
2466f655f405SMike Rapoport 
2467f655f405SMike Rapoport 	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2468f655f405SMike Rapoport 	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2469f655f405SMike Rapoport 	if (!unit_off)
2470f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2471f655f405SMike Rapoport 		      alloc_size);
24722f39e637STejun Heo 
2473fd1e8a1fSTejun Heo 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2474ffe0d5a5STejun Heo 		unit_map[cpu] = UINT_MAX;
2475a855b84cSTejun Heo 
2476a855b84cSTejun Heo 	pcpu_low_unit_cpu = NR_CPUS;
2477a855b84cSTejun Heo 	pcpu_high_unit_cpu = NR_CPUS;
24782f39e637STejun Heo 
2479fd1e8a1fSTejun Heo 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2480fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
24812f39e637STejun Heo 
24826563297cSTejun Heo 		group_offsets[group] = gi->base_offset;
24836563297cSTejun Heo 		group_sizes[group] = gi->nr_units * ai->unit_size;
24846563297cSTejun Heo 
2485fd1e8a1fSTejun Heo 		for (i = 0; i < gi->nr_units; i++) {
2486fd1e8a1fSTejun Heo 			cpu = gi->cpu_map[i];
2487fd1e8a1fSTejun Heo 			if (cpu == NR_CPUS)
2488fd1e8a1fSTejun Heo 				continue;
2489fd1e8a1fSTejun Heo 
24909f295664SDan Carpenter 			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2491635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2492635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2493fd1e8a1fSTejun Heo 
2494fd1e8a1fSTejun Heo 			unit_map[cpu] = unit + i;
2495fb435d52STejun Heo 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2496fb435d52STejun Heo 
2497a855b84cSTejun Heo 			/* determine low/high unit_cpu */
2498a855b84cSTejun Heo 			if (pcpu_low_unit_cpu == NR_CPUS ||
2499a855b84cSTejun Heo 			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2500a855b84cSTejun Heo 				pcpu_low_unit_cpu = cpu;
2501a855b84cSTejun Heo 			if (pcpu_high_unit_cpu == NR_CPUS ||
2502a855b84cSTejun Heo 			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2503a855b84cSTejun Heo 				pcpu_high_unit_cpu = cpu;
25040fc0531eSLinus Torvalds 		}
25050fc0531eSLinus Torvalds 	}
2506fd1e8a1fSTejun Heo 	pcpu_nr_units = unit;
25072f39e637STejun Heo 
25082f39e637STejun Heo 	for_each_possible_cpu(cpu)
2509635b75fcSTejun Heo 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2510635b75fcSTejun Heo 
2511635b75fcSTejun Heo 	/* we're done parsing the input, undefine BUG macro and dump config */
2512635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON
2513bcbea798STejun Heo 	pcpu_dump_alloc_info(KERN_DEBUG, ai);
25142f39e637STejun Heo 
25156563297cSTejun Heo 	pcpu_nr_groups = ai->nr_groups;
25166563297cSTejun Heo 	pcpu_group_offsets = group_offsets;
25176563297cSTejun Heo 	pcpu_group_sizes = group_sizes;
2518fd1e8a1fSTejun Heo 	pcpu_unit_map = unit_map;
2519fb435d52STejun Heo 	pcpu_unit_offsets = unit_off;
25202f39e637STejun Heo 
25212f39e637STejun Heo 	/* determine basic parameters */
2522fd1e8a1fSTejun Heo 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2523d9b55eebSTejun Heo 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
25246563297cSTejun Heo 	pcpu_atom_size = ai->atom_size;
252561cf93d3SDennis Zhou 	pcpu_chunk_struct_size = struct_size(chunk, populated,
252661cf93d3SDennis Zhou 					     BITS_TO_LONGS(pcpu_unit_pages));
2527cafe8816STejun Heo 
252830a5b536SDennis Zhou 	pcpu_stats_save_ai(ai);
252930a5b536SDennis Zhou 
2530d9b55eebSTejun Heo 	/*
2531d9b55eebSTejun Heo 	 * Allocate chunk slots.  The additional last slot is for
2532d9b55eebSTejun Heo 	 * empty chunks.
2533d9b55eebSTejun Heo 	 */
2534d9b55eebSTejun Heo 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
25353c7be18aSRoman Gushchin 	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
25363c7be18aSRoman Gushchin 					  sizeof(pcpu_chunk_lists[0]) *
25373c7be18aSRoman Gushchin 					  PCPU_NR_CHUNK_TYPES,
25387e1c4e27SMike Rapoport 					  SMP_CACHE_BYTES);
25393c7be18aSRoman Gushchin 	if (!pcpu_chunk_lists)
2540f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
25413c7be18aSRoman Gushchin 		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) *
25423c7be18aSRoman Gushchin 		      PCPU_NR_CHUNK_TYPES);
25433c7be18aSRoman Gushchin 
25443c7be18aSRoman Gushchin 	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
2545fbf59bc9STejun Heo 		for (i = 0; i < pcpu_nr_slots; i++)
25463c7be18aSRoman Gushchin 			INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]);
2547fbf59bc9STejun Heo 
2548edcb4639STejun Heo 	/*
2549d2f3c384SDennis Zhou (Facebook) 	 * The end of the static region needs to be aligned with the
2550d2f3c384SDennis Zhou (Facebook) 	 * minimum allocation size as this offsets the reserved and
2551d2f3c384SDennis Zhou (Facebook) 	 * dynamic region.  The first chunk ends page aligned by
2552d2f3c384SDennis Zhou (Facebook) 	 * expanding the dynamic region, therefore the dynamic region
2553d2f3c384SDennis Zhou (Facebook) 	 * can be shrunk to compensate while still staying above the
2554d2f3c384SDennis Zhou (Facebook) 	 * configured sizes.
2555d2f3c384SDennis Zhou (Facebook) 	 */
2556d2f3c384SDennis Zhou (Facebook) 	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2557d2f3c384SDennis Zhou (Facebook) 	dyn_size = ai->dyn_size - (static_size - ai->static_size);
2558d2f3c384SDennis Zhou (Facebook) 
2559d2f3c384SDennis Zhou (Facebook) 	/*
2560c0ebfdc3SDennis Zhou (Facebook) 	 * Initialize first chunk.
2561c0ebfdc3SDennis Zhou (Facebook) 	 * If the reserved_size is non-zero, this initializes the reserved
2562c0ebfdc3SDennis Zhou (Facebook) 	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
2563c0ebfdc3SDennis Zhou (Facebook) 	 * and the dynamic region is initialized here.  The first chunk,
2564c0ebfdc3SDennis Zhou (Facebook) 	 * pcpu_first_chunk, will always point to the chunk that serves
2565c0ebfdc3SDennis Zhou (Facebook) 	 * the dynamic region.
2566edcb4639STejun Heo 	 */
2567d2f3c384SDennis Zhou (Facebook) 	tmp_addr = (unsigned long)base_addr + static_size;
2568d2f3c384SDennis Zhou (Facebook) 	map_size = ai->reserved_size ?: dyn_size;
256940064aecSDennis Zhou (Facebook) 	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
257061ace7faSTejun Heo 
2571edcb4639STejun Heo 	/* init dynamic chunk if necessary */
2572b9c39442SDennis Zhou (Facebook) 	if (ai->reserved_size) {
25730c4169c3SDennis Zhou (Facebook) 		pcpu_reserved_chunk = chunk;
2574b9c39442SDennis Zhou (Facebook) 
2575d2f3c384SDennis Zhou (Facebook) 		tmp_addr = (unsigned long)base_addr + static_size +
2576c0ebfdc3SDennis Zhou (Facebook) 			   ai->reserved_size;
2577d2f3c384SDennis Zhou (Facebook) 		map_size = dyn_size;
257840064aecSDennis Zhou (Facebook) 		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2579edcb4639STejun Heo 	}
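	/*
	 * Worked example (illustrative sizes): with static_size = 64KB,
	 * reserved_size = 8KB and dyn_size = 28KB, the first call to
	 * pcpu_alloc_first_chunk() above covers [base + 64KB, base + 72KB)
	 * and becomes pcpu_reserved_chunk, while the second covers
	 * [base + 72KB, base + 100KB) and becomes pcpu_first_chunk below.
	 */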
2580edcb4639STejun Heo 
25812441d15cSTejun Heo 	/* link the first chunk in */
25820c4169c3SDennis Zhou (Facebook) 	pcpu_first_chunk = chunk;
25830760fa3dSRoman Gushchin 	pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages;
2584ae9e6bc9STejun Heo 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2585fbf59bc9STejun Heo 
25867e8a6304SDennis Zhou (Facebook) 	/* include all regions of the first chunk */
25877e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated += PFN_DOWN(size_sum);
25887e8a6304SDennis Zhou (Facebook) 
258930a5b536SDennis Zhou 	pcpu_stats_chunk_alloc();
2590df95e795SDennis Zhou 	trace_percpu_create_chunk(base_addr);
259130a5b536SDennis Zhou 
2592fbf59bc9STejun Heo 	/* we're done */
2593bba174f5STejun Heo 	pcpu_base_addr = base_addr;
2594fbf59bc9STejun Heo }
259566c3a757STejun Heo 
2596bbddff05STejun Heo #ifdef CONFIG_SMP
2597bbddff05STejun Heo 
259817f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2599f58dc01bSTejun Heo 	[PCPU_FC_AUTO]	= "auto",
2600f58dc01bSTejun Heo 	[PCPU_FC_EMBED]	= "embed",
2601f58dc01bSTejun Heo 	[PCPU_FC_PAGE]	= "page",
2602f58dc01bSTejun Heo };
260366c3a757STejun Heo 
2604f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2605f58dc01bSTejun Heo 
2606f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str)
260766c3a757STejun Heo {
26085479c78aSCyrill Gorcunov 	if (!str)
26095479c78aSCyrill Gorcunov 		return -EINVAL;
26105479c78aSCyrill Gorcunov 
2611f58dc01bSTejun Heo 	if (0)
2612f58dc01bSTejun Heo 		/* nada */;
2613f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2614f58dc01bSTejun Heo 	else if (!strcmp(str, "embed"))
2615f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_EMBED;
2616f58dc01bSTejun Heo #endif
2617f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2618f58dc01bSTejun Heo 	else if (!strcmp(str, "page"))
2619f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_PAGE;
2620f58dc01bSTejun Heo #endif
2621f58dc01bSTejun Heo 	else
2622870d4b12SJoe Perches 		pr_warn("unknown allocator %s specified\n", str);
262366c3a757STejun Heo 
2624f58dc01bSTejun Heo 	return 0;
262566c3a757STejun Heo }
2626f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup);
262766c3a757STejun Heo 
26283c9a024fSTejun Heo /*
26293c9a024fSTejun Heo  * pcpu_embed_first_chunk() is used by the generic percpu setup.
26303c9a024fSTejun Heo  * Build it if needed by the arch config or the generic setup is going
26313c9a024fSTejun Heo  * to be used.
26323c9a024fSTejun Heo  */
263308fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
263408fc4580STejun Heo 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
26353c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK
26363c9a024fSTejun Heo #endif
26373c9a024fSTejun Heo 
26383c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */
26393c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
26403c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK
26413c9a024fSTejun Heo #endif
26423c9a024fSTejun Heo 
26433c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */
26443c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
26453c9a024fSTejun Heo /**
2646fbf59bc9STejun Heo  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2647fbf59bc9STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
2648fbf59bc9STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
2649fbf59bc9STejun Heo  * @atom_size: allocation atom size
2650fbf59bc9STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
2651fbf59bc9STejun Heo  *
2652fbf59bc9STejun Heo  * This function determines grouping of units, their mappings to cpus
2653fbf59bc9STejun Heo  * and other parameters considering needed percpu size, allocation
2654fbf59bc9STejun Heo  * atom size and distances between CPUs.
2655fbf59bc9STejun Heo  *
2656bffc4375SYannick Guerrini  * Groups are always multiples of atom size, and CPUs which are
2657fbf59bc9STejun Heo  * LOCAL_DISTANCE from each other both ways are grouped together and
2658fbf59bc9STejun Heo  * share space for units in the same group.  The returned configuration
2659fbf59bc9STejun Heo  * is guaranteed to have CPUs on different nodes in different groups and
2660fbf59bc9STejun Heo  * >=75% usage of the allocated virtual address space.
2661fbf59bc9STejun Heo  *
2662fbf59bc9STejun Heo  * RETURNS:
2663fbf59bc9STejun Heo  * On success, pointer to the new allocation_info is returned.  On
2664fbf59bc9STejun Heo  * failure, ERR_PTR value is returned.
2665fbf59bc9STejun Heo  */
2666258e0815SDennis Zhou static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
2667fbf59bc9STejun Heo 				size_t reserved_size, size_t dyn_size,
2668fbf59bc9STejun Heo 				size_t atom_size,
2669fbf59bc9STejun Heo 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2670fbf59bc9STejun Heo {
2671fbf59bc9STejun Heo 	static int group_map[NR_CPUS] __initdata;
2672fbf59bc9STejun Heo 	static int group_cnt[NR_CPUS] __initdata;
2673d7d29ac7SWonhyuk Yang 	static struct cpumask mask __initdata;
2674fbf59bc9STejun Heo 	const size_t static_size = __per_cpu_end - __per_cpu_start;
2675fbf59bc9STejun Heo 	int nr_groups = 1, nr_units = 0;
2676fbf59bc9STejun Heo 	size_t size_sum, min_unit_size, alloc_size;
26773f649ab7SKees Cook 	int upa, max_upa, best_upa;	/* units_per_alloc */
2678fbf59bc9STejun Heo 	int last_allocs, group, unit;
2679fbf59bc9STejun Heo 	unsigned int cpu, tcpu;
2680fbf59bc9STejun Heo 	struct pcpu_alloc_info *ai;
2681fbf59bc9STejun Heo 	unsigned int *cpu_map;
2682fbf59bc9STejun Heo 
2683fbf59bc9STejun Heo 	/* this function may be called multiple times */
2684fbf59bc9STejun Heo 	memset(group_map, 0, sizeof(group_map));
2685fbf59bc9STejun Heo 	memset(group_cnt, 0, sizeof(group_cnt));
2686d7d29ac7SWonhyuk Yang 	cpumask_clear(&mask);
2687fbf59bc9STejun Heo 
2688fbf59bc9STejun Heo 	/* calculate size_sum and ensure dyn_size is enough for early alloc */
2689fbf59bc9STejun Heo 	size_sum = PFN_ALIGN(static_size + reserved_size +
2690fbf59bc9STejun Heo 			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2691fbf59bc9STejun Heo 	dyn_size = size_sum - static_size - reserved_size;
2692fbf59bc9STejun Heo 
2693fbf59bc9STejun Heo 	/*
2694fbf59bc9STejun Heo 	 * Determine min_unit_size, alloc_size and max_upa such that
2695fbf59bc9STejun Heo 	 * alloc_size is multiple of atom_size and is the smallest
269625985edcSLucas De Marchi 	 * which can accommodate 4k aligned segments which are equal to
2697fbf59bc9STejun Heo 	 * or larger than min_unit_size.
2698fbf59bc9STejun Heo 	 */
2699fbf59bc9STejun Heo 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2700fbf59bc9STejun Heo 
27019c015162SDennis Zhou (Facebook) 	/* determine the maximum # of units that can fit in an allocation */
2702fbf59bc9STejun Heo 	alloc_size = roundup(min_unit_size, atom_size);
2703fbf59bc9STejun Heo 	upa = alloc_size / min_unit_size;
2704f09f1243SAlexander Kuleshov 	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2705fbf59bc9STejun Heo 		upa--;
2706fbf59bc9STejun Heo 	max_upa = upa;
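	/*
	 * Illustration (made-up sizes): with size_sum = 44KB and
	 * atom_size = 2MB, min_unit_size is 44KB, alloc_size rounds up to
	 * 2MB and upa starts at 46; the loop above then lowers upa until
	 * 2MB / upa divides evenly and is page aligned, giving upa = 32,
	 * i.e. 32 units of 64KB per 2MB allocation.
	 */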
2707fbf59bc9STejun Heo 
2708d7d29ac7SWonhyuk Yang 	cpumask_copy(&mask, cpu_possible_mask);
2709d7d29ac7SWonhyuk Yang 
2710fbf59bc9STejun Heo 	/* group cpus according to their proximity */
2711d7d29ac7SWonhyuk Yang 	for (group = 0; !cpumask_empty(&mask); group++) {
2712d7d29ac7SWonhyuk Yang 		/* pop the group's first cpu */
2713d7d29ac7SWonhyuk Yang 		cpu = cpumask_first(&mask);
2714fbf59bc9STejun Heo 		group_map[cpu] = group;
2715fbf59bc9STejun Heo 		group_cnt[group]++;
2716d7d29ac7SWonhyuk Yang 		cpumask_clear_cpu(cpu, &mask);
2717d7d29ac7SWonhyuk Yang 
2718d7d29ac7SWonhyuk Yang 		for_each_cpu(tcpu, &mask) {
2719d7d29ac7SWonhyuk Yang 			if (!cpu_distance_fn ||
2720d7d29ac7SWonhyuk Yang 			    (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2721d7d29ac7SWonhyuk Yang 			     cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2722d7d29ac7SWonhyuk Yang 				group_map[tcpu] = group;
2723d7d29ac7SWonhyuk Yang 				group_cnt[group]++;
2724d7d29ac7SWonhyuk Yang 				cpumask_clear_cpu(tcpu, &mask);
2725fbf59bc9STejun Heo 			}
2726d7d29ac7SWonhyuk Yang 		}
2727d7d29ac7SWonhyuk Yang 	}
2728d7d29ac7SWonhyuk Yang 	nr_groups = group;
2729fbf59bc9STejun Heo 
2730fbf59bc9STejun Heo 	/*
27319c015162SDennis Zhou (Facebook) 	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
27329c015162SDennis Zhou (Facebook) 	 * Expand the unit_size until we use >= 75% of the units allocated.
27339c015162SDennis Zhou (Facebook) 	 * Related to atom_size, which could be much larger than the unit_size.
2734fbf59bc9STejun Heo 	 */
2735fbf59bc9STejun Heo 	last_allocs = INT_MAX;
2736fbf59bc9STejun Heo 	for (upa = max_upa; upa; upa--) {
2737fbf59bc9STejun Heo 		int allocs = 0, wasted = 0;
2738fbf59bc9STejun Heo 
2739f09f1243SAlexander Kuleshov 		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2740fbf59bc9STejun Heo 			continue;
2741fbf59bc9STejun Heo 
2742fbf59bc9STejun Heo 		for (group = 0; group < nr_groups; group++) {
2743fbf59bc9STejun Heo 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2744fbf59bc9STejun Heo 			allocs += this_allocs;
2745fbf59bc9STejun Heo 			wasted += this_allocs * upa - group_cnt[group];
2746fbf59bc9STejun Heo 		}
2747fbf59bc9STejun Heo 
2748fbf59bc9STejun Heo 		/*
2749fbf59bc9STejun Heo 		 * Don't accept if wastage is over 1/3.  The
2750fbf59bc9STejun Heo 		 * greater-than comparison ensures upa==1 always
2751fbf59bc9STejun Heo 		 * passes the following check.
2752fbf59bc9STejun Heo 		 */
2753fbf59bc9STejun Heo 		if (wasted > num_possible_cpus() / 3)
2754fbf59bc9STejun Heo 			continue;
2755fbf59bc9STejun Heo 
2756fbf59bc9STejun Heo 		/* and then don't consume more memory */
2757fbf59bc9STejun Heo 		if (allocs > last_allocs)
2758fbf59bc9STejun Heo 			break;
2759fbf59bc9STejun Heo 		last_allocs = allocs;
2760fbf59bc9STejun Heo 		best_upa = upa;
2761fbf59bc9STejun Heo 	}
2762fbf59bc9STejun Heo 	upa = best_upa;
2763fbf59bc9STejun Heo 
2764fbf59bc9STejun Heo 	/* allocate and fill alloc_info */
2765fbf59bc9STejun Heo 	for (group = 0; group < nr_groups; group++)
2766fbf59bc9STejun Heo 		nr_units += roundup(group_cnt[group], upa);
2767fbf59bc9STejun Heo 
2768fbf59bc9STejun Heo 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2769fbf59bc9STejun Heo 	if (!ai)
2770fbf59bc9STejun Heo 		return ERR_PTR(-ENOMEM);
2771fbf59bc9STejun Heo 	cpu_map = ai->groups[0].cpu_map;
2772fbf59bc9STejun Heo 
2773fbf59bc9STejun Heo 	for (group = 0; group < nr_groups; group++) {
2774fbf59bc9STejun Heo 		ai->groups[group].cpu_map = cpu_map;
2775fbf59bc9STejun Heo 		cpu_map += roundup(group_cnt[group], upa);
2776fbf59bc9STejun Heo 	}
2777fbf59bc9STejun Heo 
2778fbf59bc9STejun Heo 	ai->static_size = static_size;
2779fbf59bc9STejun Heo 	ai->reserved_size = reserved_size;
2780fbf59bc9STejun Heo 	ai->dyn_size = dyn_size;
2781fbf59bc9STejun Heo 	ai->unit_size = alloc_size / upa;
2782fbf59bc9STejun Heo 	ai->atom_size = atom_size;
2783fbf59bc9STejun Heo 	ai->alloc_size = alloc_size;
2784fbf59bc9STejun Heo 
27852de7852fSPeng Fan 	for (group = 0, unit = 0; group < nr_groups; group++) {
2786fbf59bc9STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
2787fbf59bc9STejun Heo 
2788fbf59bc9STejun Heo 		/*
2789fbf59bc9STejun Heo 		 * Initialize base_offset as if all groups are located
2790fbf59bc9STejun Heo 		 * back-to-back.  The caller should update this to
2791fbf59bc9STejun Heo 		 * reflect actual allocation.
2792fbf59bc9STejun Heo 		 */
2793fbf59bc9STejun Heo 		gi->base_offset = unit * ai->unit_size;
2794fbf59bc9STejun Heo 
2795fbf59bc9STejun Heo 		for_each_possible_cpu(cpu)
2796fbf59bc9STejun Heo 			if (group_map[cpu] == group)
2797fbf59bc9STejun Heo 				gi->cpu_map[gi->nr_units++] = cpu;
2798fbf59bc9STejun Heo 		gi->nr_units = roundup(gi->nr_units, upa);
2799fbf59bc9STejun Heo 		unit += gi->nr_units;
2800fbf59bc9STejun Heo 	}
2801fbf59bc9STejun Heo 	BUG_ON(unit != nr_units);
2802fbf59bc9STejun Heo 
2803fbf59bc9STejun Heo 	return ai;
2804fbf59bc9STejun Heo }
28053c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2806fbf59bc9STejun Heo 
28073c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK)
280866c3a757STejun Heo /**
280966c3a757STejun Heo  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
281066c3a757STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
28114ba6ce25STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
2812c8826dd5STejun Heo  * @atom_size: allocation atom size
2813c8826dd5STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
2814c8826dd5STejun Heo  * @alloc_fn: function to allocate percpu page
281525985edcSLucas De Marchi  * @free_fn: function to free percpu page
281666c3a757STejun Heo  *
281766c3a757STejun Heo  * This is a helper to ease setting up embedded first percpu chunk and
281866c3a757STejun Heo  * can be called where pcpu_setup_first_chunk() is expected.
281966c3a757STejun Heo  *
282066c3a757STejun Heo  * If this function is used to setup the first chunk, it is allocated
2821c8826dd5STejun Heo  * by calling @alloc_fn and used as-is without being mapped into
2822c8826dd5STejun Heo  * vmalloc area.  Allocations are always whole multiples of @atom_size
2823c8826dd5STejun Heo  * aligned to @atom_size.
2824c8826dd5STejun Heo  *
2825c8826dd5STejun Heo  * This enables the first chunk to piggy back on the linear physical
2826c8826dd5STejun Heo  * This enables the first chunk to piggyback on the linear physical
2827c8826dd5STejun Heo  * mapping, which often uses larger page sizes.  Please note that this
2828c8826dd5STejun Heo  * requiring large vmalloc address space.  Don't use this allocator if
2829c8826dd5STejun Heo  * vmalloc space is not orders of magnitude larger than distances
2830c8826dd5STejun Heo  * between node memory addresses (ie. 32bit NUMA machines).
283166c3a757STejun Heo  *
28324ba6ce25STejun Heo  * @dyn_size specifies the minimum dynamic area size.
283366c3a757STejun Heo  *
283466c3a757STejun Heo  * If the needed size is smaller than the minimum or specified unit
2835c8826dd5STejun Heo  * size, the leftover is returned using @free_fn.
283666c3a757STejun Heo  *
283766c3a757STejun Heo  * RETURNS:
2838fb435d52STejun Heo  * 0 on success, -errno on failure.
283966c3a757STejun Heo  */
28404ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2841c8826dd5STejun Heo 				  size_t atom_size,
2842c8826dd5STejun Heo 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2843c8826dd5STejun Heo 				  pcpu_fc_alloc_fn_t alloc_fn,
2844c8826dd5STejun Heo 				  pcpu_fc_free_fn_t free_fn)
284566c3a757STejun Heo {
2846c8826dd5STejun Heo 	void *base = (void *)ULONG_MAX;
2847c8826dd5STejun Heo 	void **areas = NULL;
2848fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
284993c76b6bSzijun_hu 	size_t size_sum, areas_size;
285093c76b6bSzijun_hu 	unsigned long max_distance;
2851163fa234SKefeng Wang 	int group, i, highest_group, rc = 0;
285266c3a757STejun Heo 
2853c8826dd5STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2854c8826dd5STejun Heo 				   cpu_distance_fn);
2855fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
2856fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
285766c3a757STejun Heo 
2858fd1e8a1fSTejun Heo 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2859c8826dd5STejun Heo 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
286066c3a757STejun Heo 
286126fb3daeSMike Rapoport 	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
2862c8826dd5STejun Heo 	if (!areas) {
2863fb435d52STejun Heo 		rc = -ENOMEM;
2864c8826dd5STejun Heo 		goto out_free;
2865fa8a7094STejun Heo 	}
286666c3a757STejun Heo 
28679b739662Szijun_hu 	/* allocate, copy and determine base address & max_distance */
28689b739662Szijun_hu 	highest_group = 0;
2869c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
2870c8826dd5STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
2871c8826dd5STejun Heo 		unsigned int cpu = NR_CPUS;
2872c8826dd5STejun Heo 		void *ptr;
287366c3a757STejun Heo 
2874c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2875c8826dd5STejun Heo 			cpu = gi->cpu_map[i];
2876c8826dd5STejun Heo 		BUG_ON(cpu == NR_CPUS);
2877c8826dd5STejun Heo 
2878c8826dd5STejun Heo 		/* allocate space for the whole group */
2879c8826dd5STejun Heo 		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2880c8826dd5STejun Heo 		if (!ptr) {
2881c8826dd5STejun Heo 			rc = -ENOMEM;
2882c8826dd5STejun Heo 			goto out_free_areas;
2883c8826dd5STejun Heo 		}
2884f528f0b8SCatalin Marinas 		/* kmemleak tracks the percpu allocations separately */
2885f528f0b8SCatalin Marinas 		kmemleak_free(ptr);
2886c8826dd5STejun Heo 		areas[group] = ptr;
2887c8826dd5STejun Heo 
2888c8826dd5STejun Heo 		base = min(ptr, base);
28899b739662Szijun_hu 		if (ptr > areas[highest_group])
28909b739662Szijun_hu 			highest_group = group;
28919b739662Szijun_hu 	}
28929b739662Szijun_hu 	max_distance = areas[highest_group] - base;
28939b739662Szijun_hu 	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
28949b739662Szijun_hu 
28959b739662Szijun_hu 	/* warn if maximum distance is further than 75% of vmalloc space */
28969b739662Szijun_hu 	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
28979b739662Szijun_hu 		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
28989b739662Szijun_hu 				max_distance, VMALLOC_TOTAL);
28999b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
29009b739662Szijun_hu 		/* and fail if we have fallback */
29019b739662Szijun_hu 		rc = -EINVAL;
29029b739662Szijun_hu 		goto out_free_areas;
29039b739662Szijun_hu #endif
290442b64281STejun Heo 	}
290542b64281STejun Heo 
290642b64281STejun Heo 	/*
290742b64281STejun Heo 	 * Copy data and free unused parts.  This should happen after all
290842b64281STejun Heo 	 * allocations are complete; otherwise, we may end up with
290942b64281STejun Heo 	 * overlapping groups.
291042b64281STejun Heo 	 */
291142b64281STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
291242b64281STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
291342b64281STejun Heo 		void *ptr = areas[group];
2914c8826dd5STejun Heo 
2915c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2916c8826dd5STejun Heo 			if (gi->cpu_map[i] == NR_CPUS) {
2917c8826dd5STejun Heo 				/* unused unit, free whole */
2918c8826dd5STejun Heo 				free_fn(ptr, ai->unit_size);
2919c8826dd5STejun Heo 				continue;
2920c8826dd5STejun Heo 			}
2921c8826dd5STejun Heo 			/* copy and return the unused part */
2922fd1e8a1fSTejun Heo 			memcpy(ptr, __per_cpu_load, ai->static_size);
2923c8826dd5STejun Heo 			free_fn(ptr + size_sum, ai->unit_size - size_sum);
2924c8826dd5STejun Heo 		}
292566c3a757STejun Heo 	}
292666c3a757STejun Heo 
2927c8826dd5STejun Heo 	/* base address is now known, determine group base offsets */
29286ea529a2STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
2929c8826dd5STejun Heo 		ai->groups[group].base_offset = areas[group] - base;
29306ea529a2STejun Heo 	}
2931c8826dd5STejun Heo 
293200206a69SMatteo Croce 	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
293300206a69SMatteo Croce 		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2934fd1e8a1fSTejun Heo 		ai->dyn_size, ai->unit_size);
293566c3a757STejun Heo 
2936163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, base);
2937c8826dd5STejun Heo 	goto out_free;
2938c8826dd5STejun Heo 
2939c8826dd5STejun Heo out_free_areas:
2940c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++)
2941f851c8d8SMichael Holzheu 		if (areas[group])
2942c8826dd5STejun Heo 			free_fn(areas[group],
2943c8826dd5STejun Heo 				ai->groups[group].nr_units * ai->unit_size);
2944c8826dd5STejun Heo out_free:
2945fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
2946c8826dd5STejun Heo 	if (areas)
2947999c17e3SSantosh Shilimkar 		memblock_free_early(__pa(areas), areas_size);
2948fb435d52STejun Heo 	return rc;
2949d4b95f80STejun Heo }
29503c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */
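
/*
 * Editor's note: a worked example (with made-up numbers) of the
 * max_distance check in pcpu_embed_first_chunk() above.  Suppose two
 * groups of 4 units each with unit_size = 64 KiB, and the group
 * allocations land such that areas[1] sits 1 MiB above areas[0] == base
 * (so highest_group == 1).  Then:
 *
 *	max_distance = (areas[1] - base) + 4 * 64 KiB
 *		     = 1 MiB + 256 KiB = 1280 KiB
 *
 * Later dynamic chunks are created in vmalloc space using the same
 * cpu -> unit offsets, so each of them needs a contiguous vmalloc area
 * spanning max_distance bytes; hence the warning (and, when a page
 * fallback is available, the failure) once max_distance exceeds 75% of
 * VMALLOC_TOTAL.
 */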
2951d4b95f80STejun Heo 
29523c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK
2953d4b95f80STejun Heo /**
295400ae4064STejun Heo  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2955d4b95f80STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
2956d4b95f80STejun Heo  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
295725985edcSLucas De Marchi  * @free_fn: function to free percpu page, always called with PAGE_SIZE
2958d4b95f80STejun Heo  * @populate_pte_fn: function to populate pte
2959d4b95f80STejun Heo  *
296000ae4064STejun Heo  * This is a helper to ease setting up a page-remapped first percpu
296100ae4064STejun Heo  * chunk and can be called where pcpu_setup_first_chunk() is expected.
2962d4b95f80STejun Heo  *
2963d4b95f80STejun Heo  * This is the basic allocator.  The static percpu area is allocated
2964d4b95f80STejun Heo  * page-by-page into the vmalloc area.
2965d4b95f80STejun Heo  *
2966d4b95f80STejun Heo  * RETURNS:
2967fb435d52STejun Heo  * 0 on success, -errno on failure.
2968d4b95f80STejun Heo  */
2969fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size,
2970d4b95f80STejun Heo 				 pcpu_fc_alloc_fn_t alloc_fn,
2971d4b95f80STejun Heo 				 pcpu_fc_free_fn_t free_fn,
2972d4b95f80STejun Heo 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2973d4b95f80STejun Heo {
29748f05a6a6STejun Heo 	static struct vm_struct vm;
2975fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
297600ae4064STejun Heo 	char psize_str[16];
2977ce3141a2STejun Heo 	int unit_pages;
2978d4b95f80STejun Heo 	size_t pages_size;
2979ce3141a2STejun Heo 	struct page **pages;
2980163fa234SKefeng Wang 	int unit, i, j, rc = 0;
29818f606604Szijun_hu 	int upa;
29828f606604Szijun_hu 	int nr_g0_units;
2983d4b95f80STejun Heo 
298400ae4064STejun Heo 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
298500ae4064STejun Heo 
29864ba6ce25STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2987fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
2988fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
2989fd1e8a1fSTejun Heo 	BUG_ON(ai->nr_groups != 1);
29908f606604Szijun_hu 	upa = ai->alloc_size / ai->unit_size;
29918f606604Szijun_hu 	nr_g0_units = roundup(num_possible_cpus(), upa);
29920b59c25fSIgor Stoppa 	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
29938f606604Szijun_hu 		pcpu_free_alloc_info(ai);
29948f606604Szijun_hu 		return -EINVAL;
29958f606604Szijun_hu 	}
2996fd1e8a1fSTejun Heo 
2997fd1e8a1fSTejun Heo 	unit_pages = ai->unit_size >> PAGE_SHIFT;
2998d4b95f80STejun Heo 
2999d4b95f80STejun Heo 	/* unaligned allocations can't be freed, round up to page size */
3000fd1e8a1fSTejun Heo 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3001fd1e8a1fSTejun Heo 			       sizeof(pages[0]));
30027e1c4e27SMike Rapoport 	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3003f655f405SMike Rapoport 	if (!pages)
3004f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
3005f655f405SMike Rapoport 		      pages_size);
3006d4b95f80STejun Heo 
30078f05a6a6STejun Heo 	/* allocate pages */
3008d4b95f80STejun Heo 	j = 0;
30098f606604Szijun_hu 	for (unit = 0; unit < num_possible_cpus(); unit++) {
3010fd1e8a1fSTejun Heo 		unsigned int cpu = ai->groups[0].cpu_map[unit];
30118f606604Szijun_hu 		for (i = 0; i < unit_pages; i++) {
3012d4b95f80STejun Heo 			void *ptr;
3013d4b95f80STejun Heo 
30143cbc8565STejun Heo 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
3015d4b95f80STejun Heo 			if (!ptr) {
3016870d4b12SJoe Perches 				pr_warn("failed to allocate %s page for cpu%u\n",
3017598d8091SJoe Perches 						psize_str, cpu);
3018d4b95f80STejun Heo 				goto enomem;
3019d4b95f80STejun Heo 			}
3020f528f0b8SCatalin Marinas 			/* kmemleak tracks the percpu allocations separately */
3021f528f0b8SCatalin Marinas 			kmemleak_free(ptr);
3022ce3141a2STejun Heo 			pages[j++] = virt_to_page(ptr);
3023d4b95f80STejun Heo 		}
30248f606604Szijun_hu 	}
3025d4b95f80STejun Heo 
30268f05a6a6STejun Heo 	/* allocate vm area, map the pages and copy static data */
30278f05a6a6STejun Heo 	vm.flags = VM_ALLOC;
3028fd1e8a1fSTejun Heo 	vm.size = num_possible_cpus() * ai->unit_size;
30298f05a6a6STejun Heo 	vm_area_register_early(&vm, PAGE_SIZE);
30308f05a6a6STejun Heo 
3031fd1e8a1fSTejun Heo 	for (unit = 0; unit < num_possible_cpus(); unit++) {
30321d9d3257STejun Heo 		unsigned long unit_addr =
3033fd1e8a1fSTejun Heo 			(unsigned long)vm.addr + unit * ai->unit_size;
30348f05a6a6STejun Heo 
3035ce3141a2STejun Heo 		for (i = 0; i < unit_pages; i++)
30368f05a6a6STejun Heo 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
30378f05a6a6STejun Heo 
30388f05a6a6STejun Heo 		/* pte already populated, the following shouldn't fail */
3039fb435d52STejun Heo 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3040ce3141a2STejun Heo 				      unit_pages);
3041fb435d52STejun Heo 		if (rc < 0)
3042fb435d52STejun Heo 			panic("failed to map percpu area, err=%d\n", rc);
30438f05a6a6STejun Heo 
30448f05a6a6STejun Heo 		/*
30458f05a6a6STejun Heo 		 * FIXME: Archs with virtual cache should flush local
30468f05a6a6STejun Heo 		 * cache for the linear mapping here - something
30478f05a6a6STejun Heo 		 * equivalent to flush_cache_vmap() on the local cpu.
30488f05a6a6STejun Heo 		 * flush_cache_vmap() can't be used as most supporting
30498f05a6a6STejun Heo 		 * data structures are not set up yet.
30508f05a6a6STejun Heo 		 */
30518f05a6a6STejun Heo 
30528f05a6a6STejun Heo 		/* copy static data */
3053fd1e8a1fSTejun Heo 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
305466c3a757STejun Heo 	}
305566c3a757STejun Heo 
305666c3a757STejun Heo 	/* we're ready, commit */
305700206a69SMatteo Croce 	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
305800206a69SMatteo Croce 		unit_pages, psize_str, ai->static_size,
3059fd1e8a1fSTejun Heo 		ai->reserved_size, ai->dyn_size);
306066c3a757STejun Heo 
3061163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, vm.addr);
3062d4b95f80STejun Heo 	goto out_free_ar;
3063d4b95f80STejun Heo 
3064d4b95f80STejun Heo enomem:
3065d4b95f80STejun Heo 	while (--j >= 0)
3066ce3141a2STejun Heo 		free_fn(page_address(pages[j]), PAGE_SIZE);
3067fb435d52STejun Heo 	rc = -ENOMEM;
3068d4b95f80STejun Heo out_free_ar:
3069999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(pages), pages_size);
3070fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
3071fb435d52STejun Heo 	return rc;
307266c3a757STejun Heo }
30733c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */
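
/*
 * Editor's note: a minimal, hypothetical sketch of how an architecture
 * might wire up pcpu_page_first_chunk() from its own
 * setup_per_cpu_areas(), assuming the page first chunk helper above is
 * built.  The pcpu_arch_fc_* helpers and example_setup_per_cpu_areas()
 * are illustrative names only, not part of this file or any real arch;
 * a real implementation would allocate on the node of @cpu and provide
 * a proper pte-population helper.
 */
#if 0	/* illustrative sketch only, not compiled */
static void * __init pcpu_arch_fc_alloc(unsigned int cpu, size_t size,
					size_t align)
{
	/* bootmem allocation; a real arch would honour cpu_to_node(cpu) */
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_arch_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

static void __init pcpu_arch_populate_pte(unsigned long addr)
{
	/* arch specific: make sure the kernel page tables cover @addr */
}

void __init example_setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;

	if (pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
				  pcpu_arch_fc_alloc, pcpu_arch_fc_free,
				  pcpu_arch_populate_pte) < 0)
		panic("Failed to initialize percpu areas.");

	/* publish the per-cpu offsets exactly like the generic code below */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif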
3074d4b95f80STejun Heo 
3075bbddff05STejun Heo #ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
30768c4bfc6eSTejun Heo /*
3077bbddff05STejun Heo  * Generic SMP percpu area setup.
3078e74e3962STejun Heo  *
3079e74e3962STejun Heo  * The embedding helper is used because its behavior closely resembles
3080e74e3962STejun Heo  * the original non-dynamic generic percpu area setup.  This is
3081e74e3962STejun Heo  * important because many archs have addressing restrictions and might
3082e74e3962STejun Heo  * fail if the percpu area is located far away from the previous
3083e74e3962STejun Heo  * location.  As an added bonus, in non-NUMA cases, embedding is
3084e74e3962STejun Heo  * generally a good idea TLB-wise because the percpu area can piggyback
3085e74e3962STejun Heo  * on the physical linear memory mapping, which uses large page
3086e74e3962STejun Heo  * mappings on applicable archs.
3087e74e3962STejun Heo  */
3088e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3089e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset);
3090e74e3962STejun Heo 
3091c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
3092c8826dd5STejun Heo 				       size_t align)
3093c8826dd5STejun Heo {
309426fb3daeSMike Rapoport 	return  memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
3095c8826dd5STejun Heo }
3096c8826dd5STejun Heo 
3097c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
3098c8826dd5STejun Heo {
3099999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(ptr), size);
3100c8826dd5STejun Heo }
3101c8826dd5STejun Heo 
3102e74e3962STejun Heo void __init setup_per_cpu_areas(void)
3103e74e3962STejun Heo {
3104e74e3962STejun Heo 	unsigned long delta;
3105e74e3962STejun Heo 	unsigned int cpu;
3106fb435d52STejun Heo 	int rc;
3107e74e3962STejun Heo 
3108e74e3962STejun Heo 	/*
3109e74e3962STejun Heo 	 * Always reserve an area for module percpu variables.  That's
3110e74e3962STejun Heo 	 * what the legacy allocator did.
3111e74e3962STejun Heo 	 */
3112fb435d52STejun Heo 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
3113c8826dd5STejun Heo 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
3114c8826dd5STejun Heo 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
3115fb435d52STejun Heo 	if (rc < 0)
3116bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
3117e74e3962STejun Heo 
3118e74e3962STejun Heo 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3119e74e3962STejun Heo 	for_each_possible_cpu(cpu)
3120fb435d52STejun Heo 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3121e74e3962STejun Heo }
3122e74e3962STejun Heo #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
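
/*
 * Editor's note: illustrative only.  Once __per_cpu_offset[] has been
 * filled in (by the generic code above or by an arch-specific
 * setup_per_cpu_areas()), accessing a static percpu variable for a
 * given cpu boils down to pointer arithmetic roughly equivalent to:
 *
 *	DEFINE_PER_CPU(int, foo);
 *	int *p = (int *)((unsigned long)&foo + __per_cpu_offset[cpu]);
 *
 * which is what per_cpu_ptr()/SHIFT_PERCPU_PTR() expand to on
 * architectures using the generic __per_cpu_offset[] array.
 */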
3123099a19d9STejun Heo 
3124bbddff05STejun Heo #else	/* CONFIG_SMP */
3125bbddff05STejun Heo 
3126bbddff05STejun Heo /*
3127bbddff05STejun Heo  * UP percpu area setup.
3128bbddff05STejun Heo  *
3129bbddff05STejun Heo  * UP always uses the km-based percpu allocator with identity mapping.
3130bbddff05STejun Heo  * Static percpu variables are indistinguishable from the usual static
3131bbddff05STejun Heo  * variables and don't require any special preparation.
3132bbddff05STejun Heo  */
3133bbddff05STejun Heo void __init setup_per_cpu_areas(void)
3134bbddff05STejun Heo {
3135bbddff05STejun Heo 	const size_t unit_size =
3136bbddff05STejun Heo 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3137bbddff05STejun Heo 					 PERCPU_DYNAMIC_RESERVE));
3138bbddff05STejun Heo 	struct pcpu_alloc_info *ai;
3139bbddff05STejun Heo 	void *fc;
3140bbddff05STejun Heo 
3141bbddff05STejun Heo 	ai = pcpu_alloc_alloc_info(1, 1);
314226fb3daeSMike Rapoport 	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3143bbddff05STejun Heo 	if (!ai || !fc)
3144bbddff05STejun Heo 		panic("Failed to allocate memory for percpu areas.");
3145100d13c3SCatalin Marinas 	/* kmemleak tracks the percpu allocations separately */
3146100d13c3SCatalin Marinas 	kmemleak_free(fc);
3147bbddff05STejun Heo 
3148bbddff05STejun Heo 	ai->dyn_size = unit_size;
3149bbddff05STejun Heo 	ai->unit_size = unit_size;
3150bbddff05STejun Heo 	ai->atom_size = unit_size;
3151bbddff05STejun Heo 	ai->alloc_size = unit_size;
3152bbddff05STejun Heo 	ai->groups[0].nr_units = 1;
3153bbddff05STejun Heo 	ai->groups[0].cpu_map[0] = 0;
3154bbddff05STejun Heo 
3155163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, fc);
3156438a5061SNicolas Pitre 	pcpu_free_alloc_info(ai);
3157bbddff05STejun Heo }
3158bbddff05STejun Heo 
3159bbddff05STejun Heo #endif	/* CONFIG_SMP */
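
/*
 * Editor's note: illustrative only.  With the single identity-mapped
 * unit set up above, a UP percpu access degenerates to plain pointer
 * arithmetic with a zero offset; per_cpu_ptr(ptr, 0) is effectively
 * just ptr.
 */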
3160bbddff05STejun Heo 
3161099a19d9STejun Heo /*
31627e8a6304SDennis Zhou (Facebook)  * pcpu_nr_pages - calculate total number of populated backing pages
31637e8a6304SDennis Zhou (Facebook)  *
31647e8a6304SDennis Zhou (Facebook)  * This reflects the number of pages populated to back chunks.  Metadata is
31657e8a6304SDennis Zhou (Facebook)  * excluded from the number exposed in meminfo as the number of backing pages
31667e8a6304SDennis Zhou (Facebook)  * scales with the number of cpus and can quickly outweigh the memory used for
31677e8a6304SDennis Zhou (Facebook)  * metadata.  It also keeps this calculation nice and simple.
31687e8a6304SDennis Zhou (Facebook)  *
31697e8a6304SDennis Zhou (Facebook)  * RETURNS:
31707e8a6304SDennis Zhou (Facebook)  * Total number of populated backing pages in use by the allocator.
31717e8a6304SDennis Zhou (Facebook)  */
31727e8a6304SDennis Zhou (Facebook) unsigned long pcpu_nr_pages(void)
31737e8a6304SDennis Zhou (Facebook) {
31747e8a6304SDennis Zhou (Facebook) 	return pcpu_nr_populated * pcpu_nr_units;
31757e8a6304SDennis Zhou (Facebook) }
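
/*
 * Editor's note: illustrative only.  /proc/meminfo reports this count
 * as its "Percpu:" line in kilobytes, conceptually:
 *
 *	unsigned long percpu_kb = pcpu_nr_pages() << (PAGE_SHIFT - 10);
 */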
31767e8a6304SDennis Zhou (Facebook) 
31777e8a6304SDennis Zhou (Facebook) /*
31781a4d7607STejun Heo  * The percpu allocator is initialized early during boot when neither slab nor
31791a4d7607STejun Heo  * workqueue is available.  Plug async management until everything is up
31801a4d7607STejun Heo  * and running.
31811a4d7607STejun Heo  */
31821a4d7607STejun Heo static int __init percpu_enable_async(void)
31831a4d7607STejun Heo {
31841a4d7607STejun Heo 	pcpu_async_enabled = true;
31851a4d7607STejun Heo 	return 0;
31861a4d7607STejun Heo }
31871a4d7607STejun Heo subsys_initcall(percpu_enable_async);
3188