// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations
 * share one set of chunks, while unaccounted allocations and
 * allocations performed by processes belonging to the root memory
 * cgroup use the second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.
 * All hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
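
/*
 * Worked example (illustrative, not part of the allocator): the
 * byte <-> bit conversion described above, assuming the common
 * PCPU_MIN_ALLOC_SIZE of 4 bytes (1 << PCPU_MIN_ALLOC_SHIFT; see
 * include/linux/percpu.h for the authoritative definitions):
 *
 *	size_t size = 512;				// bytes requested
 *	int bits = size >> PCPU_MIN_ALLOC_SHIFT;	// 512 / 4 = 128 bits
 *
 * Those 128 bits are marked at the same offset in every unit of the
 * chunk, which is what keeps a percpu pointer a fixed offset from each
 * cpu's base register.
 */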

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
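
/*
 * Illustrative sketch (not part of the kernel source): the default
 * translation above is a pure offset, so the two macros are exact
 * inverses.  Assuming pcpu_base_addr and __per_cpu_start are set up:
 *
 *	void *addr = pcpu_base_addr + 0x1000;
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *	void *back = __pcpu_ptr_to_addr(ptr);	// back == addr
 *
 * per_cpu_ptr() later adds a per-cpu unit offset on top of such a
 * pointer to reach a specific cpu's copy.
 */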

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and this count is incremented/decremented by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
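
/*
 * Worked example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5, a
 * chunk with 1024 free bytes gives fls(1024) == 11 and lands in slot
 * max(11 - 5 + 2, 1) == 8.  A fully free chunk (size == pcpu_unit_size)
 * is special-cased by pcpu_size_to_slot() above into the last slot,
 * pcpu_nr_slots - 1.
 */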

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}
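
/*
 * Illustrative sketch: the reverse map stores the owning chunk in the
 * page's otherwise unused ->index field, so the round trip is trivial:
 *
 *	pcpu_set_page_chunk(page, chunk);
 *	WARN_ON(pcpu_get_page_chunk(page) != chunk);	// never fires
 *
 * The free path relies on this mapping, rather than a separate lookup
 * structure, to find the chunk an address belongs to.
 */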

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
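
/*
 * Worked example (illustrative): assuming 4 KiB pages, so
 * PCPU_BITMAP_BLOCK_BITS == 1024, the chunk-relative bit offset 1500
 * decomposes as:
 *
 *	pcpu_off_to_block_index(1500) == 1	// second md_block
 *	pcpu_off_to_block_off(1500)   == 476	// bit within that block
 *	pcpu_block_off_to_off(1, 476) == 1500	// and back again
 */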

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist?  Second, does the
	 * contig_hint start after the scan_hint (which can only fail when
	 * contig_hint == scan_hint)?  Third, is the allocation request
	 * larger than the scan_hint?
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
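
/*
 * Worked example (illustrative): consider a block with first_free == 0,
 * scan_hint_start == 10, scan_hint == 4 and contig_hint_start == 100.
 * An 8-bit request passes all three checks above (a scan_hint exists,
 * the contig_hint starts after it, and 8 > 4), so the scan may begin at
 * bit 14 instead of bit 0, since the 4 free bits at offset 10 provably
 * cannot hold it.  A 3-bit request fails the third check and scans
 * from first_free.
 */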

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First, is there a contig_hint
		 * to check?  Second, have we checked this hint before (by
		 * comparing against block_off)?  Third, is this hint the
		 * same as the right contig hint?  In the last case, it
		 * spills over into the next block and should be handled
		 * by the contig area across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  The scan may begin at block->first_free if the allocation
 * request fits within the block, to check whether the request can be
 * fulfilled prior to the contig hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
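
/*
 * Usage sketch (illustrative, not a verbatim excerpt): a caller that
 * wants the first viable area breaks out of the first iteration and
 * then tests the fail condition:
 *
 *	int bit_off, bits;
 *
 *	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)
 *		break;
 *
 *	if (bit_off == pcpu_chunk_map_bits(chunk))
 *		return -1;
 *
 * pcpu_next_fit_region() parks bit_off at pcpu_chunk_map_bits(chunk)
 * when nothing fits, so the check above is the canonical failure test.
 */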

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than or equal to PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}
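
/*
 * Usage sketch (illustrative): callers pass the size and whitelisted
 * gfp flags straight through and always pair with pcpu_mem_free(),
 * since the backing allocator (kzalloc() vs __vmalloc()) is picked by
 * size and kvfree() handles either:
 *
 *	void *p = pcpu_mem_zalloc(size, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	pcpu_mem_free(p);
 */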

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		struct list_head *pcpu_slot;

		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * that an md_block covers a page.  The hint update functions recognize if
 * a block is made full or broken to calculate deltas for keeping track of
 * free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
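
/*
 * Worked example (illustrative): a contig_hint spanning [8, 16) and an
 * allocation placed at [12, 20) overlap since (8 < 20) && (12 < 16),
 * so the hint must be invalidated.  Back-to-back regions such as
 * [8, 12) and [12, 20) do not overlap because (12 < 12) is false,
 * which is why the bounds are exclusive.
 */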

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}
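
/*
 * Worked example (illustrative): a block whose contig_hint is 50 bits
 * at offset 200 sees a new 80-bit free area at offset 500.  The new
 * area is larger and starts after the old hint, so the old hint is
 * demoted to the scan_hint (scan_hint_start = 200, scan_hint = 50) and
 * the contig_hint becomes 80 bits at offset 500, holding the invariant
 * scan_hint_start < contig_hint_start.
 */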

/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area (hole) and updates the block, as it may
 * change the scan_hint.  We need to scan backwards to ensure we don't
 * miss free bits from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path, as a refresh is
 * only triggered there when we broke the contig_hint; in that case the
 * scan_hint will be before the contig_hint (or after it if
 * scan_hint == contig_hint).  A full scan cannot be avoided on freeing,
 * as we want to find the largest area possibly spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocated area */
	int s_off, e_off;	/* block offsets of the allocated area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}
893ca460b3cSDennis Zhou (Facebook) 
894ca460b3cSDennis Zhou (Facebook) /**
895ca460b3cSDennis Zhou (Facebook)  * pcpu_block_update_hint_free - updates the block hints on the free path
896ca460b3cSDennis Zhou (Facebook)  * @chunk: chunk of interest
897ca460b3cSDennis Zhou (Facebook)  * @bit_off: chunk offset
898ca460b3cSDennis Zhou (Facebook)  * @bits: size of request
899b185cd0dSDennis Zhou (Facebook)  *
900b185cd0dSDennis Zhou (Facebook)  * Updates metadata for the allocation path.  This avoids a blind block
901b185cd0dSDennis Zhou (Facebook)  * refresh by making use of the block contig hints.  If this fails, it scans
902b185cd0dSDennis Zhou (Facebook)  * forward and backward to determine the extent of the free area.  This is
903b185cd0dSDennis Zhou (Facebook)  * capped at the boundary of blocks.
904b185cd0dSDennis Zhou (Facebook)  *
905b185cd0dSDennis Zhou (Facebook)  * A chunk update is triggered if a page becomes free, a block becomes free,
906b185cd0dSDennis Zhou (Facebook)  * or the free spans across blocks.  This tradeoff is to minimize iterating
90792c14cabSDennis Zhou  * over the block metadata to update chunk_md->contig_hint.
90892c14cabSDennis Zhou  * chunk_md->contig_hint may be off by up to a page, but it will never be more
90992c14cabSDennis Zhou  * than the available space.  If the contig hint is contained in one block, it
91092c14cabSDennis Zhou  * will be accurate.
911ca460b3cSDennis Zhou (Facebook)  */
912ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
913ca460b3cSDennis Zhou (Facebook) 					int bits)
914ca460b3cSDennis Zhou (Facebook) {
915b239f7daSDennis Zhou 	int nr_empty_pages = 0;
916ca460b3cSDennis Zhou (Facebook) 	struct pcpu_block_md *s_block, *e_block, *block;
917ca460b3cSDennis Zhou (Facebook) 	int s_index, e_index;	/* block indexes of the freed allocation */
918ca460b3cSDennis Zhou (Facebook) 	int s_off, e_off;	/* block offsets of the freed allocation */
919b185cd0dSDennis Zhou (Facebook) 	int start, end;		/* start and end of the whole free area */
920ca460b3cSDennis Zhou (Facebook) 
921ca460b3cSDennis Zhou (Facebook) 	/*
922ca460b3cSDennis Zhou (Facebook) 	 * Calculate per block offsets.
923ca460b3cSDennis Zhou (Facebook) 	 * The calculation uses an inclusive range, but the resulting offsets
924ca460b3cSDennis Zhou (Facebook) 	 * are [start, end).  e_index always points to the last block in the
925ca460b3cSDennis Zhou (Facebook) 	 * range.
926ca460b3cSDennis Zhou (Facebook) 	 */
927ca460b3cSDennis Zhou (Facebook) 	s_index = pcpu_off_to_block_index(bit_off);
928ca460b3cSDennis Zhou (Facebook) 	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
929ca460b3cSDennis Zhou (Facebook) 	s_off = pcpu_off_to_block_off(bit_off);
930ca460b3cSDennis Zhou (Facebook) 	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
931ca460b3cSDennis Zhou (Facebook) 
932ca460b3cSDennis Zhou (Facebook) 	s_block = chunk->md_blocks + s_index;
933ca460b3cSDennis Zhou (Facebook) 	e_block = chunk->md_blocks + e_index;
934ca460b3cSDennis Zhou (Facebook) 
935b185cd0dSDennis Zhou (Facebook) 	/*
936b185cd0dSDennis Zhou (Facebook) 	 * Check if the freed area aligns with the block->contig_hint.
937b185cd0dSDennis Zhou (Facebook) 	 * If it does, then the scan to find the beginning/end of the
938b185cd0dSDennis Zhou (Facebook) 	 * larger free area can be avoided.
939b185cd0dSDennis Zhou (Facebook) 	 *
940b185cd0dSDennis Zhou (Facebook) 	 * start and end refer to the beginning and end of the free area
941b185cd0dSDennis Zhou (Facebook) 	 * within their respective blocks.  This is not necessarily
942b185cd0dSDennis Zhou (Facebook) 	 * the entire free area, as the free area may span past the
943b185cd0dSDennis Zhou (Facebook) 	 * beginning or end of these blocks.
944b185cd0dSDennis Zhou (Facebook) 	 */
945b185cd0dSDennis Zhou (Facebook) 	start = s_off;
946b185cd0dSDennis Zhou (Facebook) 	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
947b185cd0dSDennis Zhou (Facebook) 		start = s_block->contig_hint_start;
948b185cd0dSDennis Zhou (Facebook) 	} else {
949b185cd0dSDennis Zhou (Facebook) 		/*
950b185cd0dSDennis Zhou (Facebook) 		 * Scan backwards to find the extent of the free area.
951b185cd0dSDennis Zhou (Facebook) 		 * find_last_bit returns the starting bit, so if the start bit
952b185cd0dSDennis Zhou (Facebook) 		 * is returned, that means there was no last bit and the
953b185cd0dSDennis Zhou (Facebook) 		 * remainder of the chunk is free.
954b185cd0dSDennis Zhou (Facebook) 		 */
955b185cd0dSDennis Zhou (Facebook) 		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
956b185cd0dSDennis Zhou (Facebook) 					  start);
957b185cd0dSDennis Zhou (Facebook) 		start = (start == l_bit) ? 0 : l_bit + 1;
958b185cd0dSDennis Zhou (Facebook) 	}
959b185cd0dSDennis Zhou (Facebook) 
960b185cd0dSDennis Zhou (Facebook) 	end = e_off;
961b185cd0dSDennis Zhou (Facebook) 	if (e_off == e_block->contig_hint_start)
962b185cd0dSDennis Zhou (Facebook) 		end = e_block->contig_hint_start + e_block->contig_hint;
963b185cd0dSDennis Zhou (Facebook) 	else
964b185cd0dSDennis Zhou (Facebook) 		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
965b185cd0dSDennis Zhou (Facebook) 				    PCPU_BITMAP_BLOCK_BITS, end);
966b185cd0dSDennis Zhou (Facebook) 
967ca460b3cSDennis Zhou (Facebook) 	/* update s_block */
968b185cd0dSDennis Zhou (Facebook) 	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
969b239f7daSDennis Zhou 	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
970b239f7daSDennis Zhou 		nr_empty_pages++;
971b185cd0dSDennis Zhou (Facebook) 	pcpu_block_update(s_block, start, e_off);
972ca460b3cSDennis Zhou (Facebook) 
973ca460b3cSDennis Zhou (Facebook) 	/* freeing in the same block */
974ca460b3cSDennis Zhou (Facebook) 	if (s_index != e_index) {
975ca460b3cSDennis Zhou (Facebook) 		/* update e_block */
976b239f7daSDennis Zhou 		if (end == PCPU_BITMAP_BLOCK_BITS)
977b239f7daSDennis Zhou 			nr_empty_pages++;
978b185cd0dSDennis Zhou (Facebook) 		pcpu_block_update(e_block, 0, end);
979ca460b3cSDennis Zhou (Facebook) 
980ca460b3cSDennis Zhou (Facebook) 		/* reset md_blocks in the middle */
981b239f7daSDennis Zhou 		nr_empty_pages += (e_index - s_index - 1);
982ca460b3cSDennis Zhou (Facebook) 		for (block = s_block + 1; block < e_block; block++) {
983ca460b3cSDennis Zhou (Facebook) 			block->first_free = 0;
984382b88e9SDennis Zhou 			block->scan_hint = 0;
985ca460b3cSDennis Zhou (Facebook) 			block->contig_hint_start = 0;
986ca460b3cSDennis Zhou (Facebook) 			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
987ca460b3cSDennis Zhou (Facebook) 			block->left_free = PCPU_BITMAP_BLOCK_BITS;
988ca460b3cSDennis Zhou (Facebook) 			block->right_free = PCPU_BITMAP_BLOCK_BITS;
989ca460b3cSDennis Zhou (Facebook) 		}
990ca460b3cSDennis Zhou (Facebook) 	}
991ca460b3cSDennis Zhou (Facebook) 
992b239f7daSDennis Zhou 	if (nr_empty_pages)
993b239f7daSDennis Zhou 		pcpu_update_empty_pages(chunk, nr_empty_pages);
994b239f7daSDennis Zhou 
995b185cd0dSDennis Zhou (Facebook) 	/*
996b239f7daSDennis Zhou 	 * Refresh chunk metadata when the free makes a block free or spans
997b239f7daSDennis Zhou 	 * across blocks.  The contig_hint may be off by up to a page, but if
998b239f7daSDennis Zhou 	 * the contig_hint is contained in a block, it will be accurate with
999b239f7daSDennis Zhou 	 * the else condition below.
1000b185cd0dSDennis Zhou (Facebook) 	 */
1001b239f7daSDennis Zhou 	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1002d33d9f3dSDennis Zhou 		pcpu_chunk_refresh_hint(chunk, true);
1003b185cd0dSDennis Zhou (Facebook) 	else
100492c14cabSDennis Zhou 		pcpu_block_update(&chunk->chunk_md,
100592c14cabSDennis Zhou 				  pcpu_block_off_to_off(s_index, start),
100692c14cabSDennis Zhou 				  end);
1007ca460b3cSDennis Zhou (Facebook) }
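
/*
 * Worked example for the free path above (assuming 4K pages, so
 * PCPU_BITMAP_BLOCK_BITS == 1024): freeing bit_off = 1500, bits = 600
 * gives s_index = 1, e_index = 2, s_off = 476 and e_off = 52, i.e. the
 * freed area starts 476 bits into block 1 and ends 52 bits into block 2.
 * Both blocks then use their contig hints (or scan) to find the true
 * extent of the free area, and the cross-block free triggers a chunk
 * hint refresh.
 */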
1008ca460b3cSDennis Zhou (Facebook) 
1009ca460b3cSDennis Zhou (Facebook) /**
101040064aecSDennis Zhou (Facebook)  * pcpu_is_populated - determines if the region is populated
101140064aecSDennis Zhou (Facebook)  * @chunk: chunk of interest
101240064aecSDennis Zhou (Facebook)  * @bit_off: chunk offset
101340064aecSDennis Zhou (Facebook)  * @bits: size of area
101440064aecSDennis Zhou (Facebook)  * @next_off: return value for the next offset to start searching
101540064aecSDennis Zhou (Facebook)  *
101640064aecSDennis Zhou (Facebook)  * For atomic allocations, check if the backing pages are populated.
101740064aecSDennis Zhou (Facebook)  *
101840064aecSDennis Zhou (Facebook)  * RETURNS:
101940064aecSDennis Zhou (Facebook)  * true if the backing pages are populated, false otherwise.
102040064aecSDennis Zhou (Facebook)  * On failure, *next_off is set for pcpu_find_block_fit() to skip the region.
102140064aecSDennis Zhou (Facebook)  */
102240064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
102340064aecSDennis Zhou (Facebook) 			      int *next_off)
102440064aecSDennis Zhou (Facebook) {
1025e837dfdeSDennis Zhou 	unsigned int page_start, page_end, rs, re;
102640064aecSDennis Zhou (Facebook) 
102740064aecSDennis Zhou (Facebook) 	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
102840064aecSDennis Zhou (Facebook) 	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
102940064aecSDennis Zhou (Facebook) 
103040064aecSDennis Zhou (Facebook) 	rs = page_start;
1031e837dfdeSDennis Zhou 	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
103240064aecSDennis Zhou (Facebook) 	if (rs >= page_end)
103340064aecSDennis Zhou (Facebook) 		return true;
103440064aecSDennis Zhou (Facebook) 
103540064aecSDennis Zhou (Facebook) 	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
103640064aecSDennis Zhou (Facebook) 	return false;
103740064aecSDennis Zhou (Facebook) }
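
/*
 * Worked example (illustrative, with PCPU_MIN_ALLOC_SIZE == 4 bytes and
 * 4K pages): bit_off = 2000 and bits = 100 describe bytes [8000, 8400),
 * so page_start = PFN_DOWN(8000) = 1 and page_end = PFN_UP(8400) = 3.
 * Pages 1 and 2 must both be populated for an atomic allocation to be
 * served from this offset.
 */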
103840064aecSDennis Zhou (Facebook) 
103940064aecSDennis Zhou (Facebook) /**
104040064aecSDennis Zhou (Facebook)  * pcpu_find_block_fit - finds the block index to start searching
104140064aecSDennis Zhou (Facebook)  * @chunk: chunk of interest
104240064aecSDennis Zhou (Facebook)  * @alloc_bits: size of request in allocation units
104340064aecSDennis Zhou (Facebook)  * @align: alignment of area (max PAGE_SIZE bytes)
104440064aecSDennis Zhou (Facebook)  * @pop_only: use populated regions only
104540064aecSDennis Zhou (Facebook)  *
1046b4c2116cSDennis Zhou (Facebook)  * Given a chunk and an allocation spec, find the offset to begin searching
1047b4c2116cSDennis Zhou (Facebook)  * for a free region.  This iterates over the bitmap metadata blocks to
1048b4c2116cSDennis Zhou (Facebook)  * find an offset that is guaranteed to fit the requirements.  It is
1049b4c2116cSDennis Zhou (Facebook)  * not quite first fit: if the allocation does not fit in the contig hint
1050b4c2116cSDennis Zhou (Facebook)  * of a block or chunk, that block or chunk is skipped.  This errs on the
1051b4c2116cSDennis Zhou (Facebook)  * side of caution to prevent excess iteration.  Poor alignment can cause
1052b4c2116cSDennis Zhou (Facebook)  * the allocator to skip over blocks and chunks that have valid free areas.
1053b4c2116cSDennis Zhou (Facebook)  *
105440064aecSDennis Zhou (Facebook)  * RETURNS:
105540064aecSDennis Zhou (Facebook)  * The offset in the bitmap to begin searching.
105640064aecSDennis Zhou (Facebook)  * -1 if no offset is found.
105740064aecSDennis Zhou (Facebook)  */
105840064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
105940064aecSDennis Zhou (Facebook) 			       size_t align, bool pop_only)
106040064aecSDennis Zhou (Facebook) {
106192c14cabSDennis Zhou 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1062b4c2116cSDennis Zhou (Facebook) 	int bit_off, bits, next_off;
106340064aecSDennis Zhou (Facebook) 
106413f96637SDennis Zhou (Facebook) 	/*
106513f96637SDennis Zhou (Facebook) 	 * Check to see if the allocation can fit in the chunk's contig hint.
106613f96637SDennis Zhou (Facebook) 	 * This optimization avoids scanning by assuming that if the request
106713f96637SDennis Zhou (Facebook) 	 * cannot fit in the global hint, there is memory pressure and a new
106813f96637SDennis Zhou (Facebook) 	 * chunk would be created soon.
106913f96637SDennis Zhou (Facebook) 	 */
107092c14cabSDennis Zhou 	bit_off = ALIGN(chunk_md->contig_hint_start, align) -
107192c14cabSDennis Zhou 		  chunk_md->contig_hint_start;
107292c14cabSDennis Zhou 	if (bit_off + alloc_bits > chunk_md->contig_hint)
107313f96637SDennis Zhou (Facebook) 		return -1;
107413f96637SDennis Zhou (Facebook) 
1075d33d9f3dSDennis Zhou 	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1076b4c2116cSDennis Zhou (Facebook) 	bits = 0;
1077b4c2116cSDennis Zhou (Facebook) 	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
107840064aecSDennis Zhou (Facebook) 		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1079b4c2116cSDennis Zhou (Facebook) 						   &next_off))
108040064aecSDennis Zhou (Facebook) 			break;
108140064aecSDennis Zhou (Facebook) 
1082b4c2116cSDennis Zhou (Facebook) 		bit_off = next_off;
108340064aecSDennis Zhou (Facebook) 		bits = 0;
108440064aecSDennis Zhou (Facebook) 	}
108540064aecSDennis Zhou (Facebook) 
108640064aecSDennis Zhou (Facebook) 	if (bit_off == pcpu_chunk_map_bits(chunk))
108740064aecSDennis Zhou (Facebook) 		return -1;
108840064aecSDennis Zhou (Facebook) 
108940064aecSDennis Zhou (Facebook) 	return bit_off;
109040064aecSDennis Zhou (Facebook) }
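
/*
 * Worked example of the contig hint pre-check above (illustrative
 * numbers): with chunk_md->contig_hint_start = 700, contig_hint = 300
 * and a bit alignment of 256, ALIGN(700, 256) = 768, so bit_off = 68
 * bits are lost to alignment.  A request of alloc_bits = 200 still fits
 * (68 + 200 <= 300); alloc_bits = 250 would not, and the chunk is
 * skipped without scanning.
 */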
109140064aecSDennis Zhou (Facebook) 
1092b89462a9SDennis Zhou /*
1093b89462a9SDennis Zhou  * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1094b89462a9SDennis Zhou  * @map: the address to base the search on
1095b89462a9SDennis Zhou  * @size: the bitmap size in bits
1096b89462a9SDennis Zhou  * @start: the bitnumber to start searching at
1097b89462a9SDennis Zhou  * @nr: the number of zeroed bits we're looking for
1098b89462a9SDennis Zhou  * @align_mask: alignment mask for zero area
1099b89462a9SDennis Zhou  * @largest_off: offset of the largest area skipped
1100b89462a9SDennis Zhou  * @largest_bits: size of the largest area skipped
1101b89462a9SDennis Zhou  *
1102b89462a9SDennis Zhou  * The @align_mask should be one less than a power of 2.
1103b89462a9SDennis Zhou  *
1104b89462a9SDennis Zhou  * This is a modified version of bitmap_find_next_zero_area_off() to remember
1105b89462a9SDennis Zhou  * the largest area that was skipped.  This is imperfect, but in general is
1106b89462a9SDennis Zhou  * good enough.  The largest remembered region is the largest failed region
1107b89462a9SDennis Zhou  * seen.  This does not include anything we possibly skipped due to alignment.
1108b89462a9SDennis Zhou  * pcpu_block_update_scan() does scan backwards to try and recover what was
1109b89462a9SDennis Zhou  * lost to alignment.  While this can cause scanning to miss earlier possible
1110b89462a9SDennis Zhou  * free areas, smaller allocations will eventually fill those holes.
1111b89462a9SDennis Zhou  */
1112b89462a9SDennis Zhou static unsigned long pcpu_find_zero_area(unsigned long *map,
1113b89462a9SDennis Zhou 					 unsigned long size,
1114b89462a9SDennis Zhou 					 unsigned long start,
1115b89462a9SDennis Zhou 					 unsigned long nr,
1116b89462a9SDennis Zhou 					 unsigned long align_mask,
1117b89462a9SDennis Zhou 					 unsigned long *largest_off,
1118b89462a9SDennis Zhou 					 unsigned long *largest_bits)
1119b89462a9SDennis Zhou {
1120b89462a9SDennis Zhou 	unsigned long index, end, i, area_off, area_bits;
1121b89462a9SDennis Zhou again:
1122b89462a9SDennis Zhou 	index = find_next_zero_bit(map, size, start);
1123b89462a9SDennis Zhou 
1124b89462a9SDennis Zhou 	/* Align allocation */
1125b89462a9SDennis Zhou 	index = __ALIGN_MASK(index, align_mask);
1126b89462a9SDennis Zhou 	area_off = index;
1127b89462a9SDennis Zhou 
1128b89462a9SDennis Zhou 	end = index + nr;
1129b89462a9SDennis Zhou 	if (end > size)
1130b89462a9SDennis Zhou 		return end;
1131b89462a9SDennis Zhou 	i = find_next_bit(map, end, index);
1132b89462a9SDennis Zhou 	if (i < end) {
1133b89462a9SDennis Zhou 		area_bits = i - area_off;
1134b89462a9SDennis Zhou 		/* remember largest unused area with best alignment */
1135b89462a9SDennis Zhou 		if (area_bits > *largest_bits ||
1136b89462a9SDennis Zhou 		    (area_bits == *largest_bits && *largest_off &&
1137b89462a9SDennis Zhou 		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1138b89462a9SDennis Zhou 			*largest_off = area_off;
1139b89462a9SDennis Zhou 			*largest_bits = area_bits;
1140b89462a9SDennis Zhou 		}
1141b89462a9SDennis Zhou 
1142b89462a9SDennis Zhou 		start = i + 1;
1143b89462a9SDennis Zhou 		goto again;
1144b89462a9SDennis Zhou 	}
1145b89462a9SDennis Zhou 	return index;
1146b89462a9SDennis Zhou }
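
/*
 * A minimal usage sketch of pcpu_find_zero_area() (mirrors the call in
 * pcpu_alloc_area() below; this wrapper is hypothetical and not part of
 * the allocator).  A return value >= @size means no fit was found, in
 * which case @off/@bits describe the largest free run that was skipped
 * and can be fed back into the scan hints.
 */
static inline unsigned long
pcpu_find_zero_area_example(unsigned long *map, unsigned long size,
			    unsigned long nr, unsigned long align)
{
	unsigned long off = 0, bits = 0;

	/* @align must be a power of 2; the mask is one less than that */
	return pcpu_find_zero_area(map, size, 0, nr, align - 1, &off, &bits);
}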
1147b89462a9SDennis Zhou 
114840064aecSDennis Zhou (Facebook) /**
114940064aecSDennis Zhou (Facebook)  * pcpu_alloc_area - allocates an area from a pcpu_chunk
115040064aecSDennis Zhou (Facebook)  * @chunk: chunk of interest
115140064aecSDennis Zhou (Facebook)  * @alloc_bits: size of request in allocation units
115240064aecSDennis Zhou (Facebook)  * @align: alignment of area (max PAGE_SIZE)
115340064aecSDennis Zhou (Facebook)  * @start: bit_off to start searching
115440064aecSDennis Zhou (Facebook)  *
115540064aecSDennis Zhou (Facebook)  * This function takes in a @start offset to begin searching to fit an
1156b4c2116cSDennis Zhou (Facebook)  * allocation of @alloc_bits with alignment @align.  It needs to scan
1157b4c2116cSDennis Zhou (Facebook)  * the allocation map because, if the request fits within the block's
1158b4c2116cSDennis Zhou (Facebook)  * contig hint, @start will be block->first_free.  This attempts to fill
1159b4c2116cSDennis Zhou (Facebook)  * the allocation prior to breaking the contig hint.  The allocation and
1160b4c2116cSDennis Zhou (Facebook)  * boundary maps are updated accordingly if a valid free area is
1161b4c2116cSDennis Zhou (Facebook)  * confirmed.
116240064aecSDennis Zhou (Facebook)  *
116340064aecSDennis Zhou (Facebook)  * RETURNS:
116440064aecSDennis Zhou (Facebook)  * Allocated addr offset in @chunk on success.
116540064aecSDennis Zhou (Facebook)  * -1 if no matching area is found.
116640064aecSDennis Zhou (Facebook)  */
116740064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
116840064aecSDennis Zhou (Facebook) 			   size_t align, int start)
116940064aecSDennis Zhou (Facebook) {
117092c14cabSDennis Zhou 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
117140064aecSDennis Zhou (Facebook) 	size_t align_mask = (align) ? (align - 1) : 0;
1172b89462a9SDennis Zhou 	unsigned long area_off = 0, area_bits = 0;
117340064aecSDennis Zhou (Facebook) 	int bit_off, end, oslot;
11749f7dcf22STejun Heo 
11754f996e23STejun Heo 	lockdep_assert_held(&pcpu_lock);
11764f996e23STejun Heo 
117740064aecSDennis Zhou (Facebook) 	oslot = pcpu_chunk_slot(chunk);
1178833af842STejun Heo 
1179833af842STejun Heo 	/*
118040064aecSDennis Zhou (Facebook) 	 * Search to find a fit.
1181833af842STejun Heo 	 */
11828c43004aSDennis Zhou 	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
11838c43004aSDennis Zhou 		    pcpu_chunk_map_bits(chunk));
1184b89462a9SDennis Zhou 	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1185b89462a9SDennis Zhou 				      align_mask, &area_off, &area_bits);
118640064aecSDennis Zhou (Facebook) 	if (bit_off >= end)
1187a16037c8STejun Heo 		return -1;
1188a16037c8STejun Heo 
1189b89462a9SDennis Zhou 	if (area_bits)
1190b89462a9SDennis Zhou 		pcpu_block_update_scan(chunk, area_off, area_bits);
1191b89462a9SDennis Zhou 
119240064aecSDennis Zhou (Facebook) 	/* update alloc map */
119340064aecSDennis Zhou (Facebook) 	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1194a16037c8STejun Heo 
119540064aecSDennis Zhou (Facebook) 	/* update boundary map */
119640064aecSDennis Zhou (Facebook) 	set_bit(bit_off, chunk->bound_map);
119740064aecSDennis Zhou (Facebook) 	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
119840064aecSDennis Zhou (Facebook) 	set_bit(bit_off + alloc_bits, chunk->bound_map);
1199a16037c8STejun Heo 
120040064aecSDennis Zhou (Facebook) 	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
120140064aecSDennis Zhou (Facebook) 
120286b442fbSDennis Zhou (Facebook) 	/* update first free bit */
120392c14cabSDennis Zhou 	if (bit_off == chunk_md->first_free)
120492c14cabSDennis Zhou 		chunk_md->first_free = find_next_zero_bit(
120586b442fbSDennis Zhou (Facebook) 					chunk->alloc_map,
120686b442fbSDennis Zhou (Facebook) 					pcpu_chunk_map_bits(chunk),
120786b442fbSDennis Zhou (Facebook) 					bit_off + alloc_bits);
120886b442fbSDennis Zhou (Facebook) 
1209ca460b3cSDennis Zhou (Facebook) 	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
121040064aecSDennis Zhou (Facebook) 
121140064aecSDennis Zhou (Facebook) 	pcpu_chunk_relocate(chunk, oslot);
121240064aecSDennis Zhou (Facebook) 
121340064aecSDennis Zhou (Facebook) 	return bit_off * PCPU_MIN_ALLOC_SIZE;
1214a16037c8STejun Heo }
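
/*
 * The boundary map written above encodes allocation extents with set bits
 * at the start and at one-past-the-end of each area.  A hedged sketch of
 * how the size can be recovered from just an offset (this mirrors
 * pcpu_free_area() below; the helper itself is hypothetical):
 */
static inline int pcpu_area_bits_example(struct pcpu_chunk *chunk, int bit_off)
{
	/* the next boundary bit after bit_off marks the end of the area */
	return find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			     bit_off + 1) - bit_off;
}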
1215a16037c8STejun Heo 
1216a16037c8STejun Heo /**
121740064aecSDennis Zhou (Facebook)  * pcpu_free_area - frees the corresponding offset
1218fbf59bc9STejun Heo  * @chunk: chunk of interest
121940064aecSDennis Zhou (Facebook)  * @off: addr offset into chunk
1220fbf59bc9STejun Heo  *
122140064aecSDennis Zhou (Facebook)  * This function determines the size of an allocation to free using
122240064aecSDennis Zhou (Facebook)  * the boundary bitmap and clears the allocation map.
12235b32af91SRoman Gushchin  *
12245b32af91SRoman Gushchin  * RETURNS:
12255b32af91SRoman Gushchin  * Number of freed bytes.
1226fbf59bc9STejun Heo  */
12275b32af91SRoman Gushchin static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1228fbf59bc9STejun Heo {
122992c14cabSDennis Zhou 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
12305b32af91SRoman Gushchin 	int bit_off, bits, end, oslot, freed;
1231fbf59bc9STejun Heo 
12325ccd30e4SDennis Zhou 	lockdep_assert_held(&pcpu_lock);
123330a5b536SDennis Zhou 	pcpu_stats_area_dealloc(chunk);
12345ccd30e4SDennis Zhou 
123540064aecSDennis Zhou (Facebook) 	oslot = pcpu_chunk_slot(chunk);
1236723ad1d9SAl Viro 
123740064aecSDennis Zhou (Facebook) 	bit_off = off / PCPU_MIN_ALLOC_SIZE;
1238fbf59bc9STejun Heo 
123940064aecSDennis Zhou (Facebook) 	/* find end index */
124040064aecSDennis Zhou (Facebook) 	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
124140064aecSDennis Zhou (Facebook) 			    bit_off + 1);
124240064aecSDennis Zhou (Facebook) 	bits = end - bit_off;
124340064aecSDennis Zhou (Facebook) 	bitmap_clear(chunk->alloc_map, bit_off, bits);
12443d331ad7SAl Viro 
12455b32af91SRoman Gushchin 	freed = bits * PCPU_MIN_ALLOC_SIZE;
12465b32af91SRoman Gushchin 
124740064aecSDennis Zhou (Facebook) 	/* update metadata */
12485b32af91SRoman Gushchin 	chunk->free_bytes += freed;
1249fbf59bc9STejun Heo 
125086b442fbSDennis Zhou (Facebook) 	/* update first free bit */
125192c14cabSDennis Zhou 	chunk_md->first_free = min(chunk_md->first_free, bit_off);
125286b442fbSDennis Zhou (Facebook) 
1253ca460b3cSDennis Zhou (Facebook) 	pcpu_block_update_hint_free(chunk, bit_off, bits);
1254b539b87fSTejun Heo 
1255fbf59bc9STejun Heo 	pcpu_chunk_relocate(chunk, oslot);
12565b32af91SRoman Gushchin 
12575b32af91SRoman Gushchin 	return freed;
1258fbf59bc9STejun Heo }
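
/*
 * Worked example (with PCPU_MIN_ALLOC_SIZE == 4 bytes): freeing off = 6144
 * gives bit_off = 1536.  If the allocation was 512 bytes, the next
 * boundary bit sits at 1664, so bits = 128 and freed = 128 * 4 = 512
 * bytes are returned to the chunk.
 */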
1259fbf59bc9STejun Heo 
1260047924c9SDennis Zhou static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1261047924c9SDennis Zhou {
1262047924c9SDennis Zhou 	block->scan_hint = 0;
1263047924c9SDennis Zhou 	block->contig_hint = nr_bits;
1264047924c9SDennis Zhou 	block->left_free = nr_bits;
1265047924c9SDennis Zhou 	block->right_free = nr_bits;
1266047924c9SDennis Zhou 	block->first_free = 0;
1267047924c9SDennis Zhou 	block->nr_bits = nr_bits;
1268047924c9SDennis Zhou }
1269047924c9SDennis Zhou 
1270ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1271ca460b3cSDennis Zhou (Facebook) {
1272ca460b3cSDennis Zhou (Facebook) 	struct pcpu_block_md *md_block;
1273ca460b3cSDennis Zhou (Facebook) 
127492c14cabSDennis Zhou 	/* init the chunk's block */
127592c14cabSDennis Zhou 	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
127692c14cabSDennis Zhou 
1277ca460b3cSDennis Zhou (Facebook) 	for (md_block = chunk->md_blocks;
1278ca460b3cSDennis Zhou (Facebook) 	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1279047924c9SDennis Zhou 	     md_block++)
1280047924c9SDennis Zhou 		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1281ca460b3cSDennis Zhou (Facebook) }
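
/*
 * After initialization, every per-block hint reports a fully free block
 * (contig_hint == left_free == right_free == PCPU_BITMAP_BLOCK_BITS), and
 * the chunk-level block reports the same over pcpu_chunk_map_bits() bits,
 * so the first allocation can be placed without any scanning.
 */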
1282ca460b3cSDennis Zhou (Facebook) 
128340064aecSDennis Zhou (Facebook) /**
128440064aecSDennis Zhou (Facebook)  * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
128540064aecSDennis Zhou (Facebook)  * @tmp_addr: the start of the region served
128640064aecSDennis Zhou (Facebook)  * @map_size: size of the region served
128740064aecSDennis Zhou (Facebook)  *
128840064aecSDennis Zhou (Facebook)  * This is responsible for creating the chunks that serve the first chunk.  The
128940064aecSDennis Zhou (Facebook)  * base_addr is @tmp_addr aligned down to a page boundary, while the region
129040064aecSDennis Zhou (Facebook)  * end is aligned up.  Offsets are tracked to determine the region served.
129140064aecSDennis Zhou (Facebook)  * All this is done to appease the bitmap allocator by avoiding partial blocks.
129240064aecSDennis Zhou (Facebook)  *
129340064aecSDennis Zhou (Facebook)  * RETURNS:
129440064aecSDennis Zhou (Facebook)  * Chunk serving the region at @tmp_addr of @map_size.
129540064aecSDennis Zhou (Facebook)  */
1296c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
129740064aecSDennis Zhou (Facebook) 							 int map_size)
129810edf5b0SDennis Zhou (Facebook) {
129910edf5b0SDennis Zhou (Facebook) 	struct pcpu_chunk *chunk;
1300ca460b3cSDennis Zhou (Facebook) 	unsigned long aligned_addr, lcm_align;
130140064aecSDennis Zhou (Facebook) 	int start_offset, offset_bits, region_size, region_bits;
1302f655f405SMike Rapoport 	size_t alloc_size;
1303c0ebfdc3SDennis Zhou (Facebook) 
1304c0ebfdc3SDennis Zhou (Facebook) 	/* region calculations */
1305c0ebfdc3SDennis Zhou (Facebook) 	aligned_addr = tmp_addr & PAGE_MASK;
1306c0ebfdc3SDennis Zhou (Facebook) 
1307c0ebfdc3SDennis Zhou (Facebook) 	start_offset = tmp_addr - aligned_addr;
13086b9d7c8eSDennis Zhou (Facebook) 
1309ca460b3cSDennis Zhou (Facebook) 	/*
1310ca460b3cSDennis Zhou (Facebook) 	 * Align the end of the region with the LCM of PAGE_SIZE and
1311ca460b3cSDennis Zhou (Facebook) 	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
1312ca460b3cSDennis Zhou (Facebook) 	 * the other.
1313ca460b3cSDennis Zhou (Facebook) 	 */
1314ca460b3cSDennis Zhou (Facebook) 	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1315ca460b3cSDennis Zhou (Facebook) 	region_size = ALIGN(start_offset + map_size, lcm_align);
131610edf5b0SDennis Zhou (Facebook) 
1317c0ebfdc3SDennis Zhou (Facebook) 	/* allocate chunk */
1318f655f405SMike Rapoport 	alloc_size = sizeof(struct pcpu_chunk) +
1319f655f405SMike Rapoport 		BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(chunk->populated[0]);
1320f655f405SMike Rapoport 	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1321f655f405SMike Rapoport 	if (!chunk)
1322f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1323f655f405SMike Rapoport 		      alloc_size);
1324c0ebfdc3SDennis Zhou (Facebook) 
132510edf5b0SDennis Zhou (Facebook) 	INIT_LIST_HEAD(&chunk->list);
1326c0ebfdc3SDennis Zhou (Facebook) 
1327c0ebfdc3SDennis Zhou (Facebook) 	chunk->base_addr = (void *)aligned_addr;
132810edf5b0SDennis Zhou (Facebook) 	chunk->start_offset = start_offset;
13296b9d7c8eSDennis Zhou (Facebook) 	chunk->end_offset = region_size - chunk->start_offset - map_size;
1330c0ebfdc3SDennis Zhou (Facebook) 
13318ab16c43SDennis Zhou (Facebook) 	chunk->nr_pages = region_size >> PAGE_SHIFT;
133240064aecSDennis Zhou (Facebook) 	region_bits = pcpu_chunk_map_bits(chunk);
1333c0ebfdc3SDennis Zhou (Facebook) 
1334f655f405SMike Rapoport 	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1335f655f405SMike Rapoport 	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1336f655f405SMike Rapoport 	if (!chunk->alloc_map)
1337f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1338f655f405SMike Rapoport 		      alloc_size);
1339f655f405SMike Rapoport 
1340f655f405SMike Rapoport 	alloc_size =
1341f655f405SMike Rapoport 		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1342f655f405SMike Rapoport 	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1343f655f405SMike Rapoport 	if (!chunk->bound_map)
1344f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1345f655f405SMike Rapoport 		      alloc_size);
1346f655f405SMike Rapoport 
1347f655f405SMike Rapoport 	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1348f655f405SMike Rapoport 	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1349f655f405SMike Rapoport 	if (!chunk->md_blocks)
1350f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1351f655f405SMike Rapoport 		      alloc_size);
1352f655f405SMike Rapoport 
1353*3c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
1354*3c7be18aSRoman Gushchin 	/* first chunk isn't memcg-aware */
1355*3c7be18aSRoman Gushchin 	chunk->obj_cgroups = NULL;
1356*3c7be18aSRoman Gushchin #endif
1357ca460b3cSDennis Zhou (Facebook) 	pcpu_init_md_blocks(chunk);
135810edf5b0SDennis Zhou (Facebook) 
135910edf5b0SDennis Zhou (Facebook) 	/* manage populated page bitmap */
136010edf5b0SDennis Zhou (Facebook) 	chunk->immutable = true;
13618ab16c43SDennis Zhou (Facebook) 	bitmap_fill(chunk->populated, chunk->nr_pages);
13628ab16c43SDennis Zhou (Facebook) 	chunk->nr_populated = chunk->nr_pages;
1363b239f7daSDennis Zhou 	chunk->nr_empty_pop_pages = chunk->nr_pages;
136410edf5b0SDennis Zhou (Facebook) 
136540064aecSDennis Zhou (Facebook) 	chunk->free_bytes = map_size;
1366c0ebfdc3SDennis Zhou (Facebook) 
1367c0ebfdc3SDennis Zhou (Facebook) 	if (chunk->start_offset) {
1368c0ebfdc3SDennis Zhou (Facebook) 		/* hide the beginning of the bitmap */
136940064aecSDennis Zhou (Facebook) 		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
137040064aecSDennis Zhou (Facebook) 		bitmap_set(chunk->alloc_map, 0, offset_bits);
137140064aecSDennis Zhou (Facebook) 		set_bit(0, chunk->bound_map);
137240064aecSDennis Zhou (Facebook) 		set_bit(offset_bits, chunk->bound_map);
1373ca460b3cSDennis Zhou (Facebook) 
137492c14cabSDennis Zhou 		chunk->chunk_md.first_free = offset_bits;
137586b442fbSDennis Zhou (Facebook) 
1376ca460b3cSDennis Zhou (Facebook) 		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1377c0ebfdc3SDennis Zhou (Facebook) 	}
1378c0ebfdc3SDennis Zhou (Facebook) 
13796b9d7c8eSDennis Zhou (Facebook) 	if (chunk->end_offset) {
13806b9d7c8eSDennis Zhou (Facebook) 		/* hide the end of the bitmap */
138140064aecSDennis Zhou (Facebook) 		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
138240064aecSDennis Zhou (Facebook) 		bitmap_set(chunk->alloc_map,
138340064aecSDennis Zhou (Facebook) 			   pcpu_chunk_map_bits(chunk) - offset_bits,
138440064aecSDennis Zhou (Facebook) 			   offset_bits);
138540064aecSDennis Zhou (Facebook) 		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
138640064aecSDennis Zhou (Facebook) 			chunk->bound_map);
138740064aecSDennis Zhou (Facebook) 		set_bit(region_bits, chunk->bound_map);
13886b9d7c8eSDennis Zhou (Facebook) 
1389ca460b3cSDennis Zhou (Facebook) 		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1390ca460b3cSDennis Zhou (Facebook) 					     - offset_bits, offset_bits);
1391ca460b3cSDennis Zhou (Facebook) 	}
139240064aecSDennis Zhou (Facebook) 
139310edf5b0SDennis Zhou (Facebook) 	return chunk;
139410edf5b0SDennis Zhou (Facebook) }
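
/*
 * Worked example (illustrative, 4K pages with PCPU_BITMAP_BLOCK_SIZE ==
 * PAGE_SIZE, so lcm_align == 4K): a tmp_addr 0x100 bytes into a page with
 * map_size = 12K gives start_offset = 0x100 and region_size =
 * ALIGN(0x100 + 0x3000, 0x1000) = 16K, leaving end_offset = 0xf00.  Both
 * offsets are then hidden from the bitmap allocator above by marking
 * them allocated.
 */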
139510edf5b0SDennis Zhou (Facebook) 
1396*3c7be18aSRoman Gushchin static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
13976081089fSTejun Heo {
13986081089fSTejun Heo 	struct pcpu_chunk *chunk;
139940064aecSDennis Zhou (Facebook) 	int region_bits;
14006081089fSTejun Heo 
140147504ee0SDennis Zhou 	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
14026081089fSTejun Heo 	if (!chunk)
14036081089fSTejun Heo 		return NULL;
14046081089fSTejun Heo 
14056081089fSTejun Heo 	INIT_LIST_HEAD(&chunk->list);
1406c0ebfdc3SDennis Zhou (Facebook) 	chunk->nr_pages = pcpu_unit_pages;
140740064aecSDennis Zhou (Facebook) 	region_bits = pcpu_chunk_map_bits(chunk);
140840064aecSDennis Zhou (Facebook) 
140940064aecSDennis Zhou (Facebook) 	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
141047504ee0SDennis Zhou 					   sizeof(chunk->alloc_map[0]), gfp);
141140064aecSDennis Zhou (Facebook) 	if (!chunk->alloc_map)
141240064aecSDennis Zhou (Facebook) 		goto alloc_map_fail;
141340064aecSDennis Zhou (Facebook) 
141440064aecSDennis Zhou (Facebook) 	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
141547504ee0SDennis Zhou 					   sizeof(chunk->bound_map[0]), gfp);
141640064aecSDennis Zhou (Facebook) 	if (!chunk->bound_map)
141740064aecSDennis Zhou (Facebook) 		goto bound_map_fail;
141840064aecSDennis Zhou (Facebook) 
1419ca460b3cSDennis Zhou (Facebook) 	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
142047504ee0SDennis Zhou 					   sizeof(chunk->md_blocks[0]), gfp);
1421ca460b3cSDennis Zhou (Facebook) 	if (!chunk->md_blocks)
1422ca460b3cSDennis Zhou (Facebook) 		goto md_blocks_fail;
1423ca460b3cSDennis Zhou (Facebook) 
1424*3c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
1425*3c7be18aSRoman Gushchin 	if (pcpu_is_memcg_chunk(type)) {
1426*3c7be18aSRoman Gushchin 		chunk->obj_cgroups =
1427*3c7be18aSRoman Gushchin 			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1428*3c7be18aSRoman Gushchin 					sizeof(struct obj_cgroup *), gfp);
1429*3c7be18aSRoman Gushchin 		if (!chunk->obj_cgroups)
1430*3c7be18aSRoman Gushchin 			goto objcg_fail;
1431*3c7be18aSRoman Gushchin 	}
1432*3c7be18aSRoman Gushchin #endif
1433*3c7be18aSRoman Gushchin 
1434ca460b3cSDennis Zhou (Facebook) 	pcpu_init_md_blocks(chunk);
1435ca460b3cSDennis Zhou (Facebook) 
143640064aecSDennis Zhou (Facebook) 	/* init metadata */
143740064aecSDennis Zhou (Facebook) 	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1438c0ebfdc3SDennis Zhou (Facebook) 
14396081089fSTejun Heo 	return chunk;
144040064aecSDennis Zhou (Facebook) 
1441*3c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
1442*3c7be18aSRoman Gushchin objcg_fail:
1443*3c7be18aSRoman Gushchin 	pcpu_mem_free(chunk->md_blocks);
1444*3c7be18aSRoman Gushchin #endif
1445ca460b3cSDennis Zhou (Facebook) md_blocks_fail:
1446ca460b3cSDennis Zhou (Facebook) 	pcpu_mem_free(chunk->bound_map);
144740064aecSDennis Zhou (Facebook) bound_map_fail:
144840064aecSDennis Zhou (Facebook) 	pcpu_mem_free(chunk->alloc_map);
144940064aecSDennis Zhou (Facebook) alloc_map_fail:
145040064aecSDennis Zhou (Facebook) 	pcpu_mem_free(chunk);
145140064aecSDennis Zhou (Facebook) 
145240064aecSDennis Zhou (Facebook) 	return NULL;
14536081089fSTejun Heo }
14546081089fSTejun Heo 
14556081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk)
14566081089fSTejun Heo {
14576081089fSTejun Heo 	if (!chunk)
14586081089fSTejun Heo 		return;
1459*3c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
1460*3c7be18aSRoman Gushchin 	pcpu_mem_free(chunk->obj_cgroups);
1461*3c7be18aSRoman Gushchin #endif
14626685b357SMike Rapoport 	pcpu_mem_free(chunk->md_blocks);
146340064aecSDennis Zhou (Facebook) 	pcpu_mem_free(chunk->bound_map);
146440064aecSDennis Zhou (Facebook) 	pcpu_mem_free(chunk->alloc_map);
14651d5cfdb0STetsuo Handa 	pcpu_mem_free(chunk);
14666081089fSTejun Heo }
14676081089fSTejun Heo 
1468b539b87fSTejun Heo /**
1469b539b87fSTejun Heo  * pcpu_chunk_populated - post-population bookkeeping
1470b539b87fSTejun Heo  * @chunk: pcpu_chunk which got populated
1471b539b87fSTejun Heo  * @page_start: the start page
1472b539b87fSTejun Heo  * @page_end: the end page
1473b539b87fSTejun Heo  *
1474b539b87fSTejun Heo  * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1475b539b87fSTejun Heo  * the bookkeeping information accordingly.  Must be called after each
1476b539b87fSTejun Heo  * successful population.
1480b539b87fSTejun Heo  */
148140064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1482b239f7daSDennis Zhou 				 int page_end)
1483b539b87fSTejun Heo {
1484b539b87fSTejun Heo 	int nr = page_end - page_start;
1485b539b87fSTejun Heo 
1486b539b87fSTejun Heo 	lockdep_assert_held(&pcpu_lock);
1487b539b87fSTejun Heo 
1488b539b87fSTejun Heo 	bitmap_set(chunk->populated, page_start, nr);
1489b539b87fSTejun Heo 	chunk->nr_populated += nr;
14907e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated += nr;
149140064aecSDennis Zhou (Facebook) 
1492b239f7daSDennis Zhou 	pcpu_update_empty_pages(chunk, nr);
149340064aecSDennis Zhou (Facebook) }
1494b539b87fSTejun Heo 
1495b539b87fSTejun Heo /**
1496b539b87fSTejun Heo  * pcpu_chunk_depopulated - post-depopulation bookkeeping
1497b539b87fSTejun Heo  * @chunk: pcpu_chunk which got depopulated
1498b539b87fSTejun Heo  * @page_start: the start page
1499b539b87fSTejun Heo  * @page_end: the end page
1500b539b87fSTejun Heo  *
1501b539b87fSTejun Heo  * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1502b539b87fSTejun Heo  * Update the bookkeeping information accordingly.  Must be called after
1503b539b87fSTejun Heo  * each successful depopulation.
1504b539b87fSTejun Heo  */
1505b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1506b539b87fSTejun Heo 				   int page_start, int page_end)
1507b539b87fSTejun Heo {
1508b539b87fSTejun Heo 	int nr = page_end - page_start;
1509b539b87fSTejun Heo 
1510b539b87fSTejun Heo 	lockdep_assert_held(&pcpu_lock);
1511b539b87fSTejun Heo 
1512b539b87fSTejun Heo 	bitmap_clear(chunk->populated, page_start, nr);
1513b539b87fSTejun Heo 	chunk->nr_populated -= nr;
15147e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated -= nr;
1515b239f7daSDennis Zhou 
1516b239f7daSDennis Zhou 	pcpu_update_empty_pages(chunk, -nr);
1517b539b87fSTejun Heo }
1518b539b87fSTejun Heo 
1519fbf59bc9STejun Heo /*
15209f645532STejun Heo  * Chunk management implementation.
1521fbf59bc9STejun Heo  *
15229f645532STejun Heo  * To allow different implementations, chunk alloc/free and
15239f645532STejun Heo  * [de]population are implemented in a separate file which is pulled
15249f645532STejun Heo  * into this file and compiled together.  The following functions
15259f645532STejun Heo  * should be implemented.
1526ccea34b5STejun Heo  *
15279f645532STejun Heo  * pcpu_populate_chunk		- populate the specified range of a chunk
15289f645532STejun Heo  * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
15299f645532STejun Heo  * pcpu_create_chunk		- create a new chunk
15309f645532STejun Heo  * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
15319f645532STejun Heo  * pcpu_addr_to_page		- translate address to physical address
15329f645532STejun Heo  * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
1533fbf59bc9STejun Heo  */
153415d9f3d1SDennis Zhou static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
153547504ee0SDennis Zhou 			       int page_start, int page_end, gfp_t gfp);
153615d9f3d1SDennis Zhou static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
153715d9f3d1SDennis Zhou 				  int page_start, int page_end);
1538*3c7be18aSRoman Gushchin static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
1539*3c7be18aSRoman Gushchin 					    gfp_t gfp);
15409f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
15419f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr);
15429f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1543fbf59bc9STejun Heo 
1544b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM
1545b0c9778bSTejun Heo #include "percpu-km.c"
1546b0c9778bSTejun Heo #else
15479f645532STejun Heo #include "percpu-vm.c"
1548b0c9778bSTejun Heo #endif
1549fbf59bc9STejun Heo 
1550fbf59bc9STejun Heo /**
155188999a89STejun Heo  * pcpu_chunk_addr_search - determine chunk containing specified address
155288999a89STejun Heo  * @addr: address for which the chunk needs to be determined.
155388999a89STejun Heo  *
1554c0ebfdc3SDennis Zhou (Facebook)  * This is an internal function that handles all but static allocations.
1555c0ebfdc3SDennis Zhou (Facebook)  * Static percpu address values should never be passed into the allocator.
1556c0ebfdc3SDennis Zhou (Facebook)  *
155788999a89STejun Heo  * RETURNS:
155888999a89STejun Heo  * The address of the found chunk.
155988999a89STejun Heo  */
156088999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
156188999a89STejun Heo {
1562c0ebfdc3SDennis Zhou (Facebook) 	/* is it in the dynamic region (first chunk)? */
1563560f2c23SDennis Zhou (Facebook) 	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1564c0ebfdc3SDennis Zhou (Facebook) 		return pcpu_first_chunk;
1565c0ebfdc3SDennis Zhou (Facebook) 
1566c0ebfdc3SDennis Zhou (Facebook) 	/* is it in the reserved region? */
1567560f2c23SDennis Zhou (Facebook) 	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
156888999a89STejun Heo 		return pcpu_reserved_chunk;
156988999a89STejun Heo 
157088999a89STejun Heo 	/*
157188999a89STejun Heo 	 * The address is relative to unit0 which might be unused and
157288999a89STejun Heo 	 * thus unmapped.  Offset the address to the unit space of the
157388999a89STejun Heo 	 * current processor before looking it up in the vmalloc
157488999a89STejun Heo 	 * space.  Note that any possible cpu id can be used here, so
157588999a89STejun Heo 	 * there's no need to worry about preemption or cpu hotplug.
157688999a89STejun Heo 	 */
157788999a89STejun Heo 	addr += pcpu_unit_offsets[raw_smp_processor_id()];
15789f645532STejun Heo 	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
157988999a89STejun Heo }
158088999a89STejun Heo 
1581*3c7be18aSRoman Gushchin #ifdef CONFIG_MEMCG_KMEM
1582*3c7be18aSRoman Gushchin static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1583*3c7be18aSRoman Gushchin 						     struct obj_cgroup **objcgp)
1584*3c7be18aSRoman Gushchin {
1585*3c7be18aSRoman Gushchin 	struct obj_cgroup *objcg;
1586*3c7be18aSRoman Gushchin 
1587*3c7be18aSRoman Gushchin 	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT) ||
1588*3c7be18aSRoman Gushchin 	    memcg_kmem_bypass())
1589*3c7be18aSRoman Gushchin 		return PCPU_CHUNK_ROOT;
1590*3c7be18aSRoman Gushchin 
1591*3c7be18aSRoman Gushchin 	objcg = get_obj_cgroup_from_current();
1592*3c7be18aSRoman Gushchin 	if (!objcg)
1593*3c7be18aSRoman Gushchin 		return PCPU_CHUNK_ROOT;
1594*3c7be18aSRoman Gushchin 
1595*3c7be18aSRoman Gushchin 	if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
1596*3c7be18aSRoman Gushchin 		obj_cgroup_put(objcg);
1597*3c7be18aSRoman Gushchin 		return PCPU_FAIL_ALLOC;
1598*3c7be18aSRoman Gushchin 	}
1599*3c7be18aSRoman Gushchin 
1600*3c7be18aSRoman Gushchin 	*objcgp = objcg;
1601*3c7be18aSRoman Gushchin 	return PCPU_CHUNK_MEMCG;
1602*3c7be18aSRoman Gushchin }
1603*3c7be18aSRoman Gushchin 
1604*3c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1605*3c7be18aSRoman Gushchin 				       struct pcpu_chunk *chunk, int off,
1606*3c7be18aSRoman Gushchin 				       size_t size)
1607*3c7be18aSRoman Gushchin {
1608*3c7be18aSRoman Gushchin 	if (!objcg)
1609*3c7be18aSRoman Gushchin 		return;
1610*3c7be18aSRoman Gushchin 
1611*3c7be18aSRoman Gushchin 	if (chunk) {
1612*3c7be18aSRoman Gushchin 		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1613*3c7be18aSRoman Gushchin 	} else {
1614*3c7be18aSRoman Gushchin 		obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1615*3c7be18aSRoman Gushchin 		obj_cgroup_put(objcg);
1616*3c7be18aSRoman Gushchin 	}
1617*3c7be18aSRoman Gushchin }
1618*3c7be18aSRoman Gushchin 
1619*3c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1620*3c7be18aSRoman Gushchin {
1621*3c7be18aSRoman Gushchin 	struct obj_cgroup *objcg;
1622*3c7be18aSRoman Gushchin 
1623*3c7be18aSRoman Gushchin 	if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)))
1624*3c7be18aSRoman Gushchin 		return;
1625*3c7be18aSRoman Gushchin 
1626*3c7be18aSRoman Gushchin 	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1627*3c7be18aSRoman Gushchin 	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1628*3c7be18aSRoman Gushchin 
1629*3c7be18aSRoman Gushchin 	obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1630*3c7be18aSRoman Gushchin 
1631*3c7be18aSRoman Gushchin 	obj_cgroup_put(objcg);
1632*3c7be18aSRoman Gushchin }
1633*3c7be18aSRoman Gushchin 
1634*3c7be18aSRoman Gushchin #else /* CONFIG_MEMCG_KMEM */
1635*3c7be18aSRoman Gushchin static enum pcpu_chunk_type
1636*3c7be18aSRoman Gushchin pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1637*3c7be18aSRoman Gushchin {
1638*3c7be18aSRoman Gushchin 	return PCPU_CHUNK_ROOT;
1639*3c7be18aSRoman Gushchin }
1640*3c7be18aSRoman Gushchin 
1641*3c7be18aSRoman Gushchin static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1642*3c7be18aSRoman Gushchin 				       struct pcpu_chunk *chunk, int off,
1643*3c7be18aSRoman Gushchin 				       size_t size)
1644*3c7be18aSRoman Gushchin {
1645*3c7be18aSRoman Gushchin }
1646*3c7be18aSRoman Gushchin 
1647*3c7be18aSRoman Gushchin static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1648*3c7be18aSRoman Gushchin {
1649*3c7be18aSRoman Gushchin }
1650*3c7be18aSRoman Gushchin #endif /* CONFIG_MEMCG_KMEM */
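
/*
 * The hooks above bracket every allocation in pcpu_alloc() below.  The
 * pre-alloc hook selects the chunk type and charges the objcg up front
 * for size * num_possible_cpus() bytes, since a percpu allocation hands
 * out one copy per possible CPU (e.g. a 64-byte request on a machine
 * with 8 possible CPUs charges 512 bytes).  The post-alloc hook then
 * either records the objcg for the allocated offset or, on failure,
 * uncharges and drops the reference.
 */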
1651*3c7be18aSRoman Gushchin 
165288999a89STejun Heo /**
1653edcb4639STejun Heo  * pcpu_alloc - the percpu allocator
1654cae3aeb8STejun Heo  * @size: size of area to allocate in bytes
1655fbf59bc9STejun Heo  * @align: alignment of area (max PAGE_SIZE)
1656edcb4639STejun Heo  * @reserved: allocate from the reserved chunk if available
16575835d96eSTejun Heo  * @gfp: allocation flags
1658fbf59bc9STejun Heo  *
16595835d96eSTejun Heo  * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
16600ea7eeecSDaniel Borkmann  * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
16610ea7eeecSDaniel Borkmann  * then no warning will be triggered on invalid or failed allocation
16620ea7eeecSDaniel Borkmann  * requests.
1663fbf59bc9STejun Heo  *
1664fbf59bc9STejun Heo  * RETURNS:
1665fbf59bc9STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
1666fbf59bc9STejun Heo  */
16675835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
16685835d96eSTejun Heo 				 gfp_t gfp)
1669fbf59bc9STejun Heo {
167028307d93SFilipe Manana 	gfp_t pcpu_gfp;
167128307d93SFilipe Manana 	bool is_atomic;
167228307d93SFilipe Manana 	bool do_warn;
1673*3c7be18aSRoman Gushchin 	enum pcpu_chunk_type type;
1674*3c7be18aSRoman Gushchin 	struct list_head *pcpu_slot;
1675*3c7be18aSRoman Gushchin 	struct obj_cgroup *objcg = NULL;
1676f2badb0cSTejun Heo 	static int warn_limit = 10;
16778744d859SDennis Zhou 	struct pcpu_chunk *chunk, *next;
1678f2badb0cSTejun Heo 	const char *err;
167940064aecSDennis Zhou (Facebook) 	int slot, off, cpu, ret;
1680403a91b1SJiri Kosina 	unsigned long flags;
1681f528f0b8SCatalin Marinas 	void __percpu *ptr;
168240064aecSDennis Zhou (Facebook) 	size_t bits, bit_align;
1683fbf59bc9STejun Heo 
168428307d93SFilipe Manana 	gfp = current_gfp_context(gfp);
168528307d93SFilipe Manana 	/* whitelisted flags that can be passed to the backing allocators */
168628307d93SFilipe Manana 	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
168728307d93SFilipe Manana 	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
168828307d93SFilipe Manana 	do_warn = !(gfp & __GFP_NOWARN);
168928307d93SFilipe Manana 
1690723ad1d9SAl Viro 	/*
169140064aecSDennis Zhou (Facebook) 	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
169240064aecSDennis Zhou (Facebook) 	 * therefore alignment must be a minimum of that many bytes.
169340064aecSDennis Zhou (Facebook) 	 * An allocation may have up to PCPU_MIN_ALLOC_SIZE - 1 bytes of
169440064aecSDennis Zhou (Facebook) 	 * internal fragmentation from rounding up.
1695723ad1d9SAl Viro 	 */
1696d2f3c384SDennis Zhou (Facebook) 	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1697d2f3c384SDennis Zhou (Facebook) 		align = PCPU_MIN_ALLOC_SIZE;
1698723ad1d9SAl Viro 
1699d2f3c384SDennis Zhou (Facebook) 	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
170040064aecSDennis Zhou (Facebook) 	bits = size >> PCPU_MIN_ALLOC_SHIFT;
170140064aecSDennis Zhou (Facebook) 	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
17022f69fa82SViro 
17033ca45a46Szijun_hu 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
17043ca45a46Szijun_hu 		     !is_power_of_2(align))) {
17050ea7eeecSDaniel Borkmann 		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1706756a025fSJoe Perches 		     size, align);
1707fbf59bc9STejun Heo 		return NULL;
1708fbf59bc9STejun Heo 	}
1709fbf59bc9STejun Heo 
1710*3c7be18aSRoman Gushchin 	type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg);
1711*3c7be18aSRoman Gushchin 	if (unlikely(type == PCPU_FAIL_ALLOC))
1712*3c7be18aSRoman Gushchin 		return NULL;
1713*3c7be18aSRoman Gushchin 	pcpu_slot = pcpu_chunk_list(type);
1714*3c7be18aSRoman Gushchin 
1715f52ba1feSKirill Tkhai 	if (!is_atomic) {
1716f52ba1feSKirill Tkhai 		/*
1717f52ba1feSKirill Tkhai 		 * pcpu_balance_workfn() allocates memory under this mutex,
1718f52ba1feSKirill Tkhai 		 * and it may wait for memory reclaim. Allow current task
1719f52ba1feSKirill Tkhai 		 * to become OOM victim, in case of memory pressure.
1720f52ba1feSKirill Tkhai 		 */
1721*3c7be18aSRoman Gushchin 		if (gfp & __GFP_NOFAIL) {
17226710e594STejun Heo 			mutex_lock(&pcpu_alloc_mutex);
1723*3c7be18aSRoman Gushchin 		} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1724*3c7be18aSRoman Gushchin 			pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1725f52ba1feSKirill Tkhai 			return NULL;
1726f52ba1feSKirill Tkhai 		}
1727*3c7be18aSRoman Gushchin 	}
17286710e594STejun Heo 
1729403a91b1SJiri Kosina 	spin_lock_irqsave(&pcpu_lock, flags);
1730fbf59bc9STejun Heo 
1731edcb4639STejun Heo 	/* serve reserved allocations from the reserved chunk if available */
1732edcb4639STejun Heo 	if (reserved && pcpu_reserved_chunk) {
1733edcb4639STejun Heo 		chunk = pcpu_reserved_chunk;
1734833af842STejun Heo 
173540064aecSDennis Zhou (Facebook) 		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
173640064aecSDennis Zhou (Facebook) 		if (off < 0) {
1737833af842STejun Heo 			err = "alloc from reserved chunk failed";
1738ccea34b5STejun Heo 			goto fail_unlock;
1739f2badb0cSTejun Heo 		}
1740833af842STejun Heo 
174140064aecSDennis Zhou (Facebook) 		off = pcpu_alloc_area(chunk, bits, bit_align, off);
1742edcb4639STejun Heo 		if (off >= 0)
1743edcb4639STejun Heo 			goto area_found;
1744833af842STejun Heo 
1745f2badb0cSTejun Heo 		err = "alloc from reserved chunk failed";
1746ccea34b5STejun Heo 		goto fail_unlock;
1747edcb4639STejun Heo 	}
1748edcb4639STejun Heo 
1749ccea34b5STejun Heo restart:
1750edcb4639STejun Heo 	/* search through normal chunks */
1751fbf59bc9STejun Heo 	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
17528744d859SDennis Zhou 		list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
175340064aecSDennis Zhou (Facebook) 			off = pcpu_find_block_fit(chunk, bits, bit_align,
175440064aecSDennis Zhou (Facebook) 						  is_atomic);
17558744d859SDennis Zhou 			if (off < 0) {
17568744d859SDennis Zhou 				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
17578744d859SDennis Zhou 					pcpu_chunk_move(chunk, 0);
1758fbf59bc9STejun Heo 				continue;
17598744d859SDennis Zhou 			}
1760ccea34b5STejun Heo 
176140064aecSDennis Zhou (Facebook) 			off = pcpu_alloc_area(chunk, bits, bit_align, off);
1762fbf59bc9STejun Heo 			if (off >= 0)
1763fbf59bc9STejun Heo 				goto area_found;
176440064aecSDennis Zhou (Facebook) 
1765fbf59bc9STejun Heo 		}
1766fbf59bc9STejun Heo 	}
1767fbf59bc9STejun Heo 
1768403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
1769ccea34b5STejun Heo 
1770b38d08f3STejun Heo 	/*
1771b38d08f3STejun Heo 	 * No space left.  Create a new chunk.  We don't want multiple
1772b38d08f3STejun Heo 	 * tasks to create chunks simultaneously.  Serialize and create iff
1773b38d08f3STejun Heo 	 * there's still no empty chunk after grabbing the mutex.
1774b38d08f3STejun Heo 	 */
177511df02bfSDennis Zhou 	if (is_atomic) {
177611df02bfSDennis Zhou 		err = "atomic alloc failed, no space left";
17775835d96eSTejun Heo 		goto fail;
177811df02bfSDennis Zhou 	}
17795835d96eSTejun Heo 
1780b38d08f3STejun Heo 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
1781*3c7be18aSRoman Gushchin 		chunk = pcpu_create_chunk(type, pcpu_gfp);
1782f2badb0cSTejun Heo 		if (!chunk) {
1783f2badb0cSTejun Heo 			err = "failed to allocate new chunk";
1784b38d08f3STejun Heo 			goto fail;
1785f2badb0cSTejun Heo 		}
1786ccea34b5STejun Heo 
1787403a91b1SJiri Kosina 		spin_lock_irqsave(&pcpu_lock, flags);
1788fbf59bc9STejun Heo 		pcpu_chunk_relocate(chunk, -1);
1789b38d08f3STejun Heo 	} else {
1790b38d08f3STejun Heo 		spin_lock_irqsave(&pcpu_lock, flags);
1791b38d08f3STejun Heo 	}
1792b38d08f3STejun Heo 
1793ccea34b5STejun Heo 	goto restart;
1794fbf59bc9STejun Heo 
1795fbf59bc9STejun Heo area_found:
179630a5b536SDennis Zhou 	pcpu_stats_area_alloc(chunk, size);
1797403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
1798ccea34b5STejun Heo 
1799dca49645STejun Heo 	/* populate if not all pages are already there */
18005835d96eSTejun Heo 	if (!is_atomic) {
1801e837dfdeSDennis Zhou 		unsigned int page_start, page_end, rs, re;
1802e04d3208STejun Heo 
1803dca49645STejun Heo 		page_start = PFN_DOWN(off);
1804dca49645STejun Heo 		page_end = PFN_UP(off + size);
1805dca49645STejun Heo 
1806e837dfdeSDennis Zhou 		bitmap_for_each_clear_region(chunk->populated, rs, re,
180791e914c5SDennis Zhou (Facebook) 					     page_start, page_end) {
1808dca49645STejun Heo 			WARN_ON(chunk->immutable);
1809dca49645STejun Heo 
1810554fef1cSDennis Zhou 			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1811b38d08f3STejun Heo 
1812403a91b1SJiri Kosina 			spin_lock_irqsave(&pcpu_lock, flags);
1813b38d08f3STejun Heo 			if (ret) {
181440064aecSDennis Zhou (Facebook) 				pcpu_free_area(chunk, off);
1815f2badb0cSTejun Heo 				err = "failed to populate";
1816ccea34b5STejun Heo 				goto fail_unlock;
1817fbf59bc9STejun Heo 			}
1818b239f7daSDennis Zhou 			pcpu_chunk_populated(chunk, rs, re);
1819b38d08f3STejun Heo 			spin_unlock_irqrestore(&pcpu_lock, flags);
1820dca49645STejun Heo 		}
1821dca49645STejun Heo 
1822ccea34b5STejun Heo 		mutex_unlock(&pcpu_alloc_mutex);
1823e04d3208STejun Heo 	}
1824ccea34b5STejun Heo 
18251a4d7607STejun Heo 	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
18261a4d7607STejun Heo 		pcpu_schedule_balance_work();
18271a4d7607STejun Heo 
1828dca49645STejun Heo 	/* clear the areas and return address relative to base address */
1829dca49645STejun Heo 	for_each_possible_cpu(cpu)
1830dca49645STejun Heo 		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1831dca49645STejun Heo 
1832f528f0b8SCatalin Marinas 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
18338a8c35faSLarry Finger 	kmemleak_alloc_percpu(ptr, size, gfp);
1834df95e795SDennis Zhou 
1835df95e795SDennis Zhou 	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1836df95e795SDennis Zhou 			chunk->base_addr, off, ptr);
1837df95e795SDennis Zhou 
1838*3c7be18aSRoman Gushchin 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1839*3c7be18aSRoman Gushchin 
1840f528f0b8SCatalin Marinas 	return ptr;
1841ccea34b5STejun Heo 
1842ccea34b5STejun Heo fail_unlock:
1843403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
1844b38d08f3STejun Heo fail:
1845df95e795SDennis Zhou 	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1846df95e795SDennis Zhou 
18470ea7eeecSDaniel Borkmann 	if (!is_atomic && do_warn && warn_limit) {
1848870d4b12SJoe Perches 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
18495835d96eSTejun Heo 			size, align, is_atomic, err);
1850f2badb0cSTejun Heo 		dump_stack();
1851f2badb0cSTejun Heo 		if (!--warn_limit)
1852870d4b12SJoe Perches 			pr_info("limit reached, disable warning\n");
1853f2badb0cSTejun Heo 	}
18541a4d7607STejun Heo 	if (is_atomic) {
18551a4d7607STejun Heo 		/* see the flag handling in pcpu_balance_workfn() */
18561a4d7607STejun Heo 		pcpu_atomic_alloc_failed = true;
18571a4d7607STejun Heo 		pcpu_schedule_balance_work();
18586710e594STejun Heo 	} else {
18596710e594STejun Heo 		mutex_unlock(&pcpu_alloc_mutex);
18601a4d7607STejun Heo 	}
1861*3c7be18aSRoman Gushchin 
1862*3c7be18aSRoman Gushchin 	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1863*3c7be18aSRoman Gushchin 
1864ccea34b5STejun Heo 	return NULL;
1865fbf59bc9STejun Heo }
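
/*
 * In short, the allocation path above is: charge the memcg (if any),
 * serve from the reserved chunk when requested, otherwise scan chunks
 * per size slot via pcpu_find_block_fit()/pcpu_alloc_area(), create a
 * new chunk if nothing fits (non-atomic only), populate any missing
 * backing pages, then zero each CPU's copy and return the __percpu
 * pointer.
 */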
1866edcb4639STejun Heo 
1867edcb4639STejun Heo /**
18685835d96eSTejun Heo  * __alloc_percpu_gfp - allocate dynamic percpu area
1869edcb4639STejun Heo  * @size: size of area to allocate in bytes
1870edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
18715835d96eSTejun Heo  * @gfp: allocation flags
1872edcb4639STejun Heo  *
18735835d96eSTejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
18745835d96eSTejun Heo  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
18750ea7eeecSDaniel Borkmann  * be called from any context but is a lot more likely to fail. If @gfp
18760ea7eeecSDaniel Borkmann  * has __GFP_NOWARN then no warning will be triggered on invalid or failed
18770ea7eeecSDaniel Borkmann  * allocation requests.
1878ccea34b5STejun Heo  *
1879edcb4639STejun Heo  * RETURNS:
1880edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
1881edcb4639STejun Heo  */
18825835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
18835835d96eSTejun Heo {
18845835d96eSTejun Heo 	return pcpu_alloc(size, align, false, gfp);
18855835d96eSTejun Heo }
18865835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
18875835d96eSTejun Heo 
18885835d96eSTejun Heo /**
18895835d96eSTejun Heo  * __alloc_percpu - allocate dynamic percpu area
18905835d96eSTejun Heo  * @size: size of area to allocate in bytes
18915835d96eSTejun Heo  * @align: alignment of area (max PAGE_SIZE)
18925835d96eSTejun Heo  *
18935835d96eSTejun Heo  * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
18945835d96eSTejun Heo  */
189543cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align)
1896edcb4639STejun Heo {
18975835d96eSTejun Heo 	return pcpu_alloc(size, align, false, GFP_KERNEL);
1898edcb4639STejun Heo }
1899fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu);
1900fbf59bc9STejun Heo 
1901edcb4639STejun Heo /**
1902edcb4639STejun Heo  * __alloc_reserved_percpu - allocate reserved percpu area
1903edcb4639STejun Heo  * @size: size of area to allocate in bytes
1904edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
1905edcb4639STejun Heo  *
19069329ba97STejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align
19079329ba97STejun Heo  * from reserved percpu area if arch has set it up; otherwise,
19089329ba97STejun Heo  * allocation is served from the same dynamic area.  Might sleep.
19099329ba97STejun Heo  * Might trigger writeouts.
1910edcb4639STejun Heo  *
1911ccea34b5STejun Heo  * CONTEXT:
1912ccea34b5STejun Heo  * Does GFP_KERNEL allocation.
1913ccea34b5STejun Heo  *
1914edcb4639STejun Heo  * RETURNS:
1915edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
1916edcb4639STejun Heo  */
191743cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1918edcb4639STejun Heo {
19195835d96eSTejun Heo 	return pcpu_alloc(size, align, true, GFP_KERNEL);
1920edcb4639STejun Heo }
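
/*
 * Editor's note: the module loader is the primary caller; simplified,
 * its percpu handling boils down to something like the sketch below
 * (error handling elided, identifiers hypothetical).
 *
 *	void __percpu *ptr = __alloc_reserved_percpu(size, align);
 *
 *	if (!ptr)
 *		return -ENOMEM;
 */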
1921edcb4639STejun Heo 
1922a56dbddfSTejun Heo /**
1923*3c7be18aSRoman Gushchin  * __pcpu_balance_workfn - manage the number of free chunks and populated pages
1924*3c7be18aSRoman Gushchin  * @type: chunk type
1925a56dbddfSTejun Heo  *
192647504ee0SDennis Zhou  * Reclaim all fully free chunks except for the first one.  This is also
192747504ee0SDennis Zhou  * responsible for maintaining the pool of empty populated pages.  However,
192847504ee0SDennis Zhou  * it is possible for this to be called when physical memory is scarce,
192947504ee0SDennis Zhou  * causing the OOM killer to be triggered.  We should avoid triggering OOM
193047504ee0SDennis Zhou  * until an actual allocation fails, as requests may still be serviceable
193147504ee0SDennis Zhou  * from already backed regions.
1932a56dbddfSTejun Heo  */
1933*3c7be18aSRoman Gushchin static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
1934fbf59bc9STejun Heo {
193547504ee0SDennis Zhou 	/* gfp flags passed to underlying allocators */
1936554fef1cSDennis Zhou 	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
1937fe6bd8c3STejun Heo 	LIST_HEAD(to_free);
1938*3c7be18aSRoman Gushchin 	struct list_head *pcpu_slot = pcpu_chunk_list(type);
1939fe6bd8c3STejun Heo 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1940a56dbddfSTejun Heo 	struct pcpu_chunk *chunk, *next;
19411a4d7607STejun Heo 	int slot, nr_to_pop, ret;
1942a56dbddfSTejun Heo 
19431a4d7607STejun Heo 	/*
19441a4d7607STejun Heo 	 * There's no reason to keep around multiple unused chunks, and VM
19451a4d7607STejun Heo 	 * areas can be scarce.  Destroy all free chunks except for one.
19461a4d7607STejun Heo 	 */
1947ccea34b5STejun Heo 	mutex_lock(&pcpu_alloc_mutex);
1948ccea34b5STejun Heo 	spin_lock_irq(&pcpu_lock);
1949a56dbddfSTejun Heo 
1950fe6bd8c3STejun Heo 	list_for_each_entry_safe(chunk, next, free_head, list) {
19518d408b4bSTejun Heo 		WARN_ON(chunk->immutable);
1952a56dbddfSTejun Heo 
1953a56dbddfSTejun Heo 		/* spare the first one */
1954fe6bd8c3STejun Heo 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1955a56dbddfSTejun Heo 			continue;
1956a56dbddfSTejun Heo 
1957fe6bd8c3STejun Heo 		list_move(&chunk->list, &to_free);
1958a56dbddfSTejun Heo 	}
1959a56dbddfSTejun Heo 
1960ccea34b5STejun Heo 	spin_unlock_irq(&pcpu_lock);
1961a56dbddfSTejun Heo 
1962fe6bd8c3STejun Heo 	list_for_each_entry_safe(chunk, next, &to_free, list) {
1963e837dfdeSDennis Zhou 		unsigned int rs, re;
1964dca49645STejun Heo 
1965e837dfdeSDennis Zhou 		bitmap_for_each_set_region(chunk->populated, rs, re, 0,
196691e914c5SDennis Zhou (Facebook) 					   chunk->nr_pages) {
1967a93ace48STejun Heo 			pcpu_depopulate_chunk(chunk, rs, re);
1968b539b87fSTejun Heo 			spin_lock_irq(&pcpu_lock);
1969b539b87fSTejun Heo 			pcpu_chunk_depopulated(chunk, rs, re);
1970b539b87fSTejun Heo 			spin_unlock_irq(&pcpu_lock);
1971a93ace48STejun Heo 		}
19726081089fSTejun Heo 		pcpu_destroy_chunk(chunk);
1973accd4f36SEric Dumazet 		cond_resched();
1974fbf59bc9STejun Heo 	}
1975971f3918STejun Heo 
19761a4d7607STejun Heo 	/*
19771a4d7607STejun Heo 	 * Ensure there are a certain number of free populated pages for
19781a4d7607STejun Heo 	 * atomic allocs.  Fill up from the most packed so that atomic
19791a4d7607STejun Heo 	 * allocs don't increase fragmentation.  If atomic allocation
19801a4d7607STejun Heo 	 * failed previously, always populate the maximum amount.  This
19811a4d7607STejun Heo 	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
19821a4d7607STejun Heo 	 * failing indefinitely; however, large atomic allocs are not
19831a4d7607STejun Heo 	 * something we support properly and can be highly unreliable and
19841a4d7607STejun Heo 	 * inefficient.
19851a4d7607STejun Heo 	 */
19861a4d7607STejun Heo retry_pop:
19871a4d7607STejun Heo 	if (pcpu_atomic_alloc_failed) {
19881a4d7607STejun Heo 		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
19891a4d7607STejun Heo 		/* best effort anyway, don't worry about synchronization */
19901a4d7607STejun Heo 		pcpu_atomic_alloc_failed = false;
19911a4d7607STejun Heo 	} else {
19921a4d7607STejun Heo 		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
19931a4d7607STejun Heo 				  pcpu_nr_empty_pop_pages,
19941a4d7607STejun Heo 				  0, PCPU_EMPTY_POP_PAGES_HIGH);
19951a4d7607STejun Heo 	}
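	/*
	 * e.g. with PCPU_EMPTY_POP_PAGES_HIGH == 4 (value illustrative)
	 * and three empty populated pages left, the clamp above yields
	 * nr_to_pop == 1.
	 */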
19961a4d7607STejun Heo 
19971a4d7607STejun Heo 	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1998e837dfdeSDennis Zhou 		unsigned int nr_unpop = 0, rs, re;
19991a4d7607STejun Heo 
20001a4d7607STejun Heo 		if (!nr_to_pop)
20011a4d7607STejun Heo 			break;
20021a4d7607STejun Heo 
20031a4d7607STejun Heo 		spin_lock_irq(&pcpu_lock);
20041a4d7607STejun Heo 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
20058ab16c43SDennis Zhou (Facebook) 			nr_unpop = chunk->nr_pages - chunk->nr_populated;
20061a4d7607STejun Heo 			if (nr_unpop)
20071a4d7607STejun Heo 				break;
20081a4d7607STejun Heo 		}
20091a4d7607STejun Heo 		spin_unlock_irq(&pcpu_lock);
20101a4d7607STejun Heo 
20111a4d7607STejun Heo 		if (!nr_unpop)
20121a4d7607STejun Heo 			continue;
20131a4d7607STejun Heo 
20141a4d7607STejun Heo 		/* @chunk can't go away while pcpu_alloc_mutex is held */
2015e837dfdeSDennis Zhou 		bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
201691e914c5SDennis Zhou (Facebook) 					     chunk->nr_pages) {
2017e837dfdeSDennis Zhou 			int nr = min_t(int, re - rs, nr_to_pop);
20181a4d7607STejun Heo 
201947504ee0SDennis Zhou 			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
20201a4d7607STejun Heo 			if (!ret) {
20211a4d7607STejun Heo 				nr_to_pop -= nr;
20221a4d7607STejun Heo 				spin_lock_irq(&pcpu_lock);
2023b239f7daSDennis Zhou 				pcpu_chunk_populated(chunk, rs, rs + nr);
20241a4d7607STejun Heo 				spin_unlock_irq(&pcpu_lock);
20251a4d7607STejun Heo 			} else {
20261a4d7607STejun Heo 				nr_to_pop = 0;
20271a4d7607STejun Heo 			}
20281a4d7607STejun Heo 
20291a4d7607STejun Heo 			if (!nr_to_pop)
20301a4d7607STejun Heo 				break;
20311a4d7607STejun Heo 		}
20321a4d7607STejun Heo 	}
20331a4d7607STejun Heo 
20341a4d7607STejun Heo 	if (nr_to_pop) {
20351a4d7607STejun Heo 		/* ran out of chunks to populate, create a new one and retry */
2036*3c7be18aSRoman Gushchin 		chunk = pcpu_create_chunk(type, gfp);
20371a4d7607STejun Heo 		if (chunk) {
20381a4d7607STejun Heo 			spin_lock_irq(&pcpu_lock);
20391a4d7607STejun Heo 			pcpu_chunk_relocate(chunk, -1);
20401a4d7607STejun Heo 			spin_unlock_irq(&pcpu_lock);
20411a4d7607STejun Heo 			goto retry_pop;
20421a4d7607STejun Heo 		}
20431a4d7607STejun Heo 	}
20441a4d7607STejun Heo 
2045971f3918STejun Heo 	mutex_unlock(&pcpu_alloc_mutex);
2046a56dbddfSTejun Heo }
2047fbf59bc9STejun Heo 
2048fbf59bc9STejun Heo /**
2049*3c7be18aSRoman Gushchin  * pcpu_balance_workfn - manage the number of free chunks and populated pages
2050*3c7be18aSRoman Gushchin  * @work: unused
2051*3c7be18aSRoman Gushchin  *
2052*3c7be18aSRoman Gushchin  * Call __pcpu_balance_workfn() for each chunk type.
2053*3c7be18aSRoman Gushchin  */
2054*3c7be18aSRoman Gushchin static void pcpu_balance_workfn(struct work_struct *work)
2055*3c7be18aSRoman Gushchin {
2056*3c7be18aSRoman Gushchin 	enum pcpu_chunk_type type;
2057*3c7be18aSRoman Gushchin 
2058*3c7be18aSRoman Gushchin 	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
2059*3c7be18aSRoman Gushchin 		__pcpu_balance_workfn(type);
2060*3c7be18aSRoman Gushchin }
2061*3c7be18aSRoman Gushchin 
2062*3c7be18aSRoman Gushchin /**
2063fbf59bc9STejun Heo  * free_percpu - free percpu area
2064fbf59bc9STejun Heo  * @ptr: pointer to area to free
2065fbf59bc9STejun Heo  *
2066ccea34b5STejun Heo  * Free percpu area @ptr.
2067ccea34b5STejun Heo  *
2068ccea34b5STejun Heo  * CONTEXT:
2069ccea34b5STejun Heo  * Can be called from atomic context.
2070fbf59bc9STejun Heo  */
207143cf38ebSTejun Heo void free_percpu(void __percpu *ptr)
2072fbf59bc9STejun Heo {
2073129182e5SAndrew Morton 	void *addr;
2074fbf59bc9STejun Heo 	struct pcpu_chunk *chunk;
2075ccea34b5STejun Heo 	unsigned long flags;
2076*3c7be18aSRoman Gushchin 	int size, off;
2077198790d9SJohn Sperbeck 	bool need_balance = false;
2078*3c7be18aSRoman Gushchin 	struct list_head *pcpu_slot;
2079fbf59bc9STejun Heo 
2080fbf59bc9STejun Heo 	if (!ptr)
2081fbf59bc9STejun Heo 		return;
2082fbf59bc9STejun Heo 
2083f528f0b8SCatalin Marinas 	kmemleak_free_percpu(ptr);
2084f528f0b8SCatalin Marinas 
2085129182e5SAndrew Morton 	addr = __pcpu_ptr_to_addr(ptr);
2086129182e5SAndrew Morton 
2087ccea34b5STejun Heo 	spin_lock_irqsave(&pcpu_lock, flags);
2088fbf59bc9STejun Heo 
2089fbf59bc9STejun Heo 	chunk = pcpu_chunk_addr_search(addr);
2090bba174f5STejun Heo 	off = addr - chunk->base_addr;
2091fbf59bc9STejun Heo 
2092*3c7be18aSRoman Gushchin 	size = pcpu_free_area(chunk, off);
2093*3c7be18aSRoman Gushchin 
2094*3c7be18aSRoman Gushchin 	pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
2095*3c7be18aSRoman Gushchin 
2096*3c7be18aSRoman Gushchin 	pcpu_memcg_free_hook(chunk, off, size);
2097fbf59bc9STejun Heo 
2098a56dbddfSTejun Heo 	/* if there is more than one fully free chunk, wake up the grim reaper */
209940064aecSDennis Zhou (Facebook) 	if (chunk->free_bytes == pcpu_unit_size) {
2100fbf59bc9STejun Heo 		struct pcpu_chunk *pos;
2101fbf59bc9STejun Heo 
2102a56dbddfSTejun Heo 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
2103fbf59bc9STejun Heo 			if (pos != chunk) {
2104198790d9SJohn Sperbeck 				need_balance = true;
2105fbf59bc9STejun Heo 				break;
2106fbf59bc9STejun Heo 			}
2107fbf59bc9STejun Heo 	}
2108fbf59bc9STejun Heo 
2109df95e795SDennis Zhou 	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2110df95e795SDennis Zhou 
2111ccea34b5STejun Heo 	spin_unlock_irqrestore(&pcpu_lock, flags);
2112198790d9SJohn Sperbeck 
2113198790d9SJohn Sperbeck 	if (need_balance)
2114198790d9SJohn Sperbeck 		pcpu_schedule_balance_work();
2115fbf59bc9STejun Heo }
2116fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu);
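
/*
 * Illustrative sketch (editor's example): since the free path only takes
 * the irq-safe pcpu_lock and defers balancing to a workqueue, freeing is
 * fine under a spinlock, unlike a GFP_KERNEL allocation.  Identifiers
 * are hypothetical.
 *
 *	spin_lock_irqsave(&obj->lock, flags);
 *	free_percpu(obj->stats);
 *	obj->stats = NULL;
 *	spin_unlock_irqrestore(&obj->lock, flags);
 */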
2117fbf59bc9STejun Heo 
2118383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2119383776faSThomas Gleixner {
2120383776faSThomas Gleixner #ifdef CONFIG_SMP
2121383776faSThomas Gleixner 	const size_t static_size = __per_cpu_end - __per_cpu_start;
2122383776faSThomas Gleixner 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2123383776faSThomas Gleixner 	unsigned int cpu;
2124383776faSThomas Gleixner 
2125383776faSThomas Gleixner 	for_each_possible_cpu(cpu) {
2126383776faSThomas Gleixner 		void *start = per_cpu_ptr(base, cpu);
2127383776faSThomas Gleixner 		void *va = (void *)addr;
2128383776faSThomas Gleixner 
2129383776faSThomas Gleixner 		if (va >= start && va < start + static_size) {
21308ce371f9SPeter Zijlstra 			if (can_addr) {
2131383776faSThomas Gleixner 				*can_addr = (unsigned long) (va - start);
21328ce371f9SPeter Zijlstra 				*can_addr += (unsigned long)
21338ce371f9SPeter Zijlstra 					per_cpu_ptr(base, get_boot_cpu_id());
21348ce371f9SPeter Zijlstra 			}
2135383776faSThomas Gleixner 			return true;
2136383776faSThomas Gleixner 		}
2137383776faSThomas Gleixner 	}
2138383776faSThomas Gleixner #endif
2139383776faSThomas Gleixner 	/* on UP, can't distinguish from other static vars, always false */
2140383776faSThomas Gleixner 	return false;
2141383776faSThomas Gleixner }
2142383776faSThomas Gleixner 
21433b034b0dSVivek Goyal /**
214410fad5e4STejun Heo  * is_kernel_percpu_address - test whether address is from static percpu area
214510fad5e4STejun Heo  * @addr: address to test
214610fad5e4STejun Heo  *
214710fad5e4STejun Heo  * Test whether @addr belongs to the in-kernel static percpu area.  Module
214810fad5e4STejun Heo  * static percpu areas are not considered.  For those, use
214910fad5e4STejun Heo  * is_module_percpu_address().
215010fad5e4STejun Heo  *
215110fad5e4STejun Heo  * RETURNS:
215210fad5e4STejun Heo  * %true if @addr is from the in-kernel static percpu area, %false otherwise.
215310fad5e4STejun Heo  */
215410fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr)
215510fad5e4STejun Heo {
2156383776faSThomas Gleixner 	return __is_kernel_percpu_address(addr, NULL);
215710fad5e4STejun Heo }
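
/*
 * Illustrative sketch (editor's example): a typical caller is an "is
 * this object statically allocated?" check; lockdep's static_obj() does
 * something along these lines.
 *
 *	if (is_kernel_percpu_address(addr))
 *		return true;	(static percpu variable)
 */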
215810fad5e4STejun Heo 
215910fad5e4STejun Heo /**
21603b034b0dSVivek Goyal  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
21613b034b0dSVivek Goyal  * @addr: the address to be converted to physical address
21623b034b0dSVivek Goyal  *
21633b034b0dSVivek Goyal  * Given @addr, which is a dereferenceable address obtained via one of
21643b034b0dSVivek Goyal  * the percpu access macros, this function translates it into its physical
21653b034b0dSVivek Goyal  * address.  The caller is responsible for ensuring @addr stays valid
21663b034b0dSVivek Goyal  * until this function finishes.
21673b034b0dSVivek Goyal  *
216867589c71SDave Young  * The percpu allocator has a special setup for the first chunk, which
216967589c71SDave Young  * currently supports either embedding in the linear address space or a
217067589c71SDave Young  * vmalloc mapping; from the second chunk on, the backing allocator
217167589c71SDave Young  * (currently either vm or km) provides the translation.
217267589c71SDave Young  *
2173bffc4375SYannick Guerrini  * The address could be translated without checking whether it falls into
217467589c71SDave Young  * the first chunk, but the current code better reflects how the percpu
217567589c71SDave Young  * allocator actually works, and the verification can discover bugs both
217667589c71SDave Young  * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
217767589c71SDave Young  * So we keep the current code.
217867589c71SDave Young  *
21793b034b0dSVivek Goyal  * RETURNS:
21803b034b0dSVivek Goyal  * The physical address for @addr.
21813b034b0dSVivek Goyal  */
21823b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr)
21833b034b0dSVivek Goyal {
21849983b6f0STejun Heo 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
21859983b6f0STejun Heo 	bool in_first_chunk = false;
2186a855b84cSTejun Heo 	unsigned long first_low, first_high;
21879983b6f0STejun Heo 	unsigned int cpu;
21889983b6f0STejun Heo 
21899983b6f0STejun Heo 	/*
2190a855b84cSTejun Heo 	 * The following test on first_low/high isn't strictly
21919983b6f0STejun Heo 	 * necessary but will speed up lookups of addresses which
21929983b6f0STejun Heo 	 * aren't in the first chunk.
2193c0ebfdc3SDennis Zhou (Facebook) 	 *
2194c0ebfdc3SDennis Zhou (Facebook) 	 * The address check is against full chunk sizes.  pcpu_base_addr
2195c0ebfdc3SDennis Zhou (Facebook) 	 * points to the beginning of the first chunk including the
2196c0ebfdc3SDennis Zhou (Facebook) 	 * static region.  Assumes good intent as the first chunk may
2197c0ebfdc3SDennis Zhou (Facebook) 	 * not be full (ie. < pcpu_unit_pages in size).
21989983b6f0STejun Heo 	 */
2199c0ebfdc3SDennis Zhou (Facebook) 	first_low = (unsigned long)pcpu_base_addr +
2200c0ebfdc3SDennis Zhou (Facebook) 		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2201c0ebfdc3SDennis Zhou (Facebook) 	first_high = (unsigned long)pcpu_base_addr +
2202c0ebfdc3SDennis Zhou (Facebook) 		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2203a855b84cSTejun Heo 	if ((unsigned long)addr >= first_low &&
2204a855b84cSTejun Heo 	    (unsigned long)addr < first_high) {
22059983b6f0STejun Heo 		for_each_possible_cpu(cpu) {
22069983b6f0STejun Heo 			void *start = per_cpu_ptr(base, cpu);
22079983b6f0STejun Heo 
22089983b6f0STejun Heo 			if (addr >= start && addr < start + pcpu_unit_size) {
22099983b6f0STejun Heo 				in_first_chunk = true;
22109983b6f0STejun Heo 				break;
22119983b6f0STejun Heo 			}
22129983b6f0STejun Heo 		}
22139983b6f0STejun Heo 	}
22149983b6f0STejun Heo 
22159983b6f0STejun Heo 	if (in_first_chunk) {
2216eac522efSDavid Howells 		if (!is_vmalloc_addr(addr))
22173b034b0dSVivek Goyal 			return __pa(addr);
22183b034b0dSVivek Goyal 		else
22199f57bd4dSEugene Surovegin 			return page_to_phys(vmalloc_to_page(addr)) +
22209f57bd4dSEugene Surovegin 			       offset_in_page(addr);
2221020ec653STejun Heo 	} else
22229f57bd4dSEugene Surovegin 		return page_to_phys(pcpu_addr_to_page(addr)) +
22239f57bd4dSEugene Surovegin 		       offset_in_page(addr);
22243b034b0dSVivek Goyal }
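
/*
 * Illustrative sketch (editor's example): crash dump code records the
 * physical address of a per-cpu buffer roughly like this (identifiers
 * hypothetical).
 *
 *	phys_addr_t pa;
 *
 *	pa = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
 */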
22253b034b0dSVivek Goyal 
2226fbf59bc9STejun Heo /**
2227fd1e8a1fSTejun Heo  * pcpu_alloc_alloc_info - allocate percpu allocation info
2228fd1e8a1fSTejun Heo  * @nr_groups: the number of groups
2229fd1e8a1fSTejun Heo  * @nr_units: the number of units
2230033e48fbSTejun Heo  *
2231fd1e8a1fSTejun Heo  * Allocate ai which is large enough for @nr_groups groups containing
2232fd1e8a1fSTejun Heo  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
2233fd1e8a1fSTejun Heo  * cpu_map array which is long enough for @nr_units and filled with
2234fd1e8a1fSTejun Heo  * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
2235fd1e8a1fSTejun Heo  * pointers of the other groups.
2236033e48fbSTejun Heo  *
2237033e48fbSTejun Heo  * RETURNS:
2238fd1e8a1fSTejun Heo  * Pointer to the allocated pcpu_alloc_info on success, NULL on
2239fd1e8a1fSTejun Heo  * failure.
2240033e48fbSTejun Heo  */
2241fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2242fd1e8a1fSTejun Heo 						      int nr_units)
2243fd1e8a1fSTejun Heo {
2244fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
2245fd1e8a1fSTejun Heo 	size_t base_size, ai_size;
2246fd1e8a1fSTejun Heo 	void *ptr;
2247fd1e8a1fSTejun Heo 	int unit;
2248fd1e8a1fSTejun Heo 
224914d37612SGustavo A. R. Silva 	base_size = ALIGN(struct_size(ai, groups, nr_groups),
2250fd1e8a1fSTejun Heo 			  __alignof__(ai->groups[0].cpu_map[0]));
2251fd1e8a1fSTejun Heo 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2252fd1e8a1fSTejun Heo 
225326fb3daeSMike Rapoport 	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2254fd1e8a1fSTejun Heo 	if (!ptr)
2255fd1e8a1fSTejun Heo 		return NULL;
2256fd1e8a1fSTejun Heo 	ai = ptr;
2257fd1e8a1fSTejun Heo 	ptr += base_size;
2258fd1e8a1fSTejun Heo 
2259fd1e8a1fSTejun Heo 	ai->groups[0].cpu_map = ptr;
2260fd1e8a1fSTejun Heo 
2261fd1e8a1fSTejun Heo 	for (unit = 0; unit < nr_units; unit++)
2262fd1e8a1fSTejun Heo 		ai->groups[0].cpu_map[unit] = NR_CPUS;
2263fd1e8a1fSTejun Heo 
2264fd1e8a1fSTejun Heo 	ai->nr_groups = nr_groups;
2265fd1e8a1fSTejun Heo 	ai->__ai_size = PFN_ALIGN(ai_size);
2266fd1e8a1fSTejun Heo 
2267fd1e8a1fSTejun Heo 	return ai;
2268fd1e8a1fSTejun Heo }
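
/*
 * Illustrative sketch (editor's example): a flat single-group config is
 * filled in the same way pcpu_build_alloc_info() does it, relying on the
 * returned ai being zeroed.
 *
 *	struct pcpu_alloc_info *ai;
 *	unsigned int cpu;
 *
 *	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
 *	if (!ai)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		ai->groups[0].cpu_map[ai->groups[0].nr_units++] = cpu;
 */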
2269fd1e8a1fSTejun Heo 
2270fd1e8a1fSTejun Heo /**
2271fd1e8a1fSTejun Heo  * pcpu_free_alloc_info - free percpu allocation info
2272fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info to free
2273fd1e8a1fSTejun Heo  *
2274fd1e8a1fSTejun Heo  * Free @ai which was allocated by pcpu_alloc_alloc_info().
2275fd1e8a1fSTejun Heo  */
2276fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2277fd1e8a1fSTejun Heo {
2278999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(ai), ai->__ai_size);
2279fd1e8a1fSTejun Heo }
2280fd1e8a1fSTejun Heo 
2281fd1e8a1fSTejun Heo /**
2282fd1e8a1fSTejun Heo  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2283fd1e8a1fSTejun Heo  * @lvl: loglevel
2284fd1e8a1fSTejun Heo  * @ai: allocation info to dump
2285fd1e8a1fSTejun Heo  *
2286fd1e8a1fSTejun Heo  * Print out information about @ai using loglevel @lvl.
2287fd1e8a1fSTejun Heo  */
2288fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
2289fd1e8a1fSTejun Heo 				 const struct pcpu_alloc_info *ai)
2290033e48fbSTejun Heo {
2291fd1e8a1fSTejun Heo 	int group_width = 1, cpu_width = 1, width;
2292033e48fbSTejun Heo 	char empty_str[] = "--------";
2293fd1e8a1fSTejun Heo 	int alloc = 0, alloc_end = 0;
2294fd1e8a1fSTejun Heo 	int group, v;
2295fd1e8a1fSTejun Heo 	int upa, apl;	/* units per alloc, allocs per line */
2296033e48fbSTejun Heo 
2297fd1e8a1fSTejun Heo 	v = ai->nr_groups;
2298033e48fbSTejun Heo 	while (v /= 10)
2299fd1e8a1fSTejun Heo 		group_width++;
2300033e48fbSTejun Heo 
2301fd1e8a1fSTejun Heo 	v = num_possible_cpus();
2302fd1e8a1fSTejun Heo 	while (v /= 10)
2303fd1e8a1fSTejun Heo 		cpu_width++;
2304fd1e8a1fSTejun Heo 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2305033e48fbSTejun Heo 
2306fd1e8a1fSTejun Heo 	upa = ai->alloc_size / ai->unit_size;
2307fd1e8a1fSTejun Heo 	width = upa * (cpu_width + 1) + group_width + 3;
2308fd1e8a1fSTejun Heo 	apl = rounddown_pow_of_two(max(60 / width, 1));
2309033e48fbSTejun Heo 
2310fd1e8a1fSTejun Heo 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2311fd1e8a1fSTejun Heo 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2312fd1e8a1fSTejun Heo 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2313fd1e8a1fSTejun Heo 
2314fd1e8a1fSTejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
2315fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
2316fd1e8a1fSTejun Heo 		int unit = 0, unit_end = 0;
2317fd1e8a1fSTejun Heo 
2318fd1e8a1fSTejun Heo 		BUG_ON(gi->nr_units % upa);
2319fd1e8a1fSTejun Heo 		for (alloc_end += gi->nr_units / upa;
2320fd1e8a1fSTejun Heo 		     alloc < alloc_end; alloc++) {
2321fd1e8a1fSTejun Heo 			if (!(alloc % apl)) {
23221170532bSJoe Perches 				pr_cont("\n");
2323fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl);
2324033e48fbSTejun Heo 			}
23251170532bSJoe Perches 			pr_cont("[%0*d] ", group_width, group);
2326fd1e8a1fSTejun Heo 
2327fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++)
2328fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS)
23291170532bSJoe Perches 					pr_cont("%0*d ",
23301170532bSJoe Perches 						cpu_width, gi->cpu_map[unit]);
2331033e48fbSTejun Heo 				else
23321170532bSJoe Perches 					pr_cont("%s ", empty_str);
2333033e48fbSTejun Heo 		}
2334fd1e8a1fSTejun Heo 	}
23351170532bSJoe Perches 	pr_cont("\n");
2336033e48fbSTejun Heo }
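
/*
 * For reference, this dump shows up in the boot log as lines of the
 * following shape (values illustrative, 4 cpus in one group, one unit
 * per allocation):
 *
 *	pcpu-alloc: s58840 r8192 d23080 u90112 alloc=22*4096
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3
 */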
2337033e48fbSTejun Heo 
2338fbf59bc9STejun Heo /**
23398d408b4bSTejun Heo  * pcpu_setup_first_chunk - initialize the first percpu chunk
2340fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info describing how the percpu area is shaped
234138a6be52STejun Heo  * @base_addr: mapped address
2342fbf59bc9STejun Heo  *
23438d408b4bSTejun Heo  * Initialize the first percpu chunk which contains the kernel static
234469ab285bSChristophe JAILLET  * percpu area.  This function is to be called from the arch percpu area
234538a6be52STejun Heo  * setup path.
23468d408b4bSTejun Heo  *
2347fd1e8a1fSTejun Heo  * @ai contains all information necessary to initialize the first
2348fd1e8a1fSTejun Heo  * chunk and prime the dynamic percpu allocator.
23498d408b4bSTejun Heo  *
2350fd1e8a1fSTejun Heo  * @ai->static_size is the size of static percpu area.
2351fd1e8a1fSTejun Heo  *
2352fd1e8a1fSTejun Heo  * @ai->reserved_size, if non-zero, specifies the number of bytes to
2353edcb4639STejun Heo  * reserve after the static area in the first chunk.  This reserves
2354edcb4639STejun Heo  * the first chunk such that it's available only through reserved
2355edcb4639STejun Heo  * percpu allocation.  This is primarily used to serve module percpu
2356edcb4639STejun Heo  * static areas on architectures where the addressing model has
2357edcb4639STejun Heo  * limited offset range for symbol relocations to guarantee module
2358edcb4639STejun Heo  * percpu symbols fall inside the relocatable range.
2359edcb4639STejun Heo  *
2360fd1e8a1fSTejun Heo  * @ai->dyn_size determines the number of bytes available for dynamic
2361fd1e8a1fSTejun Heo  * allocation in the first chunk.  The area between @ai->static_size +
2362fd1e8a1fSTejun Heo  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
23636074d5b0STejun Heo  *
2364fd1e8a1fSTejun Heo  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2365fd1e8a1fSTejun Heo  * and equal to or larger than @ai->static_size + @ai->reserved_size +
2366fd1e8a1fSTejun Heo  * @ai->dyn_size.
23678d408b4bSTejun Heo  *
2368fd1e8a1fSTejun Heo  * @ai->atom_size is the allocation atom size and used as alignment
2369fd1e8a1fSTejun Heo  * for vm areas.
23708d408b4bSTejun Heo  *
2371fd1e8a1fSTejun Heo  * @ai->alloc_size is the allocation size and always multiple of
2372fd1e8a1fSTejun Heo  * @ai->atom_size.  This is larger than @ai->atom_size if
2373fd1e8a1fSTejun Heo  * @ai->unit_size is larger than @ai->atom_size.
2374fd1e8a1fSTejun Heo  *
2375fd1e8a1fSTejun Heo  * @ai->nr_groups and @ai->groups describe virtual memory layout of
2376fd1e8a1fSTejun Heo  * percpu areas.  Units which should be colocated are put into the
2377fd1e8a1fSTejun Heo  * same group.  Dynamic VM areas will be allocated according to these
2378fd1e8a1fSTejun Heo  * groupings.  If @ai->nr_groups is zero, a single group containing
2379fd1e8a1fSTejun Heo  * all units is assumed.
23808d408b4bSTejun Heo  *
238138a6be52STejun Heo  * The caller should have mapped the first chunk at @base_addr and
238238a6be52STejun Heo  * copied static data to each unit.
2383fbf59bc9STejun Heo  *
2384c0ebfdc3SDennis Zhou (Facebook)  * The first chunk will always contain a static and a dynamic region.
2385c0ebfdc3SDennis Zhou (Facebook)  * However, the static region is not managed by any chunk.  If the first
2386c0ebfdc3SDennis Zhou (Facebook)  * chunk also contains a reserved region, it is served by two chunks -
2387c0ebfdc3SDennis Zhou (Facebook)  * one for the reserved region and one for the dynamic region.  They
2388c0ebfdc3SDennis Zhou (Facebook)  * share the same vm, but use offset regions in the area allocation map.
2389c0ebfdc3SDennis Zhou (Facebook)  * The chunk serving the dynamic region is circulated in the chunk slots
2390c0ebfdc3SDennis Zhou (Facebook)  * and available for dynamic allocation like any other chunk.
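 *
 * As an illustration (numbers hypothetical), with a 64KB static region,
 * an 8KB reserved region and a 28KB dynamic region, each unit starts out
 * as <Static 64KB | Reserved 8KB | Dynamic 28KB>; pcpu_reserved_chunk
 * then serves the 8KB reserved region and pcpu_first_chunk the 28KB
 * dynamic region.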
2391fbf59bc9STejun Heo  */
2392163fa234SKefeng Wang void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2393fd1e8a1fSTejun Heo 				   void *base_addr)
2394fbf59bc9STejun Heo {
2395b9c39442SDennis Zhou (Facebook) 	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2396d2f3c384SDennis Zhou (Facebook) 	size_t static_size, dyn_size;
23970c4169c3SDennis Zhou (Facebook) 	struct pcpu_chunk *chunk;
23986563297cSTejun Heo 	unsigned long *group_offsets;
23996563297cSTejun Heo 	size_t *group_sizes;
2400fb435d52STejun Heo 	unsigned long *unit_off;
2401fbf59bc9STejun Heo 	unsigned int cpu;
2402fd1e8a1fSTejun Heo 	int *unit_map;
2403fd1e8a1fSTejun Heo 	int group, unit, i;
2404c0ebfdc3SDennis Zhou (Facebook) 	int map_size;
2405c0ebfdc3SDennis Zhou (Facebook) 	unsigned long tmp_addr;
2406f655f405SMike Rapoport 	size_t alloc_size;
2407*3c7be18aSRoman Gushchin 	enum pcpu_chunk_type type;
2408fbf59bc9STejun Heo 
2409635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond)	do {					\
2410635b75fcSTejun Heo 	if (unlikely(cond)) {						\
2411870d4b12SJoe Perches 		pr_emerg("failed to initialize, %s\n", #cond);		\
2412870d4b12SJoe Perches 		pr_emerg("cpu_possible_mask=%*pb\n",			\
2413807de073STejun Heo 			 cpumask_pr_args(cpu_possible_mask));		\
2414635b75fcSTejun Heo 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
2415635b75fcSTejun Heo 		BUG();							\
2416635b75fcSTejun Heo 	}								\
2417635b75fcSTejun Heo } while (0)
2418635b75fcSTejun Heo 
24192f39e637STejun Heo 	/* sanity checks */
2420635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2421bbddff05STejun Heo #ifdef CONFIG_SMP
2422635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(!ai->static_size);
2423f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2424bbddff05STejun Heo #endif
2425635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(!base_addr);
2426f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2427635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2428f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2429635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2430ca460b3cSDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2431099a19d9STejun Heo 	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2432fb29a2ccSDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!ai->dyn_size);
2433d2f3c384SDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2434ca460b3cSDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2435ca460b3cSDennis Zhou (Facebook) 			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
24369f645532STejun Heo 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
24378d408b4bSTejun Heo 
24386563297cSTejun Heo 	/* process group information and build config tables accordingly */
2439f655f405SMike Rapoport 	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2440f655f405SMike Rapoport 	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2441f655f405SMike Rapoport 	if (!group_offsets)
2442f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2443f655f405SMike Rapoport 		      alloc_size);
2444f655f405SMike Rapoport 
2445f655f405SMike Rapoport 	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2446f655f405SMike Rapoport 	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2447f655f405SMike Rapoport 	if (!group_sizes)
2448f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2449f655f405SMike Rapoport 		      alloc_size);
2450f655f405SMike Rapoport 
2451f655f405SMike Rapoport 	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2452f655f405SMike Rapoport 	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2453f655f405SMike Rapoport 	if (!unit_map)
2454f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2455f655f405SMike Rapoport 		      alloc_size);
2456f655f405SMike Rapoport 
2457f655f405SMike Rapoport 	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2458f655f405SMike Rapoport 	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2459f655f405SMike Rapoport 	if (!unit_off)
2460f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2461f655f405SMike Rapoport 		      alloc_size);
24622f39e637STejun Heo 
2463fd1e8a1fSTejun Heo 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2464ffe0d5a5STejun Heo 		unit_map[cpu] = UINT_MAX;
2465a855b84cSTejun Heo 
2466a855b84cSTejun Heo 	pcpu_low_unit_cpu = NR_CPUS;
2467a855b84cSTejun Heo 	pcpu_high_unit_cpu = NR_CPUS;
24682f39e637STejun Heo 
2469fd1e8a1fSTejun Heo 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2470fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
24712f39e637STejun Heo 
24726563297cSTejun Heo 		group_offsets[group] = gi->base_offset;
24736563297cSTejun Heo 		group_sizes[group] = gi->nr_units * ai->unit_size;
24746563297cSTejun Heo 
2475fd1e8a1fSTejun Heo 		for (i = 0; i < gi->nr_units; i++) {
2476fd1e8a1fSTejun Heo 			cpu = gi->cpu_map[i];
2477fd1e8a1fSTejun Heo 			if (cpu == NR_CPUS)
2478fd1e8a1fSTejun Heo 				continue;
2479fd1e8a1fSTejun Heo 
24809f295664SDan Carpenter 			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2481635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2482635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2483fd1e8a1fSTejun Heo 
2484fd1e8a1fSTejun Heo 			unit_map[cpu] = unit + i;
2485fb435d52STejun Heo 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2486fb435d52STejun Heo 
2487a855b84cSTejun Heo 			/* determine low/high unit_cpu */
2488a855b84cSTejun Heo 			if (pcpu_low_unit_cpu == NR_CPUS ||
2489a855b84cSTejun Heo 			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2490a855b84cSTejun Heo 				pcpu_low_unit_cpu = cpu;
2491a855b84cSTejun Heo 			if (pcpu_high_unit_cpu == NR_CPUS ||
2492a855b84cSTejun Heo 			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2493a855b84cSTejun Heo 				pcpu_high_unit_cpu = cpu;
24940fc0531eSLinus Torvalds 		}
24950fc0531eSLinus Torvalds 	}
2496fd1e8a1fSTejun Heo 	pcpu_nr_units = unit;
24972f39e637STejun Heo 
24982f39e637STejun Heo 	for_each_possible_cpu(cpu)
2499635b75fcSTejun Heo 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2500635b75fcSTejun Heo 
2501635b75fcSTejun Heo 	/* we're done parsing the input, undefine BUG macro and dump config */
2502635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON
2503bcbea798STejun Heo 	pcpu_dump_alloc_info(KERN_DEBUG, ai);
25042f39e637STejun Heo 
25056563297cSTejun Heo 	pcpu_nr_groups = ai->nr_groups;
25066563297cSTejun Heo 	pcpu_group_offsets = group_offsets;
25076563297cSTejun Heo 	pcpu_group_sizes = group_sizes;
2508fd1e8a1fSTejun Heo 	pcpu_unit_map = unit_map;
2509fb435d52STejun Heo 	pcpu_unit_offsets = unit_off;
25102f39e637STejun Heo 
25112f39e637STejun Heo 	/* determine basic parameters */
2512fd1e8a1fSTejun Heo 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2513d9b55eebSTejun Heo 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
25146563297cSTejun Heo 	pcpu_atom_size = ai->atom_size;
2515ce3141a2STejun Heo 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
2516ce3141a2STejun Heo 		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
2517cafe8816STejun Heo 
251830a5b536SDennis Zhou 	pcpu_stats_save_ai(ai);
251930a5b536SDennis Zhou 
2520d9b55eebSTejun Heo 	/*
2521d9b55eebSTejun Heo 	 * Allocate chunk slots.  The additional last slot is for
2522d9b55eebSTejun Heo 	 * empty chunks.
2523d9b55eebSTejun Heo 	 */
2524d9b55eebSTejun Heo 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
2525*3c7be18aSRoman Gushchin 	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2526*3c7be18aSRoman Gushchin 					  sizeof(pcpu_chunk_lists[0]) *
2527*3c7be18aSRoman Gushchin 					  PCPU_NR_CHUNK_TYPES,
25287e1c4e27SMike Rapoport 					  SMP_CACHE_BYTES);
2529*3c7be18aSRoman Gushchin 	if (!pcpu_chunk_lists)
2530f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2531*3c7be18aSRoman Gushchin 		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) *
2532*3c7be18aSRoman Gushchin 		      PCPU_NR_CHUNK_TYPES);
2533*3c7be18aSRoman Gushchin 
2534*3c7be18aSRoman Gushchin 	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
2535fbf59bc9STejun Heo 		for (i = 0; i < pcpu_nr_slots; i++)
2536*3c7be18aSRoman Gushchin 			INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]);
2537fbf59bc9STejun Heo 
2538edcb4639STejun Heo 	/*
2539d2f3c384SDennis Zhou (Facebook) 	 * The end of the static region needs to be aligned with the
2540d2f3c384SDennis Zhou (Facebook) 	 * minimum allocation size as this offsets the reserved and
2541d2f3c384SDennis Zhou (Facebook) 	 * dynamic region.  The first chunk ends page aligned by
2542d2f3c384SDennis Zhou (Facebook) 	 * expanding the dynamic region, therefore the dynamic region
2543d2f3c384SDennis Zhou (Facebook) 	 * can be shrunk to compensate while still staying above the
2544d2f3c384SDennis Zhou (Facebook) 	 * configured sizes.
2545d2f3c384SDennis Zhou (Facebook) 	 */
2546d2f3c384SDennis Zhou (Facebook) 	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2547d2f3c384SDennis Zhou (Facebook) 	dyn_size = ai->dyn_size - (static_size - ai->static_size);
2548d2f3c384SDennis Zhou (Facebook) 
2549d2f3c384SDennis Zhou (Facebook) 	/*
2550c0ebfdc3SDennis Zhou (Facebook) 	 * Initialize first chunk.
2551c0ebfdc3SDennis Zhou (Facebook) 	 * If the reserved_size is non-zero, this initializes the reserved
2552c0ebfdc3SDennis Zhou (Facebook) 	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
2553c0ebfdc3SDennis Zhou (Facebook) 	 * and the dynamic region is initialized here.  The first chunk,
2554c0ebfdc3SDennis Zhou (Facebook) 	 * pcpu_first_chunk, will always point to the chunk that serves
2555c0ebfdc3SDennis Zhou (Facebook) 	 * the dynamic region.
2556edcb4639STejun Heo 	 */
2557d2f3c384SDennis Zhou (Facebook) 	tmp_addr = (unsigned long)base_addr + static_size;
2558d2f3c384SDennis Zhou (Facebook) 	map_size = ai->reserved_size ?: dyn_size;
255940064aecSDennis Zhou (Facebook) 	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
256061ace7faSTejun Heo 
2561edcb4639STejun Heo 	/* init dynamic chunk if necessary */
2562b9c39442SDennis Zhou (Facebook) 	if (ai->reserved_size) {
25630c4169c3SDennis Zhou (Facebook) 		pcpu_reserved_chunk = chunk;
2564b9c39442SDennis Zhou (Facebook) 
2565d2f3c384SDennis Zhou (Facebook) 		tmp_addr = (unsigned long)base_addr + static_size +
2566c0ebfdc3SDennis Zhou (Facebook) 			   ai->reserved_size;
2567d2f3c384SDennis Zhou (Facebook) 		map_size = dyn_size;
256840064aecSDennis Zhou (Facebook) 		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2569edcb4639STejun Heo 	}
2570edcb4639STejun Heo 
25712441d15cSTejun Heo 	/* link the first chunk in */
25720c4169c3SDennis Zhou (Facebook) 	pcpu_first_chunk = chunk;
25730cecf50cSDennis Zhou (Facebook) 	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2574ae9e6bc9STejun Heo 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2575fbf59bc9STejun Heo 
25767e8a6304SDennis Zhou (Facebook) 	/* include all regions of the first chunk */
25777e8a6304SDennis Zhou (Facebook) 	pcpu_nr_populated += PFN_DOWN(size_sum);
25787e8a6304SDennis Zhou (Facebook) 
257930a5b536SDennis Zhou 	pcpu_stats_chunk_alloc();
2580df95e795SDennis Zhou 	trace_percpu_create_chunk(base_addr);
258130a5b536SDennis Zhou 
2582fbf59bc9STejun Heo 	/* we're done */
2583bba174f5STejun Heo 	pcpu_base_addr = base_addr;
2584fbf59bc9STejun Heo }
258566c3a757STejun Heo 
2586bbddff05STejun Heo #ifdef CONFIG_SMP
2587bbddff05STejun Heo 
258817f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2589f58dc01bSTejun Heo 	[PCPU_FC_AUTO]	= "auto",
2590f58dc01bSTejun Heo 	[PCPU_FC_EMBED]	= "embed",
2591f58dc01bSTejun Heo 	[PCPU_FC_PAGE]	= "page",
2592f58dc01bSTejun Heo };
259366c3a757STejun Heo 
2594f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2595f58dc01bSTejun Heo 
2596f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str)
259766c3a757STejun Heo {
25985479c78aSCyrill Gorcunov 	if (!str)
25995479c78aSCyrill Gorcunov 		return -EINVAL;
26005479c78aSCyrill Gorcunov 
2601f58dc01bSTejun Heo 	if (0)
2602f58dc01bSTejun Heo 		/* nada */;
2603f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2604f58dc01bSTejun Heo 	else if (!strcmp(str, "embed"))
2605f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_EMBED;
2606f58dc01bSTejun Heo #endif
2607f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2608f58dc01bSTejun Heo 	else if (!strcmp(str, "page"))
2609f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_PAGE;
2610f58dc01bSTejun Heo #endif
2611f58dc01bSTejun Heo 	else
2612870d4b12SJoe Perches 		pr_warn("unknown allocator %s specified\n", str);
261366c3a757STejun Heo 
2614f58dc01bSTejun Heo 	return 0;
261566c3a757STejun Heo }
2616f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup);
261766c3a757STejun Heo 
26183c9a024fSTejun Heo /*
26193c9a024fSTejun Heo  * pcpu_embed_first_chunk() is used by the generic percpu setup.
26203c9a024fSTejun Heo  * Build it if needed by the arch config or the generic setup is going
26213c9a024fSTejun Heo  * to be used.
26223c9a024fSTejun Heo  */
262308fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
262408fc4580STejun Heo 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
26253c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK
26263c9a024fSTejun Heo #endif
26273c9a024fSTejun Heo 
26283c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */
26293c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
26303c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK
26313c9a024fSTejun Heo #endif
26323c9a024fSTejun Heo 
26333c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */
26343c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
26353c9a024fSTejun Heo /**
2636fbf59bc9STejun Heo  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2637fbf59bc9STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
2638fbf59bc9STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
2639fbf59bc9STejun Heo  * @atom_size: allocation atom size
2640fbf59bc9STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
2641fbf59bc9STejun Heo  *
2642fbf59bc9STejun Heo  * This function determines grouping of units, their mappings to cpus
2643fbf59bc9STejun Heo  * and other parameters considering needed percpu size, allocation
2644fbf59bc9STejun Heo  * atom size and distances between CPUs.
2645fbf59bc9STejun Heo  *
2646bffc4375SYannick Guerrini  * Groups are always multiples of atom size, and CPUs which are within
2647fbf59bc9STejun Heo  * LOCAL_DISTANCE of each other both ways are grouped together and share
2648fbf59bc9STejun Heo  * space for units in the same group.  The returned configuration is
2649fbf59bc9STejun Heo  * guaranteed to have CPUs on different nodes in different groups and >=75% usage
2650fbf59bc9STejun Heo  * of allocated virtual address space.
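 *
 * As an illustration (hypothetical machine), with cpus {0,1} on node 0,
 * cpus {2,3} on node 1 and @cpu_distance_fn reporting > LOCAL_DISTANCE
 * across the nodes, two groups result: group 0 = {0,1} and
 * group 1 = {2,3}, each padded to a multiple of the units per alloc.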
2651fbf59bc9STejun Heo  *
2652fbf59bc9STejun Heo  * RETURNS:
2653fbf59bc9STejun Heo  * On success, pointer to the new allocation_info is returned.  On
2654fbf59bc9STejun Heo  * failure, ERR_PTR value is returned.
2655fbf59bc9STejun Heo  */
2656fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
2657fbf59bc9STejun Heo 				size_t reserved_size, size_t dyn_size,
2658fbf59bc9STejun Heo 				size_t atom_size,
2659fbf59bc9STejun Heo 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2660fbf59bc9STejun Heo {
2661fbf59bc9STejun Heo 	static int group_map[NR_CPUS] __initdata;
2662fbf59bc9STejun Heo 	static int group_cnt[NR_CPUS] __initdata;
2663fbf59bc9STejun Heo 	const size_t static_size = __per_cpu_end - __per_cpu_start;
2664fbf59bc9STejun Heo 	int nr_groups = 1, nr_units = 0;
2665fbf59bc9STejun Heo 	size_t size_sum, min_unit_size, alloc_size;
26663f649ab7SKees Cook 	int upa, max_upa, best_upa;	/* units_per_alloc */
2667fbf59bc9STejun Heo 	int last_allocs, group, unit;
2668fbf59bc9STejun Heo 	unsigned int cpu, tcpu;
2669fbf59bc9STejun Heo 	struct pcpu_alloc_info *ai;
2670fbf59bc9STejun Heo 	unsigned int *cpu_map;
2671fbf59bc9STejun Heo 
2672fbf59bc9STejun Heo 	/* this function may be called multiple times */
2673fbf59bc9STejun Heo 	memset(group_map, 0, sizeof(group_map));
2674fbf59bc9STejun Heo 	memset(group_cnt, 0, sizeof(group_cnt));
2675fbf59bc9STejun Heo 
2676fbf59bc9STejun Heo 	/* calculate size_sum and ensure dyn_size is enough for early alloc */
2677fbf59bc9STejun Heo 	size_sum = PFN_ALIGN(static_size + reserved_size +
2678fbf59bc9STejun Heo 			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2679fbf59bc9STejun Heo 	dyn_size = size_sum - static_size - reserved_size;
2680fbf59bc9STejun Heo 
2681fbf59bc9STejun Heo 	/*
2682fbf59bc9STejun Heo 	 * Determine min_unit_size, alloc_size and max_upa such that
2683fbf59bc9STejun Heo 	 * alloc_size is a multiple of atom_size and is the smallest
268425985edcSLucas De Marchi 	 * which can accommodate 4k aligned segments which are equal to
2685fbf59bc9STejun Heo 	 * or larger than min_unit_size.
2686fbf59bc9STejun Heo 	 */
2687fbf59bc9STejun Heo 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2688fbf59bc9STejun Heo 
26899c015162SDennis Zhou (Facebook) 	/* determine the maximum # of units that can fit in an allocation */
2690fbf59bc9STejun Heo 	alloc_size = roundup(min_unit_size, atom_size);
2691fbf59bc9STejun Heo 	upa = alloc_size / min_unit_size;
2692f09f1243SAlexander Kuleshov 	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2693fbf59bc9STejun Heo 		upa--;
2694fbf59bc9STejun Heo 	max_upa = upa;
2695fbf59bc9STejun Heo 
2696fbf59bc9STejun Heo 	/* group cpus according to their proximity */
2697fbf59bc9STejun Heo 	for_each_possible_cpu(cpu) {
2698fbf59bc9STejun Heo 		group = 0;
2699fbf59bc9STejun Heo 	next_group:
2700fbf59bc9STejun Heo 		for_each_possible_cpu(tcpu) {
2701fbf59bc9STejun Heo 			if (cpu == tcpu)
2702fbf59bc9STejun Heo 				break;
2703fbf59bc9STejun Heo 			if (group_map[tcpu] == group && cpu_distance_fn &&
2704fbf59bc9STejun Heo 			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
2705fbf59bc9STejun Heo 			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
2706fbf59bc9STejun Heo 				group++;
2707fbf59bc9STejun Heo 				nr_groups = max(nr_groups, group + 1);
2708fbf59bc9STejun Heo 				goto next_group;
2709fbf59bc9STejun Heo 			}
2710fbf59bc9STejun Heo 		}
2711fbf59bc9STejun Heo 		group_map[cpu] = group;
2712fbf59bc9STejun Heo 		group_cnt[group]++;
2713fbf59bc9STejun Heo 	}
2714fbf59bc9STejun Heo 
2715fbf59bc9STejun Heo 	/*
27169c015162SDennis Zhou (Facebook) 	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
27179c015162SDennis Zhou (Facebook) 	 * Expand the unit_size until we use >= 75% of the units allocated.
27189c015162SDennis Zhou (Facebook) 	 * This is related to atom_size, which could be much larger than the unit_size.
2719fbf59bc9STejun Heo 	 */
2720fbf59bc9STejun Heo 	last_allocs = INT_MAX;
2721fbf59bc9STejun Heo 	for (upa = max_upa; upa; upa--) {
2722fbf59bc9STejun Heo 		int allocs = 0, wasted = 0;
2723fbf59bc9STejun Heo 
2724f09f1243SAlexander Kuleshov 		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2725fbf59bc9STejun Heo 			continue;
2726fbf59bc9STejun Heo 
2727fbf59bc9STejun Heo 		for (group = 0; group < nr_groups; group++) {
2728fbf59bc9STejun Heo 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2729fbf59bc9STejun Heo 			allocs += this_allocs;
2730fbf59bc9STejun Heo 			wasted += this_allocs * upa - group_cnt[group];
2731fbf59bc9STejun Heo 		}
2732fbf59bc9STejun Heo 
2733fbf59bc9STejun Heo 		/*
2734fbf59bc9STejun Heo 		 * Don't accept if wastage is over 1/3.  The
2735fbf59bc9STejun Heo 		 * greater-than comparison ensures upa==1 always
2736fbf59bc9STejun Heo 		 * passes the following check.
2737fbf59bc9STejun Heo 		 */
2738fbf59bc9STejun Heo 		if (wasted > num_possible_cpus() / 3)
2739fbf59bc9STejun Heo 			continue;
2740fbf59bc9STejun Heo 
2741fbf59bc9STejun Heo 		/* and then don't consume more memory */
2742fbf59bc9STejun Heo 		if (allocs > last_allocs)
2743fbf59bc9STejun Heo 			break;
2744fbf59bc9STejun Heo 		last_allocs = allocs;
2745fbf59bc9STejun Heo 		best_upa = upa;
2746fbf59bc9STejun Heo 	}
2747fbf59bc9STejun Heo 	upa = best_upa;
2748fbf59bc9STejun Heo 
2749fbf59bc9STejun Heo 	/* allocate and fill alloc_info */
2750fbf59bc9STejun Heo 	for (group = 0; group < nr_groups; group++)
2751fbf59bc9STejun Heo 		nr_units += roundup(group_cnt[group], upa);
2752fbf59bc9STejun Heo 
2753fbf59bc9STejun Heo 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2754fbf59bc9STejun Heo 	if (!ai)
2755fbf59bc9STejun Heo 		return ERR_PTR(-ENOMEM);
2756fbf59bc9STejun Heo 	cpu_map = ai->groups[0].cpu_map;
2757fbf59bc9STejun Heo 
2758fbf59bc9STejun Heo 	for (group = 0; group < nr_groups; group++) {
2759fbf59bc9STejun Heo 		ai->groups[group].cpu_map = cpu_map;
2760fbf59bc9STejun Heo 		cpu_map += roundup(group_cnt[group], upa);
2761fbf59bc9STejun Heo 	}
2762fbf59bc9STejun Heo 
2763fbf59bc9STejun Heo 	ai->static_size = static_size;
2764fbf59bc9STejun Heo 	ai->reserved_size = reserved_size;
2765fbf59bc9STejun Heo 	ai->dyn_size = dyn_size;
2766fbf59bc9STejun Heo 	ai->unit_size = alloc_size / upa;
2767fbf59bc9STejun Heo 	ai->atom_size = atom_size;
2768fbf59bc9STejun Heo 	ai->alloc_size = alloc_size;
2769fbf59bc9STejun Heo 
27702de7852fSPeng Fan 	for (group = 0, unit = 0; group < nr_groups; group++) {
2771fbf59bc9STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
2772fbf59bc9STejun Heo 
2773fbf59bc9STejun Heo 		/*
2774fbf59bc9STejun Heo 		 * Initialize base_offset as if all groups are located
2775fbf59bc9STejun Heo 		 * back-to-back.  The caller should update this to
2776fbf59bc9STejun Heo 		 * reflect actual allocation.
2777fbf59bc9STejun Heo 		 */
2778fbf59bc9STejun Heo 		gi->base_offset = unit * ai->unit_size;
2779fbf59bc9STejun Heo 
2780fbf59bc9STejun Heo 		for_each_possible_cpu(cpu)
2781fbf59bc9STejun Heo 			if (group_map[cpu] == group)
2782fbf59bc9STejun Heo 				gi->cpu_map[gi->nr_units++] = cpu;
2783fbf59bc9STejun Heo 		gi->nr_units = roundup(gi->nr_units, upa);
2784fbf59bc9STejun Heo 		unit += gi->nr_units;
2785fbf59bc9STejun Heo 	}
2786fbf59bc9STejun Heo 	BUG_ON(unit != nr_units);
2787fbf59bc9STejun Heo 
2788fbf59bc9STejun Heo 	return ai;
2789fbf59bc9STejun Heo }
27903c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2791fbf59bc9STejun Heo 
27923c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK)
279366c3a757STejun Heo /**
279466c3a757STejun Heo  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
279566c3a757STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
27964ba6ce25STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
2797c8826dd5STejun Heo  * @atom_size: allocation atom size
2798c8826dd5STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
2799c8826dd5STejun Heo  * @alloc_fn: function to allocate percpu page
280025985edcSLucas De Marchi  * @free_fn: function to free percpu page
280166c3a757STejun Heo  *
280266c3a757STejun Heo  * This is a helper to ease setting up the embedded first percpu chunk and
280366c3a757STejun Heo  * can be called where pcpu_setup_first_chunk() is expected.
280466c3a757STejun Heo  *
280566c3a757STejun Heo  * If this function is used to set up the first chunk, it is allocated
2806c8826dd5STejun Heo  * by calling @alloc_fn and used as-is without being mapped into
2807c8826dd5STejun Heo  * vmalloc area.  Allocations are always whole multiples of @atom_size
2808c8826dd5STejun Heo  * aligned to @atom_size.
2809c8826dd5STejun Heo  *
2810c8826dd5STejun Heo  * This enables the first chunk to piggy back on the linear physical
2811c8826dd5STejun Heo  * mapping which often uses larger page sizes.  Please note that this
2812c8826dd5STejun Heo  * can result in a very sparse cpu->unit mapping on NUMA machines, thus
2813c8826dd5STejun Heo  * requiring a large vmalloc address space.  Don't use this allocator if
2814c8826dd5STejun Heo  * vmalloc space is not orders of magnitude larger than distances
2815c8826dd5STejun Heo  * between node memory addresses (ie. 32bit NUMA machines).
281666c3a757STejun Heo  *
28174ba6ce25STejun Heo  * @dyn_size specifies the minimum dynamic area size.
281866c3a757STejun Heo  *
281966c3a757STejun Heo  * If the needed size is smaller than the minimum or specified unit
2820c8826dd5STejun Heo  * size, the leftover is returned using @free_fn.
282166c3a757STejun Heo  *
282266c3a757STejun Heo  * RETURNS:
2823fb435d52STejun Heo  * 0 on success, -errno on failure.
282466c3a757STejun Heo  */
28254ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2826c8826dd5STejun Heo 				  size_t atom_size,
2827c8826dd5STejun Heo 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2828c8826dd5STejun Heo 				  pcpu_fc_alloc_fn_t alloc_fn,
2829c8826dd5STejun Heo 				  pcpu_fc_free_fn_t free_fn)
283066c3a757STejun Heo {
2831c8826dd5STejun Heo 	void *base = (void *)ULONG_MAX;
2832c8826dd5STejun Heo 	void **areas = NULL;
2833fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
283493c76b6bSzijun_hu 	size_t size_sum, areas_size;
283593c76b6bSzijun_hu 	unsigned long max_distance;
2836163fa234SKefeng Wang 	int group, i, highest_group, rc = 0;
283766c3a757STejun Heo 
2838c8826dd5STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2839c8826dd5STejun Heo 				   cpu_distance_fn);
2840fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
2841fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
284266c3a757STejun Heo 
2843fd1e8a1fSTejun Heo 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2844c8826dd5STejun Heo 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
284566c3a757STejun Heo 
284626fb3daeSMike Rapoport 	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
2847c8826dd5STejun Heo 	if (!areas) {
2848fb435d52STejun Heo 		rc = -ENOMEM;
2849c8826dd5STejun Heo 		goto out_free;
2850fa8a7094STejun Heo 	}
285166c3a757STejun Heo 
28529b739662Szijun_hu 	/* allocate, copy and determine base address & max_distance */
28539b739662Szijun_hu 	highest_group = 0;
2854c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
2855c8826dd5STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
2856c8826dd5STejun Heo 		unsigned int cpu = NR_CPUS;
2857c8826dd5STejun Heo 		void *ptr;
285866c3a757STejun Heo 
2859c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2860c8826dd5STejun Heo 			cpu = gi->cpu_map[i];
2861c8826dd5STejun Heo 		BUG_ON(cpu == NR_CPUS);
2862c8826dd5STejun Heo 
2863c8826dd5STejun Heo 		/* allocate space for the whole group */
2864c8826dd5STejun Heo 		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2865c8826dd5STejun Heo 		if (!ptr) {
2866c8826dd5STejun Heo 			rc = -ENOMEM;
2867c8826dd5STejun Heo 			goto out_free_areas;
2868c8826dd5STejun Heo 		}
2869f528f0b8SCatalin Marinas 		/* kmemleak tracks the percpu allocations separately */
2870f528f0b8SCatalin Marinas 		kmemleak_free(ptr);
2871c8826dd5STejun Heo 		areas[group] = ptr;
2872c8826dd5STejun Heo 
2873c8826dd5STejun Heo 		base = min(ptr, base);
28749b739662Szijun_hu 		if (ptr > areas[highest_group])
28759b739662Szijun_hu 			highest_group = group;
28769b739662Szijun_hu 	}
28779b739662Szijun_hu 	max_distance = areas[highest_group] - base;
28789b739662Szijun_hu 	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
28799b739662Szijun_hu 
28809b739662Szijun_hu 	/* warn if maximum distance is further than 75% of vmalloc space */
28819b739662Szijun_hu 	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
28829b739662Szijun_hu 		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
28839b739662Szijun_hu 				max_distance, VMALLOC_TOTAL);
28849b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
28859b739662Szijun_hu 		/* and fail if we have fallback */
28869b739662Szijun_hu 		rc = -EINVAL;
28879b739662Szijun_hu 		goto out_free_areas;
28889b739662Szijun_hu #endif
288942b64281STejun Heo 	}
289042b64281STejun Heo 
289142b64281STejun Heo 	/*
289242b64281STejun Heo 	 * Copy data and free unused parts.  This should happen after all
289342b64281STejun Heo 	 * allocations are complete; otherwise, we may end up with
289442b64281STejun Heo 	 * overlapping groups.
289542b64281STejun Heo 	 */
289642b64281STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
289742b64281STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
289842b64281STejun Heo 		void *ptr = areas[group];
2899c8826dd5STejun Heo 
2900c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2901c8826dd5STejun Heo 			if (gi->cpu_map[i] == NR_CPUS) {
2902c8826dd5STejun Heo 				/* unused unit, free whole */
2903c8826dd5STejun Heo 				free_fn(ptr, ai->unit_size);
2904c8826dd5STejun Heo 				continue;
2905c8826dd5STejun Heo 			}
2906c8826dd5STejun Heo 			/* copy and return the unused part */
2907fd1e8a1fSTejun Heo 			memcpy(ptr, __per_cpu_load, ai->static_size);
2908c8826dd5STejun Heo 			free_fn(ptr + size_sum, ai->unit_size - size_sum);
2909c8826dd5STejun Heo 		}
291066c3a757STejun Heo 	}
291166c3a757STejun Heo 
2912c8826dd5STejun Heo 	/* base address is now known, determine group base offsets */
29136ea529a2STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
2914c8826dd5STejun Heo 		ai->groups[group].base_offset = areas[group] - base;
29156ea529a2STejun Heo 	}
2916c8826dd5STejun Heo 
291700206a69SMatteo Croce 	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
291800206a69SMatteo Croce 		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2919fd1e8a1fSTejun Heo 		ai->dyn_size, ai->unit_size);
292066c3a757STejun Heo 
2921163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, base);
2922c8826dd5STejun Heo 	goto out_free;
2923c8826dd5STejun Heo 
2924c8826dd5STejun Heo out_free_areas:
2925c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++)
2926f851c8d8SMichael Holzheu 		if (areas[group])
2927c8826dd5STejun Heo 			free_fn(areas[group],
2928c8826dd5STejun Heo 				ai->groups[group].nr_units * ai->unit_size);
2929c8826dd5STejun Heo out_free:
2930fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
2931c8826dd5STejun Heo 	if (areas)
2932999c17e3SSantosh Shilimkar 		memblock_free_early(__pa(areas), areas_size);
2933fb435d52STejun Heo 	return rc;
2934d4b95f80STejun Heo }
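
/*
 * Hedged sketch (editor's example): the generic percpu setup used when
 * the arch doesn't provide CONFIG_HAVE_SETUP_PER_CPU_AREA drives this
 * function roughly as below; see the real pcpu_dfl_fc_alloc()/free()
 * callbacks further down in this file.
 *
 *	static void * __init pcpu_dfl_fc_alloc(unsigned int cpu,
 *					       size_t size, size_t align)
 *	{
 *		return memblock_alloc_from(size, align,
 *					   __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
 *				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
 */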
29353c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */
2936d4b95f80STejun Heo 
29373c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK
2938d4b95f80STejun Heo /**
293900ae4064STejun Heo  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2940d4b95f80STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
2941d4b95f80STejun Heo  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
294225985edcSLucas De Marchi  * @free_fn: function to free percpu page, always called with PAGE_SIZE
2943d4b95f80STejun Heo  * @populate_pte_fn: function to populate pte
2944d4b95f80STejun Heo  *
294500ae4064STejun Heo  * This is a helper to ease setting up a page-remapped first percpu
294600ae4064STejun Heo  * chunk and can be called where pcpu_setup_first_chunk() is expected.
2947d4b95f80STejun Heo  *
2948d4b95f80STejun Heo  * This is the basic allocator.  The static percpu area is allocated
2949d4b95f80STejun Heo  * page-by-page into the vmalloc area.
2950d4b95f80STejun Heo  *
2951d4b95f80STejun Heo  * RETURNS:
2952fb435d52STejun Heo  * 0 on success, -errno on failure.
2953d4b95f80STejun Heo  */
2954fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size,
2955d4b95f80STejun Heo 				 pcpu_fc_alloc_fn_t alloc_fn,
2956d4b95f80STejun Heo 				 pcpu_fc_free_fn_t free_fn,
2957d4b95f80STejun Heo 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2958d4b95f80STejun Heo {
29598f05a6a6STejun Heo 	static struct vm_struct vm;
2960fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
296100ae4064STejun Heo 	char psize_str[16];
2962ce3141a2STejun Heo 	int unit_pages;
2963d4b95f80STejun Heo 	size_t pages_size;
2964ce3141a2STejun Heo 	struct page **pages;
2965163fa234SKefeng Wang 	int unit, i, j, rc = 0;
29668f606604Szijun_hu 	int upa;
29678f606604Szijun_hu 	int nr_g0_units;
2968d4b95f80STejun Heo 
296900ae4064STejun Heo 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
297000ae4064STejun Heo 
29714ba6ce25STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2972fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
2973fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
2974fd1e8a1fSTejun Heo 	BUG_ON(ai->nr_groups != 1);
29758f606604Szijun_hu 	upa = ai->alloc_size / ai->unit_size;
29768f606604Szijun_hu 	nr_g0_units = roundup(num_possible_cpus(), upa);
29770b59c25fSIgor Stoppa 	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
29788f606604Szijun_hu 		pcpu_free_alloc_info(ai);
29798f606604Szijun_hu 		return -EINVAL;
29808f606604Szijun_hu 	}
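	/*
	 * Example with made-up numbers: 6 possible cpus and 4 units per
	 * allocation (upa == 4) require group 0 to hold roundup(6, 4)
	 * == 8 units, the last two left unmapped (cpu_map == NR_CPUS).
	 */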
2981fd1e8a1fSTejun Heo 
2982fd1e8a1fSTejun Heo 	unit_pages = ai->unit_size >> PAGE_SHIFT;
2983d4b95f80STejun Heo 
2984d4b95f80STejun Heo 	/* unaligned allocations can't be freed, round up to page size */
2985fd1e8a1fSTejun Heo 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2986fd1e8a1fSTejun Heo 			       sizeof(pages[0]));
29877e1c4e27SMike Rapoport 	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
2988f655f405SMike Rapoport 	if (!pages)
2989f655f405SMike Rapoport 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2990f655f405SMike Rapoport 		      pages_size);
2991d4b95f80STejun Heo 
29928f05a6a6STejun Heo 	/* allocate pages */
2993d4b95f80STejun Heo 	j = 0;
29948f606604Szijun_hu 	for (unit = 0; unit < num_possible_cpus(); unit++) {
2995fd1e8a1fSTejun Heo 		unsigned int cpu = ai->groups[0].cpu_map[unit];
29968f606604Szijun_hu 		for (i = 0; i < unit_pages; i++) {
2997d4b95f80STejun Heo 			void *ptr;
2998d4b95f80STejun Heo 
29993cbc8565STejun Heo 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
3000d4b95f80STejun Heo 			if (!ptr) {
3001870d4b12SJoe Perches 				pr_warn("failed to allocate %s page for cpu%u\n",
3002598d8091SJoe Perches 						psize_str, cpu);
3003d4b95f80STejun Heo 				goto enomem;
3004d4b95f80STejun Heo 			}
3005f528f0b8SCatalin Marinas 			/* kmemleak tracks the percpu allocations separately */
3006f528f0b8SCatalin Marinas 			kmemleak_free(ptr);
3007ce3141a2STejun Heo 			pages[j++] = virt_to_page(ptr);
3008d4b95f80STejun Heo 		}
30098f606604Szijun_hu 	}
3010d4b95f80STejun Heo 
30118f05a6a6STejun Heo 	/* allocate vm area, map the pages and copy static data */
30128f05a6a6STejun Heo 	vm.flags = VM_ALLOC;
3013fd1e8a1fSTejun Heo 	vm.size = num_possible_cpus() * ai->unit_size;
30148f05a6a6STejun Heo 	vm_area_register_early(&vm, PAGE_SIZE);
30158f05a6a6STejun Heo 
3016fd1e8a1fSTejun Heo 	for (unit = 0; unit < num_possible_cpus(); unit++) {
30171d9d3257STejun Heo 		unsigned long unit_addr =
3018fd1e8a1fSTejun Heo 			(unsigned long)vm.addr + unit * ai->unit_size;
30198f05a6a6STejun Heo 
3020ce3141a2STejun Heo 		for (i = 0; i < unit_pages; i++)
30218f05a6a6STejun Heo 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
30228f05a6a6STejun Heo 
30238f05a6a6STejun Heo 		/* pte already populated, the following shouldn't fail */
3024fb435d52STejun Heo 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3025ce3141a2STejun Heo 				      unit_pages);
3026fb435d52STejun Heo 		if (rc < 0)
3027fb435d52STejun Heo 			panic("failed to map percpu area, err=%d\n", rc);
30288f05a6a6STejun Heo 
30298f05a6a6STejun Heo 		/*
30308f05a6a6STejun Heo 		 * FIXME: Archs with virtual cache should flush local
30318f05a6a6STejun Heo 		 * cache for the linear mapping here - something
30328f05a6a6STejun Heo 		 * equivalent to flush_cache_vmap() on the local cpu.
30338f05a6a6STejun Heo 		 * flush_cache_vmap() can't be used as most supporting
30348f05a6a6STejun Heo 		 * data structures are not set up yet.
30358f05a6a6STejun Heo 		 */
30368f05a6a6STejun Heo 
30378f05a6a6STejun Heo 		/* copy static data */
3038fd1e8a1fSTejun Heo 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
303966c3a757STejun Heo 	}
304066c3a757STejun Heo 
304166c3a757STejun Heo 	/* we're ready, commit */
304200206a69SMatteo Croce 	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
304300206a69SMatteo Croce 		unit_pages, psize_str, ai->static_size,
3044fd1e8a1fSTejun Heo 		ai->reserved_size, ai->dyn_size);
304566c3a757STejun Heo 
3046163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, vm.addr);
3047d4b95f80STejun Heo 	goto out_free_ar;
3048d4b95f80STejun Heo 
3049d4b95f80STejun Heo enomem:
3050d4b95f80STejun Heo 	while (--j >= 0)
3051ce3141a2STejun Heo 		free_fn(page_address(pages[j]), PAGE_SIZE);
3052fb435d52STejun Heo 	rc = -ENOMEM;
3053d4b95f80STejun Heo out_free_ar:
3054999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(pages), pages_size);
3055fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
3056fb435d52STejun Heo 	return rc;
305766c3a757STejun Heo }
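
/*
 * Illustrative sketch only (not part of this file; all names below are
 * made up): the callbacks an architecture might pass to
 * pcpu_page_first_chunk().  The alloc/free pair mirrors the
 * memblock-based defaults used further down for the embed helper; pte
 * population is arch specific and only hinted at here.
 */
#if 0
static void * __init pcpu_fc_page_alloc(unsigned int cpu, size_t size,
					size_t align)
{
	/*
	 * @size and @align are always PAGE_SIZE here; @cpu may be used
	 * to allocate from the cpu's NUMA node on NUMA machines.
	 */
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_page_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

static void __init pcpu_fc_page_populate_pte(unsigned long addr)
{
	/*
	 * Walk and allocate pgd..pte covering @addr with the arch's
	 * page table helpers so __pcpu_map_pages() cannot fail later.
	 */
}

void __init setup_per_cpu_areas(void)
{
	if (pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
				  pcpu_fc_page_alloc, pcpu_fc_page_free,
				  pcpu_fc_page_populate_pte) < 0)
		panic("Failed to initialize percpu areas.");
	/* ... then set up __per_cpu_offset[] as in the generic helper below */
}
#endif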
30583c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */
3059d4b95f80STejun Heo 
3060bbddff05STejun Heo #ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
30618c4bfc6eSTejun Heo /*
3062bbddff05STejun Heo  * Generic SMP percpu area setup.
3063e74e3962STejun Heo  *
3064e74e3962STejun Heo  * The embedding helper is used because its behavior closely resembles
3065e74e3962STejun Heo  * the original non-dynamic generic percpu area setup.  This is
3066e74e3962STejun Heo  * important because many archs have addressing restrictions and might
3067e74e3962STejun Heo  * fail if the percpu area is located far away from the previous
3068e74e3962STejun Heo  * location.  As an added bonus, in non-NUMA cases, embedding is
3069e74e3962STejun Heo  * generally a good idea TLB-wise because percpu area can piggy back
3070e74e3962STejun Heo  * on the physical linear memory mapping which uses large page
3071e74e3962STejun Heo  * mappings on applicable archs.
3072e74e3962STejun Heo  */
3073e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3074e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset);
3075e74e3962STejun Heo 
3076c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
3077c8826dd5STejun Heo 				       size_t align)
3078c8826dd5STejun Heo {
307926fb3daeSMike Rapoport 	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
3080c8826dd5STejun Heo }
3081c8826dd5STejun Heo 
3082c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
3083c8826dd5STejun Heo {
3084999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(ptr), size);
3085c8826dd5STejun Heo }
3086c8826dd5STejun Heo 
3087e74e3962STejun Heo void __init setup_per_cpu_areas(void)
3088e74e3962STejun Heo {
3089e74e3962STejun Heo 	unsigned long delta;
3090e74e3962STejun Heo 	unsigned int cpu;
3091fb435d52STejun Heo 	int rc;
3092e74e3962STejun Heo 
3093e74e3962STejun Heo 	/*
3094e74e3962STejun Heo 	 * Always reserve area for module percpu variables.  That's
3095e74e3962STejun Heo 	 * what the legacy allocator did.
3096e74e3962STejun Heo 	 */
3097fb435d52STejun Heo 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
3098c8826dd5STejun Heo 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
3099c8826dd5STejun Heo 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
3100fb435d52STejun Heo 	if (rc < 0)
3101bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
3102e74e3962STejun Heo 
3103e74e3962STejun Heo 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3104e74e3962STejun Heo 	for_each_possible_cpu(cpu)
3105fb435d52STejun Heo 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3106e74e3962STejun Heo }
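
/*
 * Illustrative note: with the offsets established above, the generic
 * percpu accessors resolve roughly as
 *
 *	per_cpu(var, cpu) == *SHIFT_PERCPU_PTR(&var, __per_cpu_offset[cpu])
 *
 * i.e. the static variable's link-time address plus this cpu's offset.
 */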
3107e74e3962STejun Heo #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3108099a19d9STejun Heo 
3109bbddff05STejun Heo #else	/* CONFIG_SMP */
3110bbddff05STejun Heo 
3111bbddff05STejun Heo /*
3112bbddff05STejun Heo  * UP percpu area setup.
3113bbddff05STejun Heo  *
3114bbddff05STejun Heo  * UP always uses the km-based percpu allocator with identity mapping.
3115bbddff05STejun Heo  * Static percpu variables are indistinguishable from the usual static
3116bbddff05STejun Heo  * variables and don't require any special preparation.
3117bbddff05STejun Heo  */
3118bbddff05STejun Heo void __init setup_per_cpu_areas(void)
3119bbddff05STejun Heo {
3120bbddff05STejun Heo 	const size_t unit_size =
3121bbddff05STejun Heo 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3122bbddff05STejun Heo 					 PERCPU_DYNAMIC_RESERVE));
3123bbddff05STejun Heo 	struct pcpu_alloc_info *ai;
3124bbddff05STejun Heo 	void *fc;
3125bbddff05STejun Heo 
3126bbddff05STejun Heo 	ai = pcpu_alloc_alloc_info(1, 1);
312726fb3daeSMike Rapoport 	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3128bbddff05STejun Heo 	if (!ai || !fc)
3129bbddff05STejun Heo 		panic("Failed to allocate memory for percpu areas.");
3130100d13c3SCatalin Marinas 	/* kmemleak tracks the percpu allocations separately */
3131100d13c3SCatalin Marinas 	kmemleak_free(fc);
3132bbddff05STejun Heo 
3133bbddff05STejun Heo 	ai->dyn_size = unit_size;
3134bbddff05STejun Heo 	ai->unit_size = unit_size;
3135bbddff05STejun Heo 	ai->atom_size = unit_size;
3136bbddff05STejun Heo 	ai->alloc_size = unit_size;
3137bbddff05STejun Heo 	ai->groups[0].nr_units = 1;
3138bbddff05STejun Heo 	ai->groups[0].cpu_map[0] = 0;
3139bbddff05STejun Heo 
3140163fa234SKefeng Wang 	pcpu_setup_first_chunk(ai, fc);
3141438a5061SNicolas Pitre 	pcpu_free_alloc_info(ai);
3142bbddff05STejun Heo }
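
/*
 * Worked example with hypothetical config values: if the dynamic
 * reserve were 28KiB and PCPU_MIN_UNIT_SIZE 32KiB, the max is 32KiB,
 * already a power of two, so the single UP unit ends up 32KiB with
 * all of it given to the dynamic area.
 */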
3143bbddff05STejun Heo 
3144bbddff05STejun Heo #endif	/* CONFIG_SMP */
3145bbddff05STejun Heo 
3146099a19d9STejun Heo /*
31477e8a6304SDennis Zhou (Facebook)  * pcpu_nr_pages - calculate total number of populated backing pages
31487e8a6304SDennis Zhou (Facebook)  *
31497e8a6304SDennis Zhou (Facebook)  * This reflects the number of pages populated to back chunks.  Metadata is
31507e8a6304SDennis Zhou (Facebook)  * excluded from the number exposed in meminfo because the number of backing
31517e8a6304SDennis Zhou (Facebook)  * pages scales with the number of cpus and can quickly outweigh the memory
31527e8a6304SDennis Zhou (Facebook)  * used for metadata.  It also keeps this calculation nice and simple.
31537e8a6304SDennis Zhou (Facebook)  *
31547e8a6304SDennis Zhou (Facebook)  * RETURNS:
31557e8a6304SDennis Zhou (Facebook)  * Total number of populated backing pages in use by the allocator.
31567e8a6304SDennis Zhou (Facebook)  */
31577e8a6304SDennis Zhou (Facebook) unsigned long pcpu_nr_pages(void)
31587e8a6304SDennis Zhou (Facebook) {
31597e8a6304SDennis Zhou (Facebook) 	return pcpu_nr_populated * pcpu_nr_units;
31607e8a6304SDennis Zhou (Facebook) }
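
/*
 * Example with made-up numbers: 90 populated pages per unit across 16
 * units yields 1440 pages; this is the figure behind the "Percpu:"
 * line in /proc/meminfo.
 */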
31617e8a6304SDennis Zhou (Facebook) 
31627e8a6304SDennis Zhou (Facebook) /*
31631a4d7607STejun Heo  * The percpu allocator is initialized early during boot when neither slab
31641a4d7607STejun Heo  * nor workqueue is available.  Plug async management until everything is up
31651a4d7607STejun Heo  * and running.
31661a4d7607STejun Heo  */
31671a4d7607STejun Heo static int __init percpu_enable_async(void)
31681a4d7607STejun Heo {
31691a4d7607STejun Heo 	pcpu_async_enabled = true;
31701a4d7607STejun Heo 	return 0;
31711a4d7607STejun Heo }
31721a4d7607STejun Heo subsys_initcall(percpu_enable_async);
3173