/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 license.
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each map entry holds the byte offset at which an
 * area starts, with the lowest bit set if that area is in use.
 * Allocation inside a chunk is done by scanning this map sequentially
 * and serving the first matching entry.  This is mostly copied from
 * the percpu_modalloc() allocator.  Chunks can be determined from
 * the address using the index field in the page struct.  The index
 * field contains a pointer to the chunk.
 *
 * These chunks are organized into lists according to free_size, and
 * the allocator tries to allocate from the fullest chunk first.  Each
 * chunk maintains a maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps prevent the allocator from iterating
 * over chunks unnecessarily.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
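
/*
 * A sketch of the resulting address math (mirroring pcpu_chunk_addr()
 * below; illustrative only, not a new interface): the address of an
 * allocated area for a given cpu is
 *
 *	chunk->base_addr + pcpu_unit_offsets[cpu] + offset
 *
 * so a single offset, handed out once, locates that cpu's copy of the
 * area in every unit of the chunk.
 */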

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 share the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
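
/*
 * The two default conversions above are plain pointer arithmetic and
 * invert each other.  A minimal sketch of the intended identity,
 * assuming the default macros and some unit0 address @addr within the
 * first chunk:
 *
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *
 * then __pcpu_ptr_to_addr(ptr) == addr holds by construction, as one
 * macro subtracts pcpu_base_addr and adds __per_cpu_start while the
 * other does the reverse.
 */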

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_first_chunk->start_offset;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
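
/*
 * Worked examples for the slot math above, using fls() which returns
 * the 1-based index of the most significant set bit:
 *
 *	__pcpu_size_to_slot(12)   == max(4 - 5 + 2, 1)  == 1
 *	__pcpu_size_to_slot(100)  == max(7 - 5 + 2, 1)  == 4
 *	__pcpu_size_to_slot(1024) == max(11 - 5 + 2, 1) == 8
 *
 * i.e. all sizes within the same power-of-two bucket share a slot, and
 * a chunk whose free_size equals pcpu_unit_size (fully free) is kept
 * in the last slot.
 */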

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
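
/*
 * Example use of the iterators above, mirroring how the allocation
 * path below walks unpopulated ranges (sketch only, with hypothetical
 * page_start/page_end bounds):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		... pages [rs, re) are currently unpopulated ...
 *	}
 */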

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
	int off = chunk->map[i] & ~1;
	int end = chunk->map[i + 1] & ~1;

	if (!PAGE_ALIGNED(off) && i > 0) {
		int prev = chunk->map[i - 1];

		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
			off = round_down(off, PAGE_SIZE);
	}

	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
		int next = chunk->map[i + 1];
		int nend = chunk->map[i + 2] & ~1;

		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
			end = round_up(end, PAGE_SIZE);
	}

	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}
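
/*
 * Worked example of the straddle rule above (4k pages): an area at
 * [6k, 9k) fully contains no page, so the base count is zero.  If the
 * area below is free down to 4k, @off rounds down to 4k, and if the
 * area above is free up to 12k, @end rounds up to 12k; the area is
 * then credited with PFN_DOWN(12k) - PFN_UP(4k) == 2 pages.
 */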

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the remaining
 * headroom is low.  If !@is_atomic, it aims for more empty space.
 * Combined, this ensures that the map is likely to have enough
 * available space to accommodate atomic allocations which can't
 * extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
	int margin, new_alloc;

	lockdep_assert_held(&pcpu_lock);

	if (is_atomic) {
		margin = 3;

		if (chunk->map_alloc <
		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
			if (list_empty(&chunk->map_extend_list)) {
				list_add_tail(&chunk->map_extend_list,
					      &pcpu_map_extend_chunks);
				pcpu_schedule_balance_work();
			}
		}
	} else {
		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
	}

	if (chunk->map_alloc >= chunk->map_used + margin)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + margin)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	lockdep_assert_held(&pcpu_alloc_mutex);

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old);
	pcpu_mem_free(new);

	return 0;
}
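
/*
 * A compressed sketch of the pattern used above: the larger map is
 * allocated with pcpu_lock dropped, the pointer swap happens under
 * the lock, and whichever buffer lost out is freed only after
 * unlocking because vfree() takes an IRQ-unsafe lock:
 *
 *	new = pcpu_mem_zalloc(new_size);	... may sleep ...
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	... swap chunk->map in if still needed ...
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 *	pcpu_mem_free(old);			... outside the lock ...
 */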

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}
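
/*
 * Worked example (sketch): with @off == 100 and @align == 64, the
 * first candidate offset is ALIGN(100, 64) == 128, i.e. head == 28.
 * The area is rejected if @this_size < 28 + @size; with @pop_only,
 * the loop additionally skips past unpopulated pages and retries from
 * the next populated page boundary.
 */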

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only, int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
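
/*
 * Illustration of the map encoding manipulated above (hypothetical
 * values): offsets are even because sizes and alignments are forced
 * even, so the low bit is free to mean "in use".  A chunk with one
 * allocation at [0, 128) and the rest free would look like:
 *
 *	chunk->map[0] == (0 | 1)		[0, 128) allocated
 *	chunk->map[1] == 128			[128, unit_size) free
 *	chunk->map[2] == (pcpu_unit_size | 1)	end sentinel, in-use bit set
 *	chunk->map_used == 2
 */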

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
			   int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;

	INIT_LIST_HEAD(&chunk->list);
	INIT_LIST_HEAD(&chunk->map_extend_list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}
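
/*
 * A freshly created chunk therefore starts as a single free region
 * spanning the whole unit: map[0] == 0 marks a free area at offset 0
 * and map[1] == (pcpu_unit_size | 1) is the in-use end sentinel that
 * stops merging past the end of the unit.
 */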

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the backing page struct
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	int occ_pages = 0;
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 16bit alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	if (!is_atomic)
		mutex_lock(&pcpu_alloc_mutex);

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (is_atomic ||
			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, is_atomic,
				      &occ_pages);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
			if (new_alloc) {
				if (is_atomic)
					continue;
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align, is_atomic,
					      &occ_pages);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic) {
		err = "atomic alloc failed, no space left";
		goto fail;
	}

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	goto restart;

area_found:
	pcpu_stats_area_alloc(chunk, size);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				pcpu_free_area(chunk, off, &occ_pages);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (chunk != pcpu_reserved_chunk) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_nr_empty_pop_pages -= occ_pages;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
			chunk->base_addr, off, ptr);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
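
/*
 * Typical caller-side usage of the exported interface, a minimal
 * sketch with a hypothetical counter (alloc_percpu() is the type-safe
 * wrapper around __alloc_percpu()):
 *
 *	unsigned long __percpu *events = alloc_percpu(unsigned long);
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	if (events) {
 *		this_cpu_inc(*events);
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(events, cpu);
 *		free_percpu(events);
 *	}
 */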
1083fbf59bc9STejun Heo 
1084edcb4639STejun Heo /**
1085edcb4639STejun Heo  * __alloc_reserved_percpu - allocate reserved percpu area
1086edcb4639STejun Heo  * @size: size of area to allocate in bytes
1087edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
1088edcb4639STejun Heo  *
10899329ba97STejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align
10909329ba97STejun Heo  * from reserved percpu area if arch has set it up; otherwise,
10919329ba97STejun Heo  * allocation is served from the same dynamic area.  Might sleep.
10929329ba97STejun Heo  * Might trigger writeouts.
1093edcb4639STejun Heo  *
1094ccea34b5STejun Heo  * CONTEXT:
1095ccea34b5STejun Heo  * Does GFP_KERNEL allocation.
1096ccea34b5STejun Heo  *
1097edcb4639STejun Heo  * RETURNS:
1098edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
1099edcb4639STejun Heo  */
110043cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1101edcb4639STejun Heo {
11025835d96eSTejun Heo 	return pcpu_alloc(size, align, true, GFP_KERNEL);
1103edcb4639STejun Heo }
1104edcb4639STejun Heo 
1105a56dbddfSTejun Heo /**
11061a4d7607STejun Heo  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1107a56dbddfSTejun Heo  * @work: unused
1108a56dbddfSTejun Heo  *
1109a56dbddfSTejun Heo  * Reclaim all fully free chunks except for the first one.
1110a56dbddfSTejun Heo  */
1111fe6bd8c3STejun Heo static void pcpu_balance_workfn(struct work_struct *work)
1112fbf59bc9STejun Heo {
1113fe6bd8c3STejun Heo 	LIST_HEAD(to_free);
1114fe6bd8c3STejun Heo 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1115a56dbddfSTejun Heo 	struct pcpu_chunk *chunk, *next;
11161a4d7607STejun Heo 	int slot, nr_to_pop, ret;
1117a56dbddfSTejun Heo 
11181a4d7607STejun Heo 	/*
11191a4d7607STejun Heo 	 * There's no reason to keep around multiple unused chunks and VM
11201a4d7607STejun Heo 	 * areas can be scarce.  Destroy all free chunks except for one.
11211a4d7607STejun Heo 	 */
1122ccea34b5STejun Heo 	mutex_lock(&pcpu_alloc_mutex);
1123ccea34b5STejun Heo 	spin_lock_irq(&pcpu_lock);
1124a56dbddfSTejun Heo 
1125fe6bd8c3STejun Heo 	list_for_each_entry_safe(chunk, next, free_head, list) {
11268d408b4bSTejun Heo 		WARN_ON(chunk->immutable);
1127a56dbddfSTejun Heo 
1128a56dbddfSTejun Heo 		/* spare the first one */
1129fe6bd8c3STejun Heo 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1130a56dbddfSTejun Heo 			continue;
1131a56dbddfSTejun Heo 
11324f996e23STejun Heo 		list_del_init(&chunk->map_extend_list);
1133fe6bd8c3STejun Heo 		list_move(&chunk->list, &to_free);
1134a56dbddfSTejun Heo 	}
1135a56dbddfSTejun Heo 
1136ccea34b5STejun Heo 	spin_unlock_irq(&pcpu_lock);
1137a56dbddfSTejun Heo 
1138fe6bd8c3STejun Heo 	list_for_each_entry_safe(chunk, next, &to_free, list) {
1139a93ace48STejun Heo 		int rs, re;
1140dca49645STejun Heo 
1141a93ace48STejun Heo 		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1142a93ace48STejun Heo 			pcpu_depopulate_chunk(chunk, rs, re);
1143b539b87fSTejun Heo 			spin_lock_irq(&pcpu_lock);
1144b539b87fSTejun Heo 			pcpu_chunk_depopulated(chunk, rs, re);
1145b539b87fSTejun Heo 			spin_unlock_irq(&pcpu_lock);
1146a93ace48STejun Heo 		}
11476081089fSTejun Heo 		pcpu_destroy_chunk(chunk);
1148fbf59bc9STejun Heo 	}
1149971f3918STejun Heo 
11504f996e23STejun Heo 	/* service chunks which requested async area map extension */
11514f996e23STejun Heo 	do {
11524f996e23STejun Heo 		int new_alloc = 0;
11534f996e23STejun Heo 
11544f996e23STejun Heo 		spin_lock_irq(&pcpu_lock);
11554f996e23STejun Heo 
11564f996e23STejun Heo 		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
11574f996e23STejun Heo 					struct pcpu_chunk, map_extend_list);
11584f996e23STejun Heo 		if (chunk) {
11594f996e23STejun Heo 			list_del_init(&chunk->map_extend_list);
11604f996e23STejun Heo 			new_alloc = pcpu_need_to_extend(chunk, false);
11614f996e23STejun Heo 		}
11624f996e23STejun Heo 
11634f996e23STejun Heo 		spin_unlock_irq(&pcpu_lock);
11644f996e23STejun Heo 
11654f996e23STejun Heo 		if (new_alloc)
11664f996e23STejun Heo 			pcpu_extend_area_map(chunk, new_alloc);
11674f996e23STejun Heo 	} while (chunk);
11684f996e23STejun Heo 
11691a4d7607STejun Heo 	/*
11701a4d7607STejun Heo 	 * Ensure there is a certain number of free populated pages for
11711a4d7607STejun Heo 	 * atomic allocs.  Fill up from the most packed chunks so that
11721a4d7607STejun Heo 	 * atomic allocs don't increase fragmentation.  If a previous
11731a4d7607STejun Heo 	 * atomic allocation failed, always populate the maximum amount.
11741a4d7607STejun Heo 	 * This should prevent atomic allocs larger than PAGE_SIZE from
11751a4d7607STejun Heo 	 * failing indefinitely; however, large atomic allocs are not
11761a4d7607STejun Heo 	 * something we support properly and they can be highly
11771a4d7607STejun Heo 	 * unreliable and inefficient.
11781a4d7607STejun Heo 	 */
11791a4d7607STejun Heo retry_pop:
11801a4d7607STejun Heo 	if (pcpu_atomic_alloc_failed) {
11811a4d7607STejun Heo 		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
11821a4d7607STejun Heo 		/* best effort anyway, don't worry about synchronization */
11831a4d7607STejun Heo 		pcpu_atomic_alloc_failed = false;
11841a4d7607STejun Heo 	} else {
11851a4d7607STejun Heo 		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
11861a4d7607STejun Heo 				  pcpu_nr_empty_pop_pages,
11871a4d7607STejun Heo 				  0, PCPU_EMPTY_POP_PAGES_HIGH);
11881a4d7607STejun Heo 	}
11891a4d7607STejun Heo 
11901a4d7607STejun Heo 	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
11911a4d7607STejun Heo 		int nr_unpop = 0, rs, re;
11921a4d7607STejun Heo 
11931a4d7607STejun Heo 		if (!nr_to_pop)
11941a4d7607STejun Heo 			break;
11951a4d7607STejun Heo 
11961a4d7607STejun Heo 		spin_lock_irq(&pcpu_lock);
11971a4d7607STejun Heo 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
11981a4d7607STejun Heo 			nr_unpop = pcpu_unit_pages - chunk->nr_populated;
11991a4d7607STejun Heo 			if (nr_unpop)
12001a4d7607STejun Heo 				break;
12011a4d7607STejun Heo 		}
12021a4d7607STejun Heo 		spin_unlock_irq(&pcpu_lock);
12031a4d7607STejun Heo 
12041a4d7607STejun Heo 		if (!nr_unpop)
12051a4d7607STejun Heo 			continue;
12061a4d7607STejun Heo 
12071a4d7607STejun Heo 		/* @chunk can't go away while pcpu_alloc_mutex is held */
12081a4d7607STejun Heo 		pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
12091a4d7607STejun Heo 			int nr = min(re - rs, nr_to_pop);
12101a4d7607STejun Heo 
12111a4d7607STejun Heo 			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
12121a4d7607STejun Heo 			if (!ret) {
12131a4d7607STejun Heo 				nr_to_pop -= nr;
12141a4d7607STejun Heo 				spin_lock_irq(&pcpu_lock);
12151a4d7607STejun Heo 				pcpu_chunk_populated(chunk, rs, rs + nr);
12161a4d7607STejun Heo 				spin_unlock_irq(&pcpu_lock);
12171a4d7607STejun Heo 			} else {
12181a4d7607STejun Heo 				nr_to_pop = 0;
12191a4d7607STejun Heo 			}
12201a4d7607STejun Heo 
12211a4d7607STejun Heo 			if (!nr_to_pop)
12221a4d7607STejun Heo 				break;
12231a4d7607STejun Heo 		}
12241a4d7607STejun Heo 	}
12251a4d7607STejun Heo 
12261a4d7607STejun Heo 	if (nr_to_pop) {
12271a4d7607STejun Heo 		/* ran out of chunks to populate, create a new one and retry */
12281a4d7607STejun Heo 		chunk = pcpu_create_chunk();
12291a4d7607STejun Heo 		if (chunk) {
12301a4d7607STejun Heo 			spin_lock_irq(&pcpu_lock);
12311a4d7607STejun Heo 			pcpu_chunk_relocate(chunk, -1);
12321a4d7607STejun Heo 			spin_unlock_irq(&pcpu_lock);
12331a4d7607STejun Heo 			goto retry_pop;
12341a4d7607STejun Heo 		}
12351a4d7607STejun Heo 	}
12361a4d7607STejun Heo 
1237971f3918STejun Heo 	mutex_unlock(&pcpu_alloc_mutex);
1238a56dbddfSTejun Heo }
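
For context, this workfn is driven by a work item declared earlier in this file and kicked asynchronously by callers such as free_percpu() below; a sketch of the hookup:

	static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);

	/* from a call site that noticed work to do: */
	schedule_work(&pcpu_balance_work);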
1239fbf59bc9STejun Heo 
1240fbf59bc9STejun Heo /**
1241fbf59bc9STejun Heo  * free_percpu - free percpu area
1242fbf59bc9STejun Heo  * @ptr: pointer to area to free
1243fbf59bc9STejun Heo  *
1244ccea34b5STejun Heo  * Free percpu area @ptr.
1245ccea34b5STejun Heo  *
1246ccea34b5STejun Heo  * CONTEXT:
1247ccea34b5STejun Heo  * Can be called from atomic context.
1248fbf59bc9STejun Heo  */
124943cf38ebSTejun Heo void free_percpu(void __percpu *ptr)
1250fbf59bc9STejun Heo {
1251129182e5SAndrew Morton 	void *addr;
1252fbf59bc9STejun Heo 	struct pcpu_chunk *chunk;
1253ccea34b5STejun Heo 	unsigned long flags;
1254b539b87fSTejun Heo 	int off, occ_pages;
1255fbf59bc9STejun Heo 
1256fbf59bc9STejun Heo 	if (!ptr)
1257fbf59bc9STejun Heo 		return;
1258fbf59bc9STejun Heo 
1259f528f0b8SCatalin Marinas 	kmemleak_free_percpu(ptr);
1260f528f0b8SCatalin Marinas 
1261129182e5SAndrew Morton 	addr = __pcpu_ptr_to_addr(ptr);
1262129182e5SAndrew Morton 
1263ccea34b5STejun Heo 	spin_lock_irqsave(&pcpu_lock, flags);
1264fbf59bc9STejun Heo 
1265fbf59bc9STejun Heo 	chunk = pcpu_chunk_addr_search(addr);
1266bba174f5STejun Heo 	off = addr - chunk->base_addr;
1267fbf59bc9STejun Heo 
1268b539b87fSTejun Heo 	pcpu_free_area(chunk, off, &occ_pages);
1269b539b87fSTejun Heo 
1270b539b87fSTejun Heo 	if (chunk != pcpu_reserved_chunk)
1271b539b87fSTejun Heo 		pcpu_nr_empty_pop_pages += occ_pages;
1272fbf59bc9STejun Heo 
1273a56dbddfSTejun Heo 	/* if there is more than one fully free chunk, wake up the grim reaper */
1274fbf59bc9STejun Heo 	if (chunk->free_size == pcpu_unit_size) {
1275fbf59bc9STejun Heo 		struct pcpu_chunk *pos;
1276fbf59bc9STejun Heo 
1277a56dbddfSTejun Heo 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1278fbf59bc9STejun Heo 			if (pos != chunk) {
12791a4d7607STejun Heo 				pcpu_schedule_balance_work();
1280fbf59bc9STejun Heo 				break;
1281fbf59bc9STejun Heo 			}
1282fbf59bc9STejun Heo 	}
1283fbf59bc9STejun Heo 
1284df95e795SDennis Zhou 	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
1285df95e795SDennis Zhou 
1286ccea34b5STejun Heo 	spin_unlock_irqrestore(&pcpu_lock, flags);
1287fbf59bc9STejun Heo }
1288fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu);
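
A typical lifecycle pairing the allocation and free paths (names hypothetical); note that free_percpu(NULL) is a no-op, mirroring kfree():

	struct hit_stats __percpu *stats = alloc_percpu(struct hit_stats);
	unsigned int cpu;
	u64 total = 0;

	if (!stats)
		return -ENOMEM;
	/* ... hot paths update via this_cpu_inc(stats->hits) ... */
	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(stats, cpu)->hits;	/* fold all units */
	free_percpu(stats);				/* NULL-safe */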
1289fbf59bc9STejun Heo 
1290383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1291383776faSThomas Gleixner {
1292383776faSThomas Gleixner #ifdef CONFIG_SMP
1293383776faSThomas Gleixner 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1294383776faSThomas Gleixner 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1295383776faSThomas Gleixner 	unsigned int cpu;
1296383776faSThomas Gleixner 
1297383776faSThomas Gleixner 	for_each_possible_cpu(cpu) {
1298383776faSThomas Gleixner 		void *start = per_cpu_ptr(base, cpu);
1299383776faSThomas Gleixner 		void *va = (void *)addr;
1300383776faSThomas Gleixner 
1301383776faSThomas Gleixner 		if (va >= start && va < start + static_size) {
13028ce371f9SPeter Zijlstra 			if (can_addr) {
1303383776faSThomas Gleixner 				*can_addr = (unsigned long) (va - start);
13048ce371f9SPeter Zijlstra 				*can_addr += (unsigned long)
13058ce371f9SPeter Zijlstra 					per_cpu_ptr(base, get_boot_cpu_id());
13068ce371f9SPeter Zijlstra 			}
1307383776faSThomas Gleixner 			return true;
1308383776faSThomas Gleixner 		}
1309383776faSThomas Gleixner 	}
1310383776faSThomas Gleixner #endif
1311383776faSThomas Gleixner 	/* on UP, can't distinguish from other static vars, always false */
1312383776faSThomas Gleixner 	return false;
1313383776faSThomas Gleixner }
1314383776faSThomas Gleixner 
13153b034b0dSVivek Goyal /**
131610fad5e4STejun Heo  * is_kernel_percpu_address - test whether address is from static percpu area
131710fad5e4STejun Heo  * @addr: address to test
131810fad5e4STejun Heo  *
131910fad5e4STejun Heo  * Test whether @addr belongs to the in-kernel static percpu area.  Module
132010fad5e4STejun Heo  * static percpu areas are not considered.  For those, use
132110fad5e4STejun Heo  * is_module_percpu_address().
132210fad5e4STejun Heo  *
132310fad5e4STejun Heo  * RETURNS:
132410fad5e4STejun Heo  * %true if @addr is from the in-kernel static percpu area, %false otherwise.
132510fad5e4STejun Heo  */
132610fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr)
132710fad5e4STejun Heo {
1328383776faSThomas Gleixner 	return __is_kernel_percpu_address(addr, NULL);
132910fad5e4STejun Heo }
133010fad5e4STejun Heo 
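
A sketch of the intended use (the variable is hypothetical); lockdep, for example, relies on this test to recognize statically allocated percpu objects:

	static DEFINE_PER_CPU(int, my_var);	/* hypothetical */
	unsigned long addr;

	addr = (unsigned long)per_cpu_ptr(&my_var, 0);
	if (is_kernel_percpu_address(addr)) {
		/* addr points into the in-kernel static percpu area */
	}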
133110fad5e4STejun Heo /**
13323b034b0dSVivek Goyal  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
13333b034b0dSVivek Goyal  * @addr: the address to be converted to physical address
13343b034b0dSVivek Goyal  *
13353b034b0dSVivek Goyal  * Given @addr, a dereferenceable address obtained via one of the percpu
13363b034b0dSVivek Goyal  * access macros, this function translates it into its physical address.
13373b034b0dSVivek Goyal  * The caller is responsible for ensuring @addr stays valid until this
13383b034b0dSVivek Goyal  * function finishes.
13393b034b0dSVivek Goyal  *
134067589c71SDave Young  * The percpu allocator has a special setup for the first chunk, which
134167589c71SDave Young  * currently supports either embedding in the linear address space or
134267589c71SDave Young  * vmalloc mapping; from the second chunk on, the backing allocator
134367589c71SDave Young  * (currently either vm or km) provides the translation.
134467589c71SDave Young  *
1345bffc4375SYannick Guerrini  * The address could be translated directly without checking whether it
134667589c71SDave Young  * falls into the first chunk, but the current code better reflects how
134767589c71SDave Young  * the percpu allocator actually works, and the verification can discover
134867589c71SDave Young  * bugs both in the percpu allocator itself and in per_cpu_ptr_to_phys()
134967589c71SDave Young  * callers.  So we keep the current code.
135067589c71SDave Young  *
13513b034b0dSVivek Goyal  * RETURNS:
13523b034b0dSVivek Goyal  * The physical address for @addr.
13533b034b0dSVivek Goyal  */
13543b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr)
13553b034b0dSVivek Goyal {
13569983b6f0STejun Heo 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
13579983b6f0STejun Heo 	bool in_first_chunk = false;
1358a855b84cSTejun Heo 	unsigned long first_low, first_high;
13599983b6f0STejun Heo 	unsigned int cpu;
13609983b6f0STejun Heo 
13619983b6f0STejun Heo 	/*
1362a855b84cSTejun Heo 	 * The following test on unit_low/high isn't strictly
13639983b6f0STejun Heo 	 * necessary but will speed up lookups of addresses which
13649983b6f0STejun Heo 	 * aren't in the first chunk.
13659983b6f0STejun Heo 	 */
1366a855b84cSTejun Heo 	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1367a855b84cSTejun Heo 	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
13689983b6f0STejun Heo 				     pcpu_unit_pages);
1369a855b84cSTejun Heo 	if ((unsigned long)addr >= first_low &&
1370a855b84cSTejun Heo 	    (unsigned long)addr < first_high) {
13719983b6f0STejun Heo 		for_each_possible_cpu(cpu) {
13729983b6f0STejun Heo 			void *start = per_cpu_ptr(base, cpu);
13739983b6f0STejun Heo 
13749983b6f0STejun Heo 			if (addr >= start && addr < start + pcpu_unit_size) {
13759983b6f0STejun Heo 				in_first_chunk = true;
13769983b6f0STejun Heo 				break;
13779983b6f0STejun Heo 			}
13789983b6f0STejun Heo 		}
13799983b6f0STejun Heo 	}
13809983b6f0STejun Heo 
13819983b6f0STejun Heo 	if (in_first_chunk) {
1382eac522efSDavid Howells 		if (!is_vmalloc_addr(addr))
13833b034b0dSVivek Goyal 			return __pa(addr);
13843b034b0dSVivek Goyal 		else
13859f57bd4dSEugene Surovegin 			return page_to_phys(vmalloc_to_page(addr)) +
13869f57bd4dSEugene Surovegin 			       offset_in_page(addr);
1387020ec653STejun Heo 	} else
13889f57bd4dSEugene Surovegin 		return page_to_phys(pcpu_addr_to_page(addr)) +
13899f57bd4dSEugene Surovegin 		       offset_in_page(addr);
13903b034b0dSVivek Goyal }
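
A caller sketch (the variable name is hypothetical): translating one CPU's copy of a percpu variable into a physical address, e.g. for handing to firmware or a dump tool:

	static DEFINE_PER_CPU(u64, boot_rec);	/* hypothetical */
	phys_addr_t pa;

	pa = per_cpu_ptr_to_phys(per_cpu_ptr(&boot_rec, cpu));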
13913b034b0dSVivek Goyal 
1392fbf59bc9STejun Heo /**
1393fd1e8a1fSTejun Heo  * pcpu_alloc_alloc_info - allocate percpu allocation info
1394fd1e8a1fSTejun Heo  * @nr_groups: the number of groups
1395fd1e8a1fSTejun Heo  * @nr_units: the number of units
1396033e48fbSTejun Heo  *
1397fd1e8a1fSTejun Heo  * Allocate ai which is large enough for @nr_groups groups containing
1398fd1e8a1fSTejun Heo  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1399fd1e8a1fSTejun Heo  * cpu_map array which is long enough for @nr_units and filled with
1400fd1e8a1fSTejun Heo  * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
1401fd1e8a1fSTejun Heo  * pointers of the other groups.
1402033e48fbSTejun Heo  *
1403033e48fbSTejun Heo  * RETURNS:
1404fd1e8a1fSTejun Heo  * Pointer to the allocated pcpu_alloc_info on success, NULL on
1405fd1e8a1fSTejun Heo  * failure.
1406033e48fbSTejun Heo  */
1407fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1408fd1e8a1fSTejun Heo 						      int nr_units)
1409fd1e8a1fSTejun Heo {
1410fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
1411fd1e8a1fSTejun Heo 	size_t base_size, ai_size;
1412fd1e8a1fSTejun Heo 	void *ptr;
1413fd1e8a1fSTejun Heo 	int unit;
1414fd1e8a1fSTejun Heo 
1415fd1e8a1fSTejun Heo 	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1416fd1e8a1fSTejun Heo 			  __alignof__(ai->groups[0].cpu_map[0]));
1417fd1e8a1fSTejun Heo 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1418fd1e8a1fSTejun Heo 
1419999c17e3SSantosh Shilimkar 	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1420fd1e8a1fSTejun Heo 	if (!ptr)
1421fd1e8a1fSTejun Heo 		return NULL;
1422fd1e8a1fSTejun Heo 	ai = ptr;
1423fd1e8a1fSTejun Heo 	ptr += base_size;
1424fd1e8a1fSTejun Heo 
1425fd1e8a1fSTejun Heo 	ai->groups[0].cpu_map = ptr;
1426fd1e8a1fSTejun Heo 
1427fd1e8a1fSTejun Heo 	for (unit = 0; unit < nr_units; unit++)
1428fd1e8a1fSTejun Heo 		ai->groups[0].cpu_map[unit] = NR_CPUS;
1429fd1e8a1fSTejun Heo 
1430fd1e8a1fSTejun Heo 	ai->nr_groups = nr_groups;
1431fd1e8a1fSTejun Heo 	ai->__ai_size = PFN_ALIGN(ai_size);
1432fd1e8a1fSTejun Heo 
1433fd1e8a1fSTejun Heo 	return ai;
1434fd1e8a1fSTejun Heo }
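
Because only groups[0].cpu_map is initialized, a caller building a multi-group layout carves the later groups out of the same array; a minimal sketch:

	struct pcpu_alloc_info *ai;

	ai = pcpu_alloc_alloc_info(2, 4);	/* 2 groups, 4 units total */
	if (!ai)
		return -ENOMEM;
	ai->groups[0].nr_units = 2;
	/* later groups must point into the shared cpu_map array */
	ai->groups[1].cpu_map = ai->groups[0].cpu_map + 2;
	ai->groups[1].nr_units = 2;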
1435fd1e8a1fSTejun Heo 
1436fd1e8a1fSTejun Heo /**
1437fd1e8a1fSTejun Heo  * pcpu_free_alloc_info - free percpu allocation info
1438fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info to free
1439fd1e8a1fSTejun Heo  *
1440fd1e8a1fSTejun Heo  * Free @ai which was allocated by pcpu_alloc_alloc_info().
1441fd1e8a1fSTejun Heo  */
1442fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1443fd1e8a1fSTejun Heo {
1444999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(ai), ai->__ai_size);
1445fd1e8a1fSTejun Heo }
1446fd1e8a1fSTejun Heo 
1447fd1e8a1fSTejun Heo /**
1448fd1e8a1fSTejun Heo  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1449fd1e8a1fSTejun Heo  * @lvl: loglevel
1450fd1e8a1fSTejun Heo  * @ai: allocation info to dump
1451fd1e8a1fSTejun Heo  *
1452fd1e8a1fSTejun Heo  * Print out information about @ai using loglevel @lvl.
1453fd1e8a1fSTejun Heo  */
1454fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
1455fd1e8a1fSTejun Heo 				 const struct pcpu_alloc_info *ai)
1456033e48fbSTejun Heo {
1457fd1e8a1fSTejun Heo 	int group_width = 1, cpu_width = 1, width;
1458033e48fbSTejun Heo 	char empty_str[] = "--------";
1459fd1e8a1fSTejun Heo 	int alloc = 0, alloc_end = 0;
1460fd1e8a1fSTejun Heo 	int group, v;
1461fd1e8a1fSTejun Heo 	int upa, apl;	/* units per alloc, allocs per line */
1462033e48fbSTejun Heo 
1463fd1e8a1fSTejun Heo 	v = ai->nr_groups;
1464033e48fbSTejun Heo 	while (v /= 10)
1465fd1e8a1fSTejun Heo 		group_width++;
1466033e48fbSTejun Heo 
1467fd1e8a1fSTejun Heo 	v = num_possible_cpus();
1468fd1e8a1fSTejun Heo 	while (v /= 10)
1469fd1e8a1fSTejun Heo 		cpu_width++;
1470fd1e8a1fSTejun Heo 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1471033e48fbSTejun Heo 
1472fd1e8a1fSTejun Heo 	upa = ai->alloc_size / ai->unit_size;
1473fd1e8a1fSTejun Heo 	width = upa * (cpu_width + 1) + group_width + 3;
1474fd1e8a1fSTejun Heo 	apl = rounddown_pow_of_two(max(60 / width, 1));
1475033e48fbSTejun Heo 
1476fd1e8a1fSTejun Heo 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1477fd1e8a1fSTejun Heo 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1478fd1e8a1fSTejun Heo 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1479fd1e8a1fSTejun Heo 
1480fd1e8a1fSTejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
1481fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
1482fd1e8a1fSTejun Heo 		int unit = 0, unit_end = 0;
1483fd1e8a1fSTejun Heo 
1484fd1e8a1fSTejun Heo 		BUG_ON(gi->nr_units % upa);
1485fd1e8a1fSTejun Heo 		for (alloc_end += gi->nr_units / upa;
1486fd1e8a1fSTejun Heo 		     alloc < alloc_end; alloc++) {
1487fd1e8a1fSTejun Heo 			if (!(alloc % apl)) {
14881170532bSJoe Perches 				pr_cont("\n");
1489fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl);
1490033e48fbSTejun Heo 			}
14911170532bSJoe Perches 			pr_cont("[%0*d] ", group_width, group);
1492fd1e8a1fSTejun Heo 
1493fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++)
1494fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS)
14951170532bSJoe Perches 					pr_cont("%0*d ",
14961170532bSJoe Perches 						cpu_width, gi->cpu_map[unit]);
1497033e48fbSTejun Heo 				else
14981170532bSJoe Perches 					pr_cont("%s ", empty_str);
1499033e48fbSTejun Heo 		}
1500fd1e8a1fSTejun Heo 	}
15011170532bSJoe Perches 	pr_cont("\n");
1502033e48fbSTejun Heo }
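
For reference, the dump looks roughly like the following boot-log excerpt (values illustrative, from a typical x86 configuration with eight CPUs in one group):

	pcpu-alloc: s98200 r8192 d28776 u262144 alloc=1*2097152
	pcpu-alloc: [0] 0 1 2 3 4 5 6 7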
1503033e48fbSTejun Heo 
1504fbf59bc9STejun Heo /**
15058d408b4bSTejun Heo  * pcpu_setup_first_chunk - initialize the first percpu chunk
1506fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info describing how the percpu area is shaped
150738a6be52STejun Heo  * @base_addr: mapped address
1508fbf59bc9STejun Heo  *
15098d408b4bSTejun Heo  * Initialize the first percpu chunk which contains the kernel static
15108d408b4bSTejun Heo  * percpu area.  This function is to be called from the arch percpu area
151138a6be52STejun Heo  * setup path.
15128d408b4bSTejun Heo  *
1513fd1e8a1fSTejun Heo  * @ai contains all information necessary to initialize the first
1514fd1e8a1fSTejun Heo  * chunk and prime the dynamic percpu allocator.
15158d408b4bSTejun Heo  *
1516fd1e8a1fSTejun Heo  * @ai->static_size is the size of static percpu area.
1517fd1e8a1fSTejun Heo  *
1518fd1e8a1fSTejun Heo  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1519edcb4639STejun Heo  * reserve after the static area in the first chunk.  This reserves
1520edcb4639STejun Heo  * the first chunk such that it's available only through reserved
1521edcb4639STejun Heo  * percpu allocation.  This is primarily used to serve module percpu
1522edcb4639STejun Heo  * static areas on architectures where the addressing model has
1523edcb4639STejun Heo  * limited offset range for symbol relocations to guarantee module
1524edcb4639STejun Heo  * percpu symbols fall inside the relocatable range.
1525edcb4639STejun Heo  *
1526fd1e8a1fSTejun Heo  * @ai->dyn_size determines the number of bytes available for dynamic
1527fd1e8a1fSTejun Heo  * allocation in the first chunk.  The area between @ai->static_size +
1528fd1e8a1fSTejun Heo  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
15296074d5b0STejun Heo  *
1530fd1e8a1fSTejun Heo  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1531fd1e8a1fSTejun Heo  * and equal to or larger than @ai->static_size + @ai->reserved_size +
1532fd1e8a1fSTejun Heo  * @ai->dyn_size.
15338d408b4bSTejun Heo  *
1534fd1e8a1fSTejun Heo  * @ai->atom_size is the allocation atom size and used as alignment
1535fd1e8a1fSTejun Heo  * for vm areas.
15368d408b4bSTejun Heo  *
1537fd1e8a1fSTejun Heo  * @ai->alloc_size is the allocation size and always multiple of
1538fd1e8a1fSTejun Heo  * @ai->atom_size.  This is larger than @ai->atom_size if
1539fd1e8a1fSTejun Heo  * @ai->unit_size is larger than @ai->atom_size.
1540fd1e8a1fSTejun Heo  *
1541fd1e8a1fSTejun Heo  * @ai->nr_groups and @ai->groups describe virtual memory layout of
1542fd1e8a1fSTejun Heo  * percpu areas.  Units which should be colocated are put into the
1543fd1e8a1fSTejun Heo  * same group.  Dynamic VM areas will be allocated according to these
1544fd1e8a1fSTejun Heo  * groupings.  If @ai->nr_groups is zero, a single group containing
1545fd1e8a1fSTejun Heo  * all units is assumed.
15468d408b4bSTejun Heo  *
154738a6be52STejun Heo  * The caller should have mapped the first chunk at @base_addr and
154838a6be52STejun Heo  * copied static data to each unit.
1549fbf59bc9STejun Heo  *
1550edcb4639STejun Heo  * If the first chunk ends up with both reserved and dynamic areas, it
1551edcb4639STejun Heo  * is served by two chunks - one to serve the core static and reserved
1552edcb4639STejun Heo  * areas and the other for the dynamic area.  They share the same vm
1553edcb4639STejun Heo  * and page map but use different area allocation maps to stay away
1554edcb4639STejun Heo  * from each other.  The latter chunk is circulated in the chunk slots
1555edcb4639STejun Heo  * and is available for dynamic allocation like any other chunk.
1556edcb4639STejun Heo  *
1557fbf59bc9STejun Heo  * RETURNS:
1558fb435d52STejun Heo  * 0 on success, -errno on failure.
1559fbf59bc9STejun Heo  */
1560fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1561fd1e8a1fSTejun Heo 				  void *base_addr)
1562fbf59bc9STejun Heo {
1563099a19d9STejun Heo 	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1564099a19d9STejun Heo 	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1565*b9c39442SDennis Zhou (Facebook) 	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1566edcb4639STejun Heo 	struct pcpu_chunk *schunk, *dchunk = NULL;
15676563297cSTejun Heo 	unsigned long *group_offsets;
15686563297cSTejun Heo 	size_t *group_sizes;
1569fb435d52STejun Heo 	unsigned long *unit_off;
1570fbf59bc9STejun Heo 	unsigned int cpu;
1571fd1e8a1fSTejun Heo 	int *unit_map;
1572fd1e8a1fSTejun Heo 	int group, unit, i;
1573fbf59bc9STejun Heo 
1574635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond)	do {					\
1575635b75fcSTejun Heo 	if (unlikely(cond)) {						\
1576870d4b12SJoe Perches 		pr_emerg("failed to initialize, %s\n", #cond);		\
1577870d4b12SJoe Perches 		pr_emerg("cpu_possible_mask=%*pb\n",			\
1578807de073STejun Heo 			 cpumask_pr_args(cpu_possible_mask));		\
1579635b75fcSTejun Heo 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1580635b75fcSTejun Heo 		BUG();							\
1581635b75fcSTejun Heo 	}								\
1582635b75fcSTejun Heo } while (0)
1583635b75fcSTejun Heo 
15842f39e637STejun Heo 	/* sanity checks */
1585635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1586bbddff05STejun Heo #ifdef CONFIG_SMP
1587635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(!ai->static_size);
1588f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
1589bbddff05STejun Heo #endif
1590635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(!base_addr);
1591f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
1592635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1593f09f1243SAlexander Kuleshov 	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
1594635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1595099a19d9STejun Heo 	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1596fb29a2ccSDennis Zhou (Facebook) 	PCPU_SETUP_BUG_ON(!ai->dyn_size);
15979f645532STejun Heo 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
15988d408b4bSTejun Heo 
15996563297cSTejun Heo 	/* process group information and build config tables accordingly */
1600999c17e3SSantosh Shilimkar 	group_offsets = memblock_virt_alloc(ai->nr_groups *
1601999c17e3SSantosh Shilimkar 					     sizeof(group_offsets[0]), 0);
1602999c17e3SSantosh Shilimkar 	group_sizes = memblock_virt_alloc(ai->nr_groups *
1603999c17e3SSantosh Shilimkar 					   sizeof(group_sizes[0]), 0);
1604999c17e3SSantosh Shilimkar 	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1605999c17e3SSantosh Shilimkar 	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
16062f39e637STejun Heo 
1607fd1e8a1fSTejun Heo 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1608ffe0d5a5STejun Heo 		unit_map[cpu] = UINT_MAX;
1609a855b84cSTejun Heo 
1610a855b84cSTejun Heo 	pcpu_low_unit_cpu = NR_CPUS;
1611a855b84cSTejun Heo 	pcpu_high_unit_cpu = NR_CPUS;
16122f39e637STejun Heo 
1613fd1e8a1fSTejun Heo 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1614fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
16152f39e637STejun Heo 
16166563297cSTejun Heo 		group_offsets[group] = gi->base_offset;
16176563297cSTejun Heo 		group_sizes[group] = gi->nr_units * ai->unit_size;
16186563297cSTejun Heo 
1619fd1e8a1fSTejun Heo 		for (i = 0; i < gi->nr_units; i++) {
1620fd1e8a1fSTejun Heo 			cpu = gi->cpu_map[i];
1621fd1e8a1fSTejun Heo 			if (cpu == NR_CPUS)
1622fd1e8a1fSTejun Heo 				continue;
1623fd1e8a1fSTejun Heo 
16249f295664SDan Carpenter 			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1625635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1626635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1627fd1e8a1fSTejun Heo 
1628fd1e8a1fSTejun Heo 			unit_map[cpu] = unit + i;
1629fb435d52STejun Heo 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1630fb435d52STejun Heo 
1631a855b84cSTejun Heo 			/* determine low/high unit_cpu */
1632a855b84cSTejun Heo 			if (pcpu_low_unit_cpu == NR_CPUS ||
1633a855b84cSTejun Heo 			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1634a855b84cSTejun Heo 				pcpu_low_unit_cpu = cpu;
1635a855b84cSTejun Heo 			if (pcpu_high_unit_cpu == NR_CPUS ||
1636a855b84cSTejun Heo 			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1637a855b84cSTejun Heo 				pcpu_high_unit_cpu = cpu;
16380fc0531eSLinus Torvalds 		}
16390fc0531eSLinus Torvalds 	}
1640fd1e8a1fSTejun Heo 	pcpu_nr_units = unit;
16412f39e637STejun Heo 
16422f39e637STejun Heo 	for_each_possible_cpu(cpu)
1643635b75fcSTejun Heo 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1644635b75fcSTejun Heo 
1645635b75fcSTejun Heo 	/* we're done parsing the input, undefine BUG macro and dump config */
1646635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON
1647bcbea798STejun Heo 	pcpu_dump_alloc_info(KERN_DEBUG, ai);
16482f39e637STejun Heo 
16496563297cSTejun Heo 	pcpu_nr_groups = ai->nr_groups;
16506563297cSTejun Heo 	pcpu_group_offsets = group_offsets;
16516563297cSTejun Heo 	pcpu_group_sizes = group_sizes;
1652fd1e8a1fSTejun Heo 	pcpu_unit_map = unit_map;
1653fb435d52STejun Heo 	pcpu_unit_offsets = unit_off;
16542f39e637STejun Heo 
16552f39e637STejun Heo 	/* determine basic parameters */
1656fd1e8a1fSTejun Heo 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1657d9b55eebSTejun Heo 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
16586563297cSTejun Heo 	pcpu_atom_size = ai->atom_size;
1659ce3141a2STejun Heo 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1660ce3141a2STejun Heo 		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1661cafe8816STejun Heo 
166230a5b536SDennis Zhou 	pcpu_stats_save_ai(ai);
166330a5b536SDennis Zhou 
1664d9b55eebSTejun Heo 	/*
1665d9b55eebSTejun Heo 	 * Allocate chunk slots.  The additional last slot is for
1666d9b55eebSTejun Heo 	 * empty chunks.
1667d9b55eebSTejun Heo 	 */
1668d9b55eebSTejun Heo 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1669999c17e3SSantosh Shilimkar 	pcpu_slot = memblock_virt_alloc(
1670999c17e3SSantosh Shilimkar 			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1671fbf59bc9STejun Heo 	for (i = 0; i < pcpu_nr_slots; i++)
1672fbf59bc9STejun Heo 		INIT_LIST_HEAD(&pcpu_slot[i]);
1673fbf59bc9STejun Heo 
1674edcb4639STejun Heo 	/*
1675edcb4639STejun Heo 	 * Initialize static chunk.  If reserved_size is zero, the
1676edcb4639STejun Heo 	 * static chunk covers static area + dynamic allocation area
1677edcb4639STejun Heo 	 * in the first chunk.  If reserved_size is not zero, it
1678edcb4639STejun Heo 	 * covers static area + reserved area (mostly used for module
1679edcb4639STejun Heo 	 * static percpu allocation).
1680edcb4639STejun Heo 	 */
1681999c17e3SSantosh Shilimkar 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
16822441d15cSTejun Heo 	INIT_LIST_HEAD(&schunk->list);
16834f996e23STejun Heo 	INIT_LIST_HEAD(&schunk->map_extend_list);
1684bba174f5STejun Heo 	schunk->base_addr = base_addr;
1685e2266705SDennis Zhou (Facebook) 	schunk->start_offset = ai->static_size;
168661ace7faSTejun Heo 	schunk->map = smap;
168761ace7faSTejun Heo 	schunk->map_alloc = ARRAY_SIZE(smap);
168838a6be52STejun Heo 	schunk->immutable = true;
1689ce3141a2STejun Heo 	bitmap_fill(schunk->populated, pcpu_unit_pages);
1690b539b87fSTejun Heo 	schunk->nr_populated = pcpu_unit_pages;
1691edcb4639STejun Heo 
1692*b9c39442SDennis Zhou (Facebook) 	schunk->free_size = ai->reserved_size ?: ai->dyn_size;
1693fb29a2ccSDennis Zhou (Facebook) 	schunk->contig_hint = schunk->free_size;
1694723ad1d9SAl Viro 	schunk->map[0] = 1;
1695e2266705SDennis Zhou (Facebook) 	schunk->map[1] = schunk->start_offset;
1696fb29a2ccSDennis Zhou (Facebook) 	schunk->map[2] = (ai->static_size + schunk->free_size) | 1;
1697fb29a2ccSDennis Zhou (Facebook) 	schunk->map_used = 2;
169861ace7faSTejun Heo 
1699edcb4639STejun Heo 	/* init dynamic chunk if necessary */
1700*b9c39442SDennis Zhou (Facebook) 	if (ai->reserved_size) {
1701*b9c39442SDennis Zhou (Facebook) 		pcpu_reserved_chunk = schunk;
1702*b9c39442SDennis Zhou (Facebook) 
1703999c17e3SSantosh Shilimkar 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1704edcb4639STejun Heo 		INIT_LIST_HEAD(&dchunk->list);
17054f996e23STejun Heo 		INIT_LIST_HEAD(&dchunk->map_extend_list);
1706bba174f5STejun Heo 		dchunk->base_addr = base_addr;
1707e2266705SDennis Zhou (Facebook) 		dchunk->start_offset = ai->static_size + ai->reserved_size;
1708edcb4639STejun Heo 		dchunk->map = dmap;
1709edcb4639STejun Heo 		dchunk->map_alloc = ARRAY_SIZE(dmap);
171038a6be52STejun Heo 		dchunk->immutable = true;
1711ce3141a2STejun Heo 		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1712b539b87fSTejun Heo 		dchunk->nr_populated = pcpu_unit_pages;
1713edcb4639STejun Heo 
1714*b9c39442SDennis Zhou (Facebook) 		dchunk->contig_hint = dchunk->free_size = ai->dyn_size;
1715723ad1d9SAl Viro 		dchunk->map[0] = 1;
1716e2266705SDennis Zhou (Facebook) 		dchunk->map[1] = dchunk->start_offset;
1717e2266705SDennis Zhou (Facebook) 		dchunk->map[2] = (dchunk->start_offset + dchunk->free_size) | 1;
1718723ad1d9SAl Viro 		dchunk->map_used = 2;
1719edcb4639STejun Heo 	}
1720edcb4639STejun Heo 
17212441d15cSTejun Heo 	/* link the first chunk in */
1722ae9e6bc9STejun Heo 	pcpu_first_chunk = dchunk ?: schunk;
1723e2266705SDennis Zhou (Facebook) 	i = (pcpu_first_chunk->start_offset) ? 1 : 0;
1724b539b87fSTejun Heo 	pcpu_nr_empty_pop_pages +=
1725e2266705SDennis Zhou (Facebook) 		pcpu_count_occupied_pages(pcpu_first_chunk, i);
1726ae9e6bc9STejun Heo 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1727fbf59bc9STejun Heo 
172830a5b536SDennis Zhou 	pcpu_stats_chunk_alloc();
1729df95e795SDennis Zhou 	trace_percpu_create_chunk(base_addr);
173030a5b536SDennis Zhou 
1731fbf59bc9STejun Heo 	/* we're done */
1732bba174f5STejun Heo 	pcpu_base_addr = base_addr;
1733fb435d52STejun Heo 	return 0;
1734fbf59bc9STejun Heo }
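
Putting the contract together, an arch setup path does roughly the following before the first dynamic percpu access (a condensed sketch; error handling and the provenance of ai, base_addr and nr_units are omitted):

	int unit, rc;

	/* base_addr is mapped; copy static data into every unit */
	for (unit = 0; unit < nr_units; unit++)
		memcpy(base_addr + unit * ai->unit_size,
		       __per_cpu_load, ai->static_size);

	rc = pcpu_setup_first_chunk(ai, base_addr);
	if (rc < 0)
		panic("percpu: first chunk setup failed, err=%d\n", rc);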
173566c3a757STejun Heo 
1736bbddff05STejun Heo #ifdef CONFIG_SMP
1737bbddff05STejun Heo 
173817f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1739f58dc01bSTejun Heo 	[PCPU_FC_AUTO]	= "auto",
1740f58dc01bSTejun Heo 	[PCPU_FC_EMBED]	= "embed",
1741f58dc01bSTejun Heo 	[PCPU_FC_PAGE]	= "page",
1742f58dc01bSTejun Heo };
174366c3a757STejun Heo 
1744f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1745f58dc01bSTejun Heo 
1746f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str)
174766c3a757STejun Heo {
17485479c78aSCyrill Gorcunov 	if (!str)
17495479c78aSCyrill Gorcunov 		return -EINVAL;
17505479c78aSCyrill Gorcunov 
1751f58dc01bSTejun Heo 	if (0)
1752f58dc01bSTejun Heo 		/* nada */;
1753f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1754f58dc01bSTejun Heo 	else if (!strcmp(str, "embed"))
1755f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_EMBED;
1756f58dc01bSTejun Heo #endif
1757f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1758f58dc01bSTejun Heo 	else if (!strcmp(str, "page"))
1759f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_PAGE;
1760f58dc01bSTejun Heo #endif
1761f58dc01bSTejun Heo 	else
1762870d4b12SJoe Perches 		pr_warn("unknown allocator %s specified\n", str);
176366c3a757STejun Heo 
1764f58dc01bSTejun Heo 	return 0;
176566c3a757STejun Heo }
1766f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup);
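
On kernels built with more than one first-chunk allocator, the choice can thus be overridden on the kernel command line, e.g.:

	percpu_alloc=page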
176766c3a757STejun Heo 
17683c9a024fSTejun Heo /*
17693c9a024fSTejun Heo  * pcpu_embed_first_chunk() is used by the generic percpu setup.
17703c9a024fSTejun Heo  * Build it if the arch config requires it or if the generic setup is
17713c9a024fSTejun Heo  * going to be used.
17723c9a024fSTejun Heo  */
177308fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
177408fc4580STejun Heo 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
17753c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK
17763c9a024fSTejun Heo #endif
17773c9a024fSTejun Heo 
17783c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */
17793c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
17803c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK
17813c9a024fSTejun Heo #endif
17823c9a024fSTejun Heo 
17833c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */
17843c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
17853c9a024fSTejun Heo /**
1786fbf59bc9STejun Heo  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1787fbf59bc9STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
1788fbf59bc9STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
1789fbf59bc9STejun Heo  * @atom_size: allocation atom size
1790fbf59bc9STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
1791fbf59bc9STejun Heo  *
1792fbf59bc9STejun Heo  * This function determines grouping of units, their mappings to cpus
1793fbf59bc9STejun Heo  * and other parameters considering needed percpu size, allocation
1794fbf59bc9STejun Heo  * atom size and distances between CPUs.
1795fbf59bc9STejun Heo  *
1796bffc4375SYannick Guerrini  * Group sizes are always multiples of atom size, and CPUs which are
1797fbf59bc9STejun Heo  * within LOCAL_DISTANCE of each other both ways are grouped together
1798fbf59bc9STejun Heo  * and share space for units in the same group.  The returned
1799fbf59bc9STejun Heo  * configuration is guaranteed to have CPUs on different nodes in
1800fbf59bc9STejun Heo  * different groups and >=75% usage of the allocated virtual address space.
1801fbf59bc9STejun Heo  *
1802fbf59bc9STejun Heo  * RETURNS:
1803fbf59bc9STejun Heo  * On success, pointer to the new allocation_info is returned.  On
1804fbf59bc9STejun Heo  * failure, ERR_PTR value is returned.
1805fbf59bc9STejun Heo  */
1806fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1807fbf59bc9STejun Heo 				size_t reserved_size, size_t dyn_size,
1808fbf59bc9STejun Heo 				size_t atom_size,
1809fbf59bc9STejun Heo 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1810fbf59bc9STejun Heo {
1811fbf59bc9STejun Heo 	static int group_map[NR_CPUS] __initdata;
1812fbf59bc9STejun Heo 	static int group_cnt[NR_CPUS] __initdata;
1813fbf59bc9STejun Heo 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1814fbf59bc9STejun Heo 	int nr_groups = 1, nr_units = 0;
1815fbf59bc9STejun Heo 	size_t size_sum, min_unit_size, alloc_size;
1816fbf59bc9STejun Heo 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1817fbf59bc9STejun Heo 	int last_allocs, group, unit;
1818fbf59bc9STejun Heo 	unsigned int cpu, tcpu;
1819fbf59bc9STejun Heo 	struct pcpu_alloc_info *ai;
1820fbf59bc9STejun Heo 	unsigned int *cpu_map;
1821fbf59bc9STejun Heo 
1822fbf59bc9STejun Heo 	/* this function may be called multiple times */
1823fbf59bc9STejun Heo 	memset(group_map, 0, sizeof(group_map));
1824fbf59bc9STejun Heo 	memset(group_cnt, 0, sizeof(group_cnt));
1825fbf59bc9STejun Heo 
1826fbf59bc9STejun Heo 	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1827fbf59bc9STejun Heo 	size_sum = PFN_ALIGN(static_size + reserved_size +
1828fbf59bc9STejun Heo 			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1829fbf59bc9STejun Heo 	dyn_size = size_sum - static_size - reserved_size;
1830fbf59bc9STejun Heo 
1831fbf59bc9STejun Heo 	/*
1832fbf59bc9STejun Heo 	 * Determine min_unit_size, alloc_size and max_upa such that
1833fbf59bc9STejun Heo 	 * alloc_size is multiple of atom_size and is the smallest
183425985edcSLucas De Marchi 	 * which can accommodate 4k aligned segments which are equal to
1835fbf59bc9STejun Heo 	 * or larger than min_unit_size.
1836fbf59bc9STejun Heo 	 */
1837fbf59bc9STejun Heo 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1838fbf59bc9STejun Heo 
18399c015162SDennis Zhou (Facebook) 	/* determine the maximum # of units that can fit in an allocation */
1840fbf59bc9STejun Heo 	alloc_size = roundup(min_unit_size, atom_size);
1841fbf59bc9STejun Heo 	upa = alloc_size / min_unit_size;
1842f09f1243SAlexander Kuleshov 	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1843fbf59bc9STejun Heo 		upa--;
1844fbf59bc9STejun Heo 	max_upa = upa;
1845fbf59bc9STejun Heo 
1846fbf59bc9STejun Heo 	/* group cpus according to their proximity */
1847fbf59bc9STejun Heo 	for_each_possible_cpu(cpu) {
1848fbf59bc9STejun Heo 		group = 0;
1849fbf59bc9STejun Heo 	next_group:
1850fbf59bc9STejun Heo 		for_each_possible_cpu(tcpu) {
1851fbf59bc9STejun Heo 			if (cpu == tcpu)
1852fbf59bc9STejun Heo 				break;
1853fbf59bc9STejun Heo 			if (group_map[tcpu] == group && cpu_distance_fn &&
1854fbf59bc9STejun Heo 			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1855fbf59bc9STejun Heo 			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1856fbf59bc9STejun Heo 				group++;
1857fbf59bc9STejun Heo 				nr_groups = max(nr_groups, group + 1);
1858fbf59bc9STejun Heo 				goto next_group;
1859fbf59bc9STejun Heo 			}
1860fbf59bc9STejun Heo 		}
1861fbf59bc9STejun Heo 		group_map[cpu] = group;
1862fbf59bc9STejun Heo 		group_cnt[group]++;
1863fbf59bc9STejun Heo 	}
1864fbf59bc9STejun Heo 
1865fbf59bc9STejun Heo 	/*
18669c015162SDennis Zhou (Facebook) 	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
18679c015162SDennis Zhou (Facebook) 	 * Expand the unit_size until we use >= 75% of the units allocated.
18689c015162SDennis Zhou (Facebook) 	 * This is related to atom_size, which could be much larger than the unit_size.
1869fbf59bc9STejun Heo 	 */
1870fbf59bc9STejun Heo 	last_allocs = INT_MAX;
1871fbf59bc9STejun Heo 	for (upa = max_upa; upa; upa--) {
1872fbf59bc9STejun Heo 		int allocs = 0, wasted = 0;
1873fbf59bc9STejun Heo 
1874f09f1243SAlexander Kuleshov 		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1875fbf59bc9STejun Heo 			continue;
1876fbf59bc9STejun Heo 
1877fbf59bc9STejun Heo 		for (group = 0; group < nr_groups; group++) {
1878fbf59bc9STejun Heo 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1879fbf59bc9STejun Heo 			allocs += this_allocs;
1880fbf59bc9STejun Heo 			wasted += this_allocs * upa - group_cnt[group];
1881fbf59bc9STejun Heo 		}
1882fbf59bc9STejun Heo 
1883fbf59bc9STejun Heo 		/*
1884fbf59bc9STejun Heo 		 * Don't accept if wastage is over 1/3.  The
1885fbf59bc9STejun Heo 		 * greater-than comparison ensures upa==1 always
1886fbf59bc9STejun Heo 		 * passes the following check.
1887fbf59bc9STejun Heo 		 */
1888fbf59bc9STejun Heo 		if (wasted > num_possible_cpus() / 3)
1889fbf59bc9STejun Heo 			continue;
1890fbf59bc9STejun Heo 
1891fbf59bc9STejun Heo 		/* and then don't consume more memory */
1892fbf59bc9STejun Heo 		if (allocs > last_allocs)
1893fbf59bc9STejun Heo 			break;
1894fbf59bc9STejun Heo 		last_allocs = allocs;
1895fbf59bc9STejun Heo 		best_upa = upa;
1896fbf59bc9STejun Heo 	}
1897fbf59bc9STejun Heo 	upa = best_upa;
1898fbf59bc9STejun Heo 
1899fbf59bc9STejun Heo 	/* allocate and fill alloc_info */
1900fbf59bc9STejun Heo 	for (group = 0; group < nr_groups; group++)
1901fbf59bc9STejun Heo 		nr_units += roundup(group_cnt[group], upa);
1902fbf59bc9STejun Heo 
1903fbf59bc9STejun Heo 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1904fbf59bc9STejun Heo 	if (!ai)
1905fbf59bc9STejun Heo 		return ERR_PTR(-ENOMEM);
1906fbf59bc9STejun Heo 	cpu_map = ai->groups[0].cpu_map;
1907fbf59bc9STejun Heo 
1908fbf59bc9STejun Heo 	for (group = 0; group < nr_groups; group++) {
1909fbf59bc9STejun Heo 		ai->groups[group].cpu_map = cpu_map;
1910fbf59bc9STejun Heo 		cpu_map += roundup(group_cnt[group], upa);
1911fbf59bc9STejun Heo 	}
1912fbf59bc9STejun Heo 
1913fbf59bc9STejun Heo 	ai->static_size = static_size;
1914fbf59bc9STejun Heo 	ai->reserved_size = reserved_size;
1915fbf59bc9STejun Heo 	ai->dyn_size = dyn_size;
1916fbf59bc9STejun Heo 	ai->unit_size = alloc_size / upa;
1917fbf59bc9STejun Heo 	ai->atom_size = atom_size;
1918fbf59bc9STejun Heo 	ai->alloc_size = alloc_size;
1919fbf59bc9STejun Heo 
1920fbf59bc9STejun Heo 	for (group = 0, unit = 0; group_cnt[group]; group++) {
1921fbf59bc9STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
1922fbf59bc9STejun Heo 
1923fbf59bc9STejun Heo 		/*
1924fbf59bc9STejun Heo 		 * Initialize base_offset as if all groups are located
1925fbf59bc9STejun Heo 		 * back-to-back.  The caller should update this to
1926fbf59bc9STejun Heo 		 * reflect actual allocation.
1927fbf59bc9STejun Heo 		 */
1928fbf59bc9STejun Heo 		gi->base_offset = unit * ai->unit_size;
1929fbf59bc9STejun Heo 
1930fbf59bc9STejun Heo 		for_each_possible_cpu(cpu)
1931fbf59bc9STejun Heo 			if (group_map[cpu] == group)
1932fbf59bc9STejun Heo 				gi->cpu_map[gi->nr_units++] = cpu;
1933fbf59bc9STejun Heo 		gi->nr_units = roundup(gi->nr_units, upa);
1934fbf59bc9STejun Heo 		unit += gi->nr_units;
1935fbf59bc9STejun Heo 	}
1936fbf59bc9STejun Heo 	BUG_ON(unit != nr_units);
1937fbf59bc9STejun Heo 
1938fbf59bc9STejun Heo 	return ai;
1939fbf59bc9STejun Heo }
19403c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
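
A worked example of the upa fitting above (numbers illustrative): with size_sum = 192K and atom_size = 2M,

	alloc_size = roundup(192K, 2M) = 2M
	upa = 2M / 192K = 10	/* 2M % 10 != 0 -> reject */
	upa = 9			/* 2M %  9 != 0 -> reject */
	upa = 8			/* 2M %  8 == 0 and 2M/8 = 256K is page-aligned */

so max_upa ends up 8 and each unit is 256K.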
1941fbf59bc9STejun Heo 
19423c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK)
194366c3a757STejun Heo /**
194466c3a757STejun Heo  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
194566c3a757STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
19464ba6ce25STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
1947c8826dd5STejun Heo  * @atom_size: allocation atom size
1948c8826dd5STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
1949c8826dd5STejun Heo  * @alloc_fn: function to allocate percpu page
195025985edcSLucas De Marchi  * @free_fn: function to free percpu page
195166c3a757STejun Heo  *
195266c3a757STejun Heo  * This is a helper to ease setting up an embedded first percpu chunk and
195366c3a757STejun Heo  * can be called where pcpu_setup_first_chunk() is expected.
195466c3a757STejun Heo  *
195566c3a757STejun Heo  * If this function is used to set up the first chunk, it is allocated
1956c8826dd5STejun Heo  * by calling @alloc_fn and used as-is without being mapped into
1957c8826dd5STejun Heo  * vmalloc area.  Allocations are always whole multiples of @atom_size
1958c8826dd5STejun Heo  * aligned to @atom_size.
1959c8826dd5STejun Heo  *
1960c8826dd5STejun Heo  * This enables the first chunk to piggyback on the linear physical
1961c8826dd5STejun Heo  * mapping, which often uses larger page sizes.  Please note that this
1962c8826dd5STejun Heo  * can result in a very sparse cpu->unit mapping on NUMA machines, thus
1963c8826dd5STejun Heo  * requiring a large vmalloc address space.  Don't use this allocator if
1964c8826dd5STejun Heo  * vmalloc space is not orders of magnitude larger than the distances
1965c8826dd5STejun Heo  * between node memory addresses (i.e. 32-bit NUMA machines).
196666c3a757STejun Heo  *
19674ba6ce25STejun Heo  * @dyn_size specifies the minimum dynamic area size.
196866c3a757STejun Heo  *
196966c3a757STejun Heo  * If the needed size is smaller than the minimum or specified unit
1970c8826dd5STejun Heo  * size, the leftover is returned using @free_fn.
197166c3a757STejun Heo  *
197266c3a757STejun Heo  * RETURNS:
1973fb435d52STejun Heo  * 0 on success, -errno on failure.
197466c3a757STejun Heo  */
19754ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1976c8826dd5STejun Heo 				  size_t atom_size,
1977c8826dd5STejun Heo 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1978c8826dd5STejun Heo 				  pcpu_fc_alloc_fn_t alloc_fn,
1979c8826dd5STejun Heo 				  pcpu_fc_free_fn_t free_fn)
198066c3a757STejun Heo {
1981c8826dd5STejun Heo 	void *base = (void *)ULONG_MAX;
1982c8826dd5STejun Heo 	void **areas = NULL;
1983fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
198493c76b6bSzijun_hu 	size_t size_sum, areas_size;
198593c76b6bSzijun_hu 	unsigned long max_distance;
19869b739662Szijun_hu 	int group, i, highest_group, rc;
198766c3a757STejun Heo 
1988c8826dd5STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1989c8826dd5STejun Heo 				   cpu_distance_fn);
1990fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
1991fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
199266c3a757STejun Heo 
1993fd1e8a1fSTejun Heo 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1994c8826dd5STejun Heo 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
199566c3a757STejun Heo 
1996999c17e3SSantosh Shilimkar 	areas = memblock_virt_alloc_nopanic(areas_size, 0);
1997c8826dd5STejun Heo 	if (!areas) {
1998fb435d52STejun Heo 		rc = -ENOMEM;
1999c8826dd5STejun Heo 		goto out_free;
2000fa8a7094STejun Heo 	}
200166c3a757STejun Heo 
20029b739662Szijun_hu 	/* allocate, copy and determine base address & max_distance */
20039b739662Szijun_hu 	highest_group = 0;
2004c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
2005c8826dd5STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
2006c8826dd5STejun Heo 		unsigned int cpu = NR_CPUS;
2007c8826dd5STejun Heo 		void *ptr;
200866c3a757STejun Heo 
2009c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2010c8826dd5STejun Heo 			cpu = gi->cpu_map[i];
2011c8826dd5STejun Heo 		BUG_ON(cpu == NR_CPUS);
2012c8826dd5STejun Heo 
2013c8826dd5STejun Heo 		/* allocate space for the whole group */
2014c8826dd5STejun Heo 		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2015c8826dd5STejun Heo 		if (!ptr) {
2016c8826dd5STejun Heo 			rc = -ENOMEM;
2017c8826dd5STejun Heo 			goto out_free_areas;
2018c8826dd5STejun Heo 		}
2019f528f0b8SCatalin Marinas 		/* kmemleak tracks the percpu allocations separately */
2020f528f0b8SCatalin Marinas 		kmemleak_free(ptr);
2021c8826dd5STejun Heo 		areas[group] = ptr;
2022c8826dd5STejun Heo 
2023c8826dd5STejun Heo 		base = min(ptr, base);
20249b739662Szijun_hu 		if (ptr > areas[highest_group])
20259b739662Szijun_hu 			highest_group = group;
20269b739662Szijun_hu 	}
20279b739662Szijun_hu 	max_distance = areas[highest_group] - base;
20289b739662Szijun_hu 	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
20299b739662Szijun_hu 
20309b739662Szijun_hu 	/* warn if maximum distance is further than 75% of vmalloc space */
20319b739662Szijun_hu 	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
20329b739662Szijun_hu 		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
20339b739662Szijun_hu 				max_distance, VMALLOC_TOTAL);
20349b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
20359b739662Szijun_hu 		/* and fail if we have fallback */
20369b739662Szijun_hu 		rc = -EINVAL;
20379b739662Szijun_hu 		goto out_free_areas;
20389b739662Szijun_hu #endif
203942b64281STejun Heo 	}
204042b64281STejun Heo 
204142b64281STejun Heo 	/*
204242b64281STejun Heo 	 * Copy data and free unused parts.  This should happen after all
204342b64281STejun Heo 	 * allocations are complete; otherwise, we may end up with
204442b64281STejun Heo 	 * overlapping groups.
204542b64281STejun Heo 	 */
204642b64281STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
204742b64281STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
204842b64281STejun Heo 		void *ptr = areas[group];
2049c8826dd5STejun Heo 
2050c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2051c8826dd5STejun Heo 			if (gi->cpu_map[i] == NR_CPUS) {
2052c8826dd5STejun Heo 				/* unused unit, free whole */
2053c8826dd5STejun Heo 				free_fn(ptr, ai->unit_size);
2054c8826dd5STejun Heo 				continue;
2055c8826dd5STejun Heo 			}
2056c8826dd5STejun Heo 			/* copy and return the unused part */
2057fd1e8a1fSTejun Heo 			memcpy(ptr, __per_cpu_load, ai->static_size);
2058c8826dd5STejun Heo 			free_fn(ptr + size_sum, ai->unit_size - size_sum);
2059c8826dd5STejun Heo 		}
206066c3a757STejun Heo 	}
206166c3a757STejun Heo 
2062c8826dd5STejun Heo 	/* base address is now known, determine group base offsets */
20636ea529a2STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
2064c8826dd5STejun Heo 		ai->groups[group].base_offset = areas[group] - base;
20656ea529a2STejun Heo 	}
2066c8826dd5STejun Heo 
2067870d4b12SJoe Perches 	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2068fd1e8a1fSTejun Heo 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2069fd1e8a1fSTejun Heo 		ai->dyn_size, ai->unit_size);
207066c3a757STejun Heo 
2071fb435d52STejun Heo 	rc = pcpu_setup_first_chunk(ai, base);
2072c8826dd5STejun Heo 	goto out_free;
2073c8826dd5STejun Heo 
2074c8826dd5STejun Heo out_free_areas:
2075c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++)
2076f851c8d8SMichael Holzheu 		if (areas[group])
2077c8826dd5STejun Heo 			free_fn(areas[group],
2078c8826dd5STejun Heo 				ai->groups[group].nr_units * ai->unit_size);
2079c8826dd5STejun Heo out_free:
2080fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
2081c8826dd5STejun Heo 	if (areas)
2082999c17e3SSantosh Shilimkar 		memblock_free_early(__pa(areas), areas_size);
2083fb435d52STejun Heo 	return rc;
2084d4b95f80STejun Heo }
20853c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */
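
A sketch of a typical arch call site (helper names modeled loosely on arch/x86's setup code; treat them as illustrative):

	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE,
				    PMD_SIZE,		/* large-page atom */
				    pcpu_cpu_distance,	/* NUMA distance cb */
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		pr_warn("percpu: embed allocator failed (%d)\n", rc);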
2086d4b95f80STejun Heo 
20873c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK
2088d4b95f80STejun Heo /**
208900ae4064STejun Heo  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2090d4b95f80STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
2091d4b95f80STejun Heo  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
209225985edcSLucas De Marchi  * @free_fn: function to free percpu page, always called with PAGE_SIZE
2093d4b95f80STejun Heo  * @populate_pte_fn: function to populate pte
2094d4b95f80STejun Heo  *
209500ae4064STejun Heo  * This is a helper to ease setting up a page-remapped first percpu
209600ae4064STejun Heo  * chunk and can be called where pcpu_setup_first_chunk() is expected.
2097d4b95f80STejun Heo  *
2098d4b95f80STejun Heo  * This is the basic allocator.  The static percpu area is allocated
2099d4b95f80STejun Heo  * page-by-page into the vmalloc area.
2100d4b95f80STejun Heo  *
2101d4b95f80STejun Heo  * RETURNS:
2102fb435d52STejun Heo  * 0 on success, -errno on failure.
2103d4b95f80STejun Heo  */
2104fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size,
2105d4b95f80STejun Heo 				 pcpu_fc_alloc_fn_t alloc_fn,
2106d4b95f80STejun Heo 				 pcpu_fc_free_fn_t free_fn,
2107d4b95f80STejun Heo 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2108d4b95f80STejun Heo {
21098f05a6a6STejun Heo 	static struct vm_struct vm;
2110fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
211100ae4064STejun Heo 	char psize_str[16];
2112ce3141a2STejun Heo 	int unit_pages;
2113d4b95f80STejun Heo 	size_t pages_size;
2114ce3141a2STejun Heo 	struct page **pages;
2115fb435d52STejun Heo 	int unit, i, j, rc;
21168f606604Szijun_hu 	int upa;
21178f606604Szijun_hu 	int nr_g0_units;
2118d4b95f80STejun Heo 
211900ae4064STejun Heo 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
212000ae4064STejun Heo 
21214ba6ce25STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2122fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
2123fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
2124fd1e8a1fSTejun Heo 	BUG_ON(ai->nr_groups != 1);
21258f606604Szijun_hu 	upa = ai->alloc_size / ai->unit_size;
21268f606604Szijun_hu 	nr_g0_units = roundup(num_possible_cpus(), upa);
21278f606604Szijun_hu 	if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
21288f606604Szijun_hu 		pcpu_free_alloc_info(ai);
21298f606604Szijun_hu 		return -EINVAL;
21308f606604Szijun_hu 	}
2131fd1e8a1fSTejun Heo 
2132fd1e8a1fSTejun Heo 	unit_pages = ai->unit_size >> PAGE_SHIFT;
2133d4b95f80STejun Heo 
2134d4b95f80STejun Heo 	/* unaligned allocations can't be freed, round up to page size */
2135fd1e8a1fSTejun Heo 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2136fd1e8a1fSTejun Heo 			       sizeof(pages[0]));
2137999c17e3SSantosh Shilimkar 	pages = memblock_virt_alloc(pages_size, 0);
2138d4b95f80STejun Heo 
21398f05a6a6STejun Heo 	/* allocate pages */
2140d4b95f80STejun Heo 	j = 0;
21418f606604Szijun_hu 	for (unit = 0; unit < num_possible_cpus(); unit++) {
2142fd1e8a1fSTejun Heo 		unsigned int cpu = ai->groups[0].cpu_map[unit];
21438f606604Szijun_hu 		for (i = 0; i < unit_pages; i++) {
2144d4b95f80STejun Heo 			void *ptr;
2145d4b95f80STejun Heo 
21463cbc8565STejun Heo 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2147d4b95f80STejun Heo 			if (!ptr) {
2148870d4b12SJoe Perches 				pr_warn("failed to allocate %s page for cpu%u\n",
2149598d8091SJoe Perches 						psize_str, cpu);
2150d4b95f80STejun Heo 				goto enomem;
2151d4b95f80STejun Heo 			}
2152f528f0b8SCatalin Marinas 			/* kmemleak tracks the percpu allocations separately */
2153f528f0b8SCatalin Marinas 			kmemleak_free(ptr);
2154ce3141a2STejun Heo 			pages[j++] = virt_to_page(ptr);
2155d4b95f80STejun Heo 		}
21568f606604Szijun_hu 	}
2157d4b95f80STejun Heo 
21588f05a6a6STejun Heo 	/* allocate vm area, map the pages and copy static data */
21598f05a6a6STejun Heo 	vm.flags = VM_ALLOC;
2160fd1e8a1fSTejun Heo 	vm.size = num_possible_cpus() * ai->unit_size;
21618f05a6a6STejun Heo 	vm_area_register_early(&vm, PAGE_SIZE);
21628f05a6a6STejun Heo 
2163fd1e8a1fSTejun Heo 	for (unit = 0; unit < num_possible_cpus(); unit++) {
21641d9d3257STejun Heo 		unsigned long unit_addr =
2165fd1e8a1fSTejun Heo 			(unsigned long)vm.addr + unit * ai->unit_size;
21668f05a6a6STejun Heo 
2167ce3141a2STejun Heo 		for (i = 0; i < unit_pages; i++)
21688f05a6a6STejun Heo 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
21698f05a6a6STejun Heo 
21708f05a6a6STejun Heo 		/* ptes already populated, the following shouldn't fail */
2171fb435d52STejun Heo 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2172ce3141a2STejun Heo 				      unit_pages);
2173fb435d52STejun Heo 		if (rc < 0)
2174fb435d52STejun Heo 			panic("failed to map percpu area, err=%d\n", rc);
21758f05a6a6STejun Heo 
21768f05a6a6STejun Heo 		/*
21778f05a6a6STejun Heo 		 * FIXME: Archs with virtual cache should flush local
21788f05a6a6STejun Heo 		 * cache for the linear mapping here - something
21798f05a6a6STejun Heo 		 * equivalent to flush_cache_vmap() on the local cpu.
21808f05a6a6STejun Heo 		 * flush_cache_vmap() can't be used as most supporting
21818f05a6a6STejun Heo 		 * data structures are not set up yet.
21828f05a6a6STejun Heo 		 */
21838f05a6a6STejun Heo 
21848f05a6a6STejun Heo 		/* copy static data */
2185fd1e8a1fSTejun Heo 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
218666c3a757STejun Heo 	}
218766c3a757STejun Heo 
218866c3a757STejun Heo 	/* we're ready, commit */
2189870d4b12SJoe Perches 	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2190fd1e8a1fSTejun Heo 		unit_pages, psize_str, vm.addr, ai->static_size,
2191fd1e8a1fSTejun Heo 		ai->reserved_size, ai->dyn_size);
219266c3a757STejun Heo 
2193fb435d52STejun Heo 	rc = pcpu_setup_first_chunk(ai, vm.addr);
2194d4b95f80STejun Heo 	goto out_free_ar;
2195d4b95f80STejun Heo 
2196d4b95f80STejun Heo enomem:
2197d4b95f80STejun Heo 	while (--j >= 0)
2198ce3141a2STejun Heo 		free_fn(page_address(pages[j]), PAGE_SIZE);
2199fb435d52STejun Heo 	rc = -ENOMEM;
2200d4b95f80STejun Heo out_free_ar:
2201999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(pages), pages_size);
2202fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
2203fb435d52STejun Heo 	return rc;
220466c3a757STejun Heo }
22053c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */
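
/*
 * Illustrative only: a rough sketch of how an arch might wire up
 * pcpu_page_first_chunk() from its setup_per_cpu_areas().  The helper
 * names below are hypothetical; see arch/x86 and arch/sparc for real
 * callers.
 *
 *	static void * __init my_fc_alloc(unsigned int cpu, size_t size,
 *					 size_t align)
 *	{
 *		return memblock_virt_alloc_from_nopanic(
 *				size, align, __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init my_fc_free(void *ptr, size_t size)
 *	{
 *		memblock_free_early(__pa(ptr), size);
 *	}
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, my_fc_alloc,
 *				   my_fc_free, my_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */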
2206d4b95f80STejun Heo 
2207bbddff05STejun Heo #ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
22088c4bfc6eSTejun Heo /*
2209bbddff05STejun Heo  * Generic SMP percpu area setup.
2210e74e3962STejun Heo  *
2211e74e3962STejun Heo  * The embedding helper is used because its behavior closely resembles
2212e74e3962STejun Heo  * the original non-dynamic generic percpu area setup.  This is
2213e74e3962STejun Heo  * important because many archs have addressing restrictions and might
2214e74e3962STejun Heo  * fail if the percpu area is located far away from the previous
2215e74e3962STejun Heo  * location.  As an added bonus, in non-NUMA cases, embedding is
2216e74e3962STejun Heo  * generally a good idea TLB-wise because the percpu area can piggyback
2217e74e3962STejun Heo  * on the physical linear memory mapping, which uses large page
2218e74e3962STejun Heo  * mappings on applicable archs.
2219e74e3962STejun Heo  */
2220e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2221e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset);
2222e74e3962STejun Heo 
2223c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2224c8826dd5STejun Heo 				       size_t align)
2225c8826dd5STejun Heo {
2226999c17e3SSantosh Shilimkar 	return memblock_virt_alloc_from_nopanic(
2227999c17e3SSantosh Shilimkar 			size, align, __pa(MAX_DMA_ADDRESS));
2228c8826dd5STejun Heo }
2229c8826dd5STejun Heo 
2230c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2231c8826dd5STejun Heo {
2232999c17e3SSantosh Shilimkar 	memblock_free_early(__pa(ptr), size);
2233c8826dd5STejun Heo }
2234c8826dd5STejun Heo 
2235e74e3962STejun Heo void __init setup_per_cpu_areas(void)
2236e74e3962STejun Heo {
2237e74e3962STejun Heo 	unsigned long delta;
2238e74e3962STejun Heo 	unsigned int cpu;
2239fb435d52STejun Heo 	int rc;
2240e74e3962STejun Heo 
2241e74e3962STejun Heo 	/*
2242e74e3962STejun Heo 	 * Always reserve area for module percpu variables.  That's
2243e74e3962STejun Heo 	 * what the legacy allocator did.
2244e74e3962STejun Heo 	 */
2245fb435d52STejun Heo 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2246c8826dd5STejun Heo 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2247c8826dd5STejun Heo 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2248fb435d52STejun Heo 	if (rc < 0)
2249bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
2250e74e3962STejun Heo 
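	/*
	 * pcpu_base_addr is where the first chunk actually landed while
	 * __per_cpu_start is the link-time address of the static percpu
	 * section; delta plus each cpu's unit offset relocates every
	 * static percpu symbol to that cpu's copy.
	 */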
2251e74e3962STejun Heo 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2252e74e3962STejun Heo 	for_each_possible_cpu(cpu)
2253fb435d52STejun Heo 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2254e74e3962STejun Heo }
2255e74e3962STejun Heo #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
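
/*
 * Illustrative only: on archs using the generic accessors, the offsets
 * initialized above are consumed roughly as in the asm-generic
 * definitions, sketched here:
 *
 *	#define per_cpu_offset(x)	(__per_cpu_offset[x])
 *	#define per_cpu(var, cpu) \
 *		(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
 */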
2256099a19d9STejun Heo 
2257bbddff05STejun Heo #else	/* CONFIG_SMP */
2258bbddff05STejun Heo 
2259bbddff05STejun Heo /*
2260bbddff05STejun Heo  * UP percpu area setup.
2261bbddff05STejun Heo  *
2262bbddff05STejun Heo  * UP always uses the km-based percpu allocator with identity mapping.
2263bbddff05STejun Heo  * Static percpu variables are indistinguishable from the usual static
2264bbddff05STejun Heo  * variables and don't require any special preparation.
2265bbddff05STejun Heo  */
2266bbddff05STejun Heo void __init setup_per_cpu_areas(void)
2267bbddff05STejun Heo {
2268bbddff05STejun Heo 	const size_t unit_size =
2269bbddff05STejun Heo 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2270bbddff05STejun Heo 					 PERCPU_DYNAMIC_RESERVE));
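	/*
	 * e.g. with a 32k PCPU_MIN_UNIT_SIZE and a smaller dynamic
	 * reserve, this yields a single 32k power-of-two unit.
	 */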
2271bbddff05STejun Heo 	struct pcpu_alloc_info *ai;
2272bbddff05STejun Heo 	void *fc;
2273bbddff05STejun Heo 
2274bbddff05STejun Heo 	ai = pcpu_alloc_alloc_info(1, 1);
2275999c17e3SSantosh Shilimkar 	fc = memblock_virt_alloc_from_nopanic(unit_size,
2276999c17e3SSantosh Shilimkar 					      PAGE_SIZE,
2277999c17e3SSantosh Shilimkar 					      __pa(MAX_DMA_ADDRESS));
2278bbddff05STejun Heo 	if (!ai || !fc)
2279bbddff05STejun Heo 		panic("Failed to allocate memory for percpu areas.");
2280100d13c3SCatalin Marinas 	/* kmemleak tracks the percpu allocations separately */
2281100d13c3SCatalin Marinas 	kmemleak_free(fc);
2282bbddff05STejun Heo 
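	/* a single unit serves cpu0 and the whole unit is dynamic space */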
2283bbddff05STejun Heo 	ai->dyn_size = unit_size;
2284bbddff05STejun Heo 	ai->unit_size = unit_size;
2285bbddff05STejun Heo 	ai->atom_size = unit_size;
2286bbddff05STejun Heo 	ai->alloc_size = unit_size;
2287bbddff05STejun Heo 	ai->groups[0].nr_units = 1;
2288bbddff05STejun Heo 	ai->groups[0].cpu_map[0] = 0;
2289bbddff05STejun Heo 
2290bbddff05STejun Heo 	if (pcpu_setup_first_chunk(ai, fc) < 0)
2291bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
2292bbddff05STejun Heo }
2293bbddff05STejun Heo 
2294bbddff05STejun Heo #endif	/* CONFIG_SMP */
2295bbddff05STejun Heo 
2296099a19d9STejun Heo /*
2297099a19d9STejun Heo  * The first and reserved chunks are initialized with temporary
2298099a19d9STejun Heo  * allocation maps in initdata so that they can be used before slab
2299099a19d9STejun Heo  * is online.  This function is called after slab is brought up and
2300099a19d9STejun Heo  * replaces those maps with properly allocated ones.
2301099a19d9STejun Heo  */
2302099a19d9STejun Heo void __init percpu_init_late(void)
2303099a19d9STejun Heo {
2304099a19d9STejun Heo 	struct pcpu_chunk *target_chunks[] =
2305099a19d9STejun Heo 		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
2306099a19d9STejun Heo 	struct pcpu_chunk *chunk;
2307099a19d9STejun Heo 	unsigned long flags;
2308099a19d9STejun Heo 	int i;
2309099a19d9STejun Heo 
2310099a19d9STejun Heo 	for (i = 0; (chunk = target_chunks[i]); i++) {
2311099a19d9STejun Heo 		int *map;
2312099a19d9STejun Heo 		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
2313099a19d9STejun Heo 
2314099a19d9STejun Heo 		BUILD_BUG_ON(size > PAGE_SIZE);
2315099a19d9STejun Heo 
231690459ce0SBob Liu 		map = pcpu_mem_zalloc(size);
2317099a19d9STejun Heo 		BUG_ON(!map);
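		/*
		 * The chunk may already be serving allocations, so the
		 * new map is swapped in under pcpu_lock.
		 */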
2318099a19d9STejun Heo 
2319099a19d9STejun Heo 		spin_lock_irqsave(&pcpu_lock, flags);
2320099a19d9STejun Heo 		memcpy(map, chunk->map, size);
2321099a19d9STejun Heo 		chunk->map = map;
2322099a19d9STejun Heo 		spin_unlock_irqrestore(&pcpu_lock, flags);
2323099a19d9STejun Heo 	}
2324099a19d9STejun Heo }
23251a4d7607STejun Heo 
23261a4d7607STejun Heo /*
23271a4d7607STejun Heo  * The percpu allocator is initialized early during boot, when neither
23281a4d7607STejun Heo  * slab nor workqueue is available.  Plug async management until
23291a4d7607STejun Heo  * everything is up and running.
23301a4d7607STejun Heo  */
23311a4d7607STejun Heo static int __init percpu_enable_async(void)
23321a4d7607STejun Heo {
23331a4d7607STejun Heo 	pcpu_async_enabled = true;
23341a4d7607STejun Heo 	return 0;
23351a4d7607STejun Heo }
23361a4d7607STejun Heo subsys_initcall(percpu_enable_async);
2337