xref: /linux/mm/percpu.c (revision 5479c78ac6f688ea5ea8c49b44cf90ea87b63931)
1fbf59bc9STejun Heo /*
288999a89STejun Heo  * mm/percpu.c - percpu memory allocator
3fbf59bc9STejun Heo  *
4fbf59bc9STejun Heo  * Copyright (C) 2009		SUSE Linux Products GmbH
5fbf59bc9STejun Heo  * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
6fbf59bc9STejun Heo  *
7fbf59bc9STejun Heo  * This file is released under the GPLv2.
8fbf59bc9STejun Heo  *
9fbf59bc9STejun Heo  * This is percpu allocator which can handle both static and dynamic
1088999a89STejun Heo  * areas.  Percpu areas are allocated in chunks.  Each chunk is
1188999a89STejun Heo  * made up of a boot-time determined number of units and the first
1288999a89STejun Heo  * chunk is used for static percpu variables in the kernel image
132f39e637STejun Heo  * (special boot time alloc/init handling necessary as these areas
142f39e637STejun Heo  * need to be brought up before allocation services are running).
152f39e637STejun Heo  * The unit size grows as necessary and all units grow or shrink in unison.
1688999a89STejun Heo  * When a chunk is filled up, another chunk is allocated.
17fbf59bc9STejun Heo  *
18fbf59bc9STejun Heo  *  c0                           c1                         c2
19fbf59bc9STejun Heo  *  -------------------          -------------------        ------------
20fbf59bc9STejun Heo  * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
21fbf59bc9STejun Heo  *  -------------------  ......  -------------------  ....  ------------
22fbf59bc9STejun Heo  *
23fbf59bc9STejun Heo  * Allocation is done in offset-size areas of a single unit's space.
24fbf59bc9STejun Heo  * That is, an area of 512 bytes at 6k in c1 occupies 512 bytes at
252f39e637STejun Heo  * 6k of c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond
262f39e637STejun Heo  * directly to cpus.  On NUMA, the mapping can be non-linear and even sparse.
272f39e637STejun Heo  * Percpu access can be done by configuring percpu base registers
282f39e637STejun Heo  * according to cpu to unit mapping and pcpu_unit_size.
29fbf59bc9STejun Heo  *
302f39e637STejun Heo  * There are usually many small percpu allocations, many of them as
312f39e637STejun Heo  * small as 4 bytes.  The allocator organizes chunks into lists
32fbf59bc9STejun Heo  * according to free size and tries to allocate from the fullest one.
33fbf59bc9STejun Heo  * Each chunk keeps the maximum contiguous area size hint which is
344785879eSNamhyung Kim  * guaranteed to be equal to or larger than the maximum contiguous
35fbf59bc9STejun Heo  * area in the chunk.  This helps the allocator not to iterate the
36fbf59bc9STejun Heo  * chunk maps unnecessarily.
37fbf59bc9STejun Heo  *
38fbf59bc9STejun Heo  * Allocation state in each chunk is kept using an array of integers
39fbf59bc9STejun Heo  * on chunk->map.  A positive value in the map represents a free
40fbf59bc9STejun Heo  * region and a negative one an allocated region.  Allocation inside
41fbf59bc9STejun Heo  * a chunk is done by scanning this map sequentially and serving the
42fbf59bc9STejun Heo  * first matching entry.  This is mostly copied from percpu_modalloc().
43e1b9aa3fSChristoph Lameter  * Chunks can be determined from the address using the index field
44e1b9aa3fSChristoph Lameter  * in the page struct. The index field contains a pointer to the chunk.
45fbf59bc9STejun Heo  *
46fbf59bc9STejun Heo  * To use this allocator, arch code should do the following:
47fbf59bc9STejun Heo  *
48fbf59bc9STejun Heo  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49e0100983STejun Heo  *   a regular address to a percpu pointer and back, if they need to
50e0100983STejun Heo  *   differ from the defaults
51fbf59bc9STejun Heo  *
528d408b4bSTejun Heo  * - use pcpu_setup_first_chunk() during percpu area initialization to
538d408b4bSTejun Heo  *   set up the first chunk containing the kernel static percpu area
54fbf59bc9STejun Heo  */
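
/*
 * Illustrative usage sketch (relies only on the alloc_percpu(),
 * per_cpu_ptr() and free_percpu() helpers declared in linux/percpu.h;
 * not part of the allocator itself):
 *
 *	int __percpu *cnt = alloc_percpu(int);
 *
 *	if (cnt) {
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			WARN_ON(*per_cpu_ptr(cnt, cpu) != 0);
 *		free_percpu(cnt);
 *	}
 *
 * Each possible CPU gets its own zero-filled copy of the variable,
 * all reachable through the single percpu pointer returned by the
 * allocator below.
 */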
55fbf59bc9STejun Heo 
56fbf59bc9STejun Heo #include <linux/bitmap.h>
57fbf59bc9STejun Heo #include <linux/bootmem.h>
58fd1e8a1fSTejun Heo #include <linux/err.h>
59fbf59bc9STejun Heo #include <linux/list.h>
60a530b795STejun Heo #include <linux/log2.h>
61fbf59bc9STejun Heo #include <linux/mm.h>
62fbf59bc9STejun Heo #include <linux/module.h>
63fbf59bc9STejun Heo #include <linux/mutex.h>
64fbf59bc9STejun Heo #include <linux/percpu.h>
65fbf59bc9STejun Heo #include <linux/pfn.h>
66fbf59bc9STejun Heo #include <linux/slab.h>
67ccea34b5STejun Heo #include <linux/spinlock.h>
68fbf59bc9STejun Heo #include <linux/vmalloc.h>
69a56dbddfSTejun Heo #include <linux/workqueue.h>
70f528f0b8SCatalin Marinas #include <linux/kmemleak.h>
71fbf59bc9STejun Heo 
72fbf59bc9STejun Heo #include <asm/cacheflush.h>
73e0100983STejun Heo #include <asm/sections.h>
74fbf59bc9STejun Heo #include <asm/tlbflush.h>
753b034b0dSVivek Goyal #include <asm/io.h>
76fbf59bc9STejun Heo 
77fbf59bc9STejun Heo #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
78fbf59bc9STejun Heo #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
79fbf59bc9STejun Heo 
80bbddff05STejun Heo #ifdef CONFIG_SMP
81e0100983STejun Heo /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
82e0100983STejun Heo #ifndef __addr_to_pcpu_ptr
83e0100983STejun Heo #define __addr_to_pcpu_ptr(addr)					\
8443cf38ebSTejun Heo 	(void __percpu *)((unsigned long)(addr) -			\
8543cf38ebSTejun Heo 			  (unsigned long)pcpu_base_addr	+		\
8643cf38ebSTejun Heo 			  (unsigned long)__per_cpu_start)
87e0100983STejun Heo #endif
88e0100983STejun Heo #ifndef __pcpu_ptr_to_addr
89e0100983STejun Heo #define __pcpu_ptr_to_addr(ptr)						\
9043cf38ebSTejun Heo 	(void __force *)((unsigned long)(ptr) +				\
9143cf38ebSTejun Heo 			 (unsigned long)pcpu_base_addr -		\
9243cf38ebSTejun Heo 			 (unsigned long)__per_cpu_start)
93e0100983STejun Heo #endif
94bbddff05STejun Heo #else	/* CONFIG_SMP */
95bbddff05STejun Heo /* on UP, it's always identity mapped */
96bbddff05STejun Heo #define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
97bbddff05STejun Heo #define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
98bbddff05STejun Heo #endif	/* CONFIG_SMP */
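
/*
 * Illustrative sketch (not used by the allocator itself): the two
 * translations above are exact inverses, so a percpu pointer survives
 * a round trip through the raw unit0 address.  The helper below only
 * demonstrates that property.
 */
static bool __maybe_unused pcpu_ptr_round_trips(void __percpu *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);

	return __addr_to_pcpu_ptr(addr) == ptr;
}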
99e0100983STejun Heo 
100fbf59bc9STejun Heo struct pcpu_chunk {
101fbf59bc9STejun Heo 	struct list_head	list;		/* linked to pcpu_slot lists */
102fbf59bc9STejun Heo 	int			free_size;	/* free bytes in the chunk */
103fbf59bc9STejun Heo 	int			contig_hint;	/* max contiguous size hint */
104bba174f5STejun Heo 	void			*base_addr;	/* base address of this chunk */
105fbf59bc9STejun Heo 	int			map_used;	/* # of map entries used */
106fbf59bc9STejun Heo 	int			map_alloc;	/* # of map entries allocated */
107fbf59bc9STejun Heo 	int			*map;		/* allocation map */
10888999a89STejun Heo 	void			*data;		/* chunk data */
1098d408b4bSTejun Heo 	bool			immutable;	/* no [de]population allowed */
110ce3141a2STejun Heo 	unsigned long		populated[];	/* populated bitmap */
111fbf59bc9STejun Heo };
112fbf59bc9STejun Heo 
11340150d37STejun Heo static int pcpu_unit_pages __read_mostly;
11440150d37STejun Heo static int pcpu_unit_size __read_mostly;
1152f39e637STejun Heo static int pcpu_nr_units __read_mostly;
1166563297cSTejun Heo static int pcpu_atom_size __read_mostly;
11740150d37STejun Heo static int pcpu_nr_slots __read_mostly;
11840150d37STejun Heo static size_t pcpu_chunk_struct_size __read_mostly;
119fbf59bc9STejun Heo 
120a855b84cSTejun Heo /* cpus with the lowest and highest unit addresses */
121a855b84cSTejun Heo static unsigned int pcpu_low_unit_cpu __read_mostly;
122a855b84cSTejun Heo static unsigned int pcpu_high_unit_cpu __read_mostly;
1232f39e637STejun Heo 
124fbf59bc9STejun Heo /* the address of the first chunk which starts with the kernel static area */
12540150d37STejun Heo void *pcpu_base_addr __read_mostly;
126fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(pcpu_base_addr);
127fbf59bc9STejun Heo 
128fb435d52STejun Heo static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
129fb435d52STejun Heo const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
1302f39e637STejun Heo 
1316563297cSTejun Heo /* group information, used for vm allocation */
1326563297cSTejun Heo static int pcpu_nr_groups __read_mostly;
1336563297cSTejun Heo static const unsigned long *pcpu_group_offsets __read_mostly;
1346563297cSTejun Heo static const size_t *pcpu_group_sizes __read_mostly;
1356563297cSTejun Heo 
136ae9e6bc9STejun Heo /*
137ae9e6bc9STejun Heo  * The first chunk which always exists.  Note that unlike other
138ae9e6bc9STejun Heo  * chunks, this one can be allocated and mapped in several different
139ae9e6bc9STejun Heo  * ways and thus often doesn't live in the vmalloc area.
140ae9e6bc9STejun Heo  */
141ae9e6bc9STejun Heo static struct pcpu_chunk *pcpu_first_chunk;
142ae9e6bc9STejun Heo 
143ae9e6bc9STejun Heo /*
144ae9e6bc9STejun Heo  * Optional reserved chunk.  This chunk reserves part of the first
145ae9e6bc9STejun Heo  * chunk and serves it for reserved allocations.  The size of the
146ae9e6bc9STejun Heo  * reserved region is kept in pcpu_reserved_chunk_limit.  When the
147ae9e6bc9STejun Heo  * reserved area doesn't exist, the following variables contain NULL
148ae9e6bc9STejun Heo  * and 0 respectively.
149ae9e6bc9STejun Heo  */
150edcb4639STejun Heo static struct pcpu_chunk *pcpu_reserved_chunk;
151edcb4639STejun Heo static int pcpu_reserved_chunk_limit;
152edcb4639STejun Heo 
153fbf59bc9STejun Heo /*
154ccea34b5STejun Heo  * Synchronization rules.
155fbf59bc9STejun Heo  *
156ccea34b5STejun Heo  * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
157ce3141a2STejun Heo  * protects allocation/reclaim paths, chunks, populated bitmap and
158ce3141a2STejun Heo  * vmalloc mapping.  The latter is a spinlock and protects the index
159ce3141a2STejun Heo  * data structures - chunk slots, chunks and area maps in chunks.
160fbf59bc9STejun Heo  *
161ccea34b5STejun Heo  * During allocation, pcpu_alloc_mutex is kept locked all the time and
162ccea34b5STejun Heo  * pcpu_lock is grabbed and released as necessary.  All actual memory
163403a91b1SJiri Kosina  * allocations are done using GFP_KERNEL with pcpu_lock released.  In
164403a91b1SJiri Kosina  * general, percpu memory can't be allocated with irqs off, but
165403a91b1SJiri Kosina  * irqsave/restore are still used in the alloc path so that it can be
166403a91b1SJiri Kosina  * used from the early init path - sched_init() specifically.
167ccea34b5STejun Heo  *
168ccea34b5STejun Heo  * The free path accesses and alters only the index data structures,
169ccea34b5STejun Heo  * so it can be safely called from atomic context.  When memory needs
170ccea34b5STejun Heo  * to be returned to the system, the free path schedules reclaim_work
171ccea34b5STejun Heo  * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
172ccea34b5STejun Heo  * be reclaimed, releases both locks and frees the chunks.  Note that
173ccea34b5STejun Heo  * it's necessary to grab both locks to remove a chunk from
174ccea34b5STejun Heo  * circulation as the allocation path might be referencing the chunk
175ccea34b5STejun Heo  * with only pcpu_alloc_mutex locked.
176fbf59bc9STejun Heo  */
177ccea34b5STejun Heo static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
178ccea34b5STejun Heo static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
179fbf59bc9STejun Heo 
18040150d37STejun Heo static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
181fbf59bc9STejun Heo 
182a56dbddfSTejun Heo /* reclaim work to release fully free chunks, scheduled from free path */
183a56dbddfSTejun Heo static void pcpu_reclaim(struct work_struct *work);
184a56dbddfSTejun Heo static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
185a56dbddfSTejun Heo 
186020ec653STejun Heo static bool pcpu_addr_in_first_chunk(void *addr)
187020ec653STejun Heo {
188020ec653STejun Heo 	void *first_start = pcpu_first_chunk->base_addr;
189020ec653STejun Heo 
190020ec653STejun Heo 	return addr >= first_start && addr < first_start + pcpu_unit_size;
191020ec653STejun Heo }
192020ec653STejun Heo 
193020ec653STejun Heo static bool pcpu_addr_in_reserved_chunk(void *addr)
194020ec653STejun Heo {
195020ec653STejun Heo 	void *first_start = pcpu_first_chunk->base_addr;
196020ec653STejun Heo 
197020ec653STejun Heo 	return addr >= first_start &&
198020ec653STejun Heo 		addr < first_start + pcpu_reserved_chunk_limit;
199020ec653STejun Heo }
200020ec653STejun Heo 
201d9b55eebSTejun Heo static int __pcpu_size_to_slot(int size)
202fbf59bc9STejun Heo {
203cae3aeb8STejun Heo 	int highbit = fls(size);	/* size is in bytes */
204fbf59bc9STejun Heo 	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
205fbf59bc9STejun Heo }
206fbf59bc9STejun Heo 
207d9b55eebSTejun Heo static int pcpu_size_to_slot(int size)
208d9b55eebSTejun Heo {
209d9b55eebSTejun Heo 	if (size == pcpu_unit_size)
210d9b55eebSTejun Heo 		return pcpu_nr_slots - 1;
211d9b55eebSTejun Heo 	return __pcpu_size_to_slot(size);
212d9b55eebSTejun Heo }
213d9b55eebSTejun Heo 
214fbf59bc9STejun Heo static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
215fbf59bc9STejun Heo {
216fbf59bc9STejun Heo 	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
217fbf59bc9STejun Heo 		return 0;
218fbf59bc9STejun Heo 
219fbf59bc9STejun Heo 	return pcpu_size_to_slot(chunk->free_size);
220fbf59bc9STejun Heo }
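
/*
 * Worked example (illustrative): fls() returns the position of the
 * highest set bit, so with PCPU_SLOT_BASE_SHIFT == 5 a chunk whose
 * free_size is 1024 bytes lands in slot max(11 - 5 + 2, 1) == 8.  A
 * fully free chunk (free_size == pcpu_unit_size) is special-cased
 * into the last slot, pcpu_nr_slots - 1, which is exactly the list
 * the reclaim path scans for chunks to release.
 */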
221fbf59bc9STejun Heo 
22288999a89STejun Heo /* set the pointer to a chunk in a page struct */
22388999a89STejun Heo static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
22488999a89STejun Heo {
22588999a89STejun Heo 	page->index = (unsigned long)pcpu;
22688999a89STejun Heo }
22788999a89STejun Heo 
22888999a89STejun Heo /* obtain pointer to a chunk from a page struct */
22988999a89STejun Heo static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
23088999a89STejun Heo {
23188999a89STejun Heo 	return (struct pcpu_chunk *)page->index;
23288999a89STejun Heo }
23388999a89STejun Heo 
23488999a89STejun Heo static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
235fbf59bc9STejun Heo {
2362f39e637STejun Heo 	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
237fbf59bc9STejun Heo }
238fbf59bc9STejun Heo 
2399983b6f0STejun Heo static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
240fbf59bc9STejun Heo 				     unsigned int cpu, int page_idx)
241fbf59bc9STejun Heo {
242bba174f5STejun Heo 	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
243fb435d52STejun Heo 		(page_idx << PAGE_SHIFT);
244fbf59bc9STejun Heo }
245fbf59bc9STejun Heo 
24688999a89STejun Heo static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
24788999a89STejun Heo 					   int *rs, int *re, int end)
248ce3141a2STejun Heo {
249ce3141a2STejun Heo 	*rs = find_next_zero_bit(chunk->populated, end, *rs);
250ce3141a2STejun Heo 	*re = find_next_bit(chunk->populated, end, *rs + 1);
251ce3141a2STejun Heo }
252ce3141a2STejun Heo 
25388999a89STejun Heo static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
25488999a89STejun Heo 					 int *rs, int *re, int end)
255ce3141a2STejun Heo {
256ce3141a2STejun Heo 	*rs = find_next_bit(chunk->populated, end, *rs);
257ce3141a2STejun Heo 	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
258ce3141a2STejun Heo }
259ce3141a2STejun Heo 
260ce3141a2STejun Heo /*
261ce3141a2STejun Heo  * (Un)populated page region iterators.  Iterate over (un)populated
262b595076aSUwe Kleine-König  * page regions between @start and @end in @chunk.  @rs and @re should
263ce3141a2STejun Heo  * be integer variables and will be set to start and end page index of
264ce3141a2STejun Heo  * the current region.
265ce3141a2STejun Heo  */
266ce3141a2STejun Heo #define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
267ce3141a2STejun Heo 	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
268ce3141a2STejun Heo 	     (rs) < (re);						    \
269ce3141a2STejun Heo 	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
270ce3141a2STejun Heo 
271ce3141a2STejun Heo #define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
272ce3141a2STejun Heo 	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
273ce3141a2STejun Heo 	     (rs) < (re);						    \
274ce3141a2STejun Heo 	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
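
/*
 * Illustrative sketch (not used by the allocator itself): counting
 * the unpopulated pages of a chunk with the iterator above.  The
 * populate/depopulate paths in the backing implementations walk
 * regions in exactly this manner.
 */
static int __maybe_unused pcpu_count_unpop_pages(struct pcpu_chunk *chunk)
{
	int rs, re, nr = 0;

	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
		nr += re - rs;
	return nr;
}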
275ce3141a2STejun Heo 
276fbf59bc9STejun Heo /**
27790459ce0SBob Liu  * pcpu_mem_zalloc - allocate memory
2781880d93bSTejun Heo  * @size: bytes to allocate
279fbf59bc9STejun Heo  *
2801880d93bSTejun Heo  * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
28190459ce0SBob Liu  * kzalloc() is used; otherwise, vzalloc() is used.  The returned
2821880d93bSTejun Heo  * memory is always zeroed.
283fbf59bc9STejun Heo  *
284ccea34b5STejun Heo  * CONTEXT:
285ccea34b5STejun Heo  * Does GFP_KERNEL allocation.
286ccea34b5STejun Heo  *
287fbf59bc9STejun Heo  * RETURNS:
2881880d93bSTejun Heo  * Pointer to the allocated area on success, NULL on failure.
289fbf59bc9STejun Heo  */
29090459ce0SBob Liu static void *pcpu_mem_zalloc(size_t size)
291fbf59bc9STejun Heo {
292099a19d9STejun Heo 	if (WARN_ON_ONCE(!slab_is_available()))
293099a19d9STejun Heo 		return NULL;
294099a19d9STejun Heo 
295fbf59bc9STejun Heo 	if (size <= PAGE_SIZE)
2961880d93bSTejun Heo 		return kzalloc(size, GFP_KERNEL);
2977af4c093SJesper Juhl 	else
2987af4c093SJesper Juhl 		return vzalloc(size);
2991880d93bSTejun Heo }
300fbf59bc9STejun Heo 
3011880d93bSTejun Heo /**
3021880d93bSTejun Heo  * pcpu_mem_free - free memory
3031880d93bSTejun Heo  * @ptr: memory to free
3041880d93bSTejun Heo  * @size: size of the area
3051880d93bSTejun Heo  *
30690459ce0SBob Liu  * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
3071880d93bSTejun Heo  */
3081880d93bSTejun Heo static void pcpu_mem_free(void *ptr, size_t size)
3091880d93bSTejun Heo {
3101880d93bSTejun Heo 	if (size <= PAGE_SIZE)
3111880d93bSTejun Heo 		kfree(ptr);
3121880d93bSTejun Heo 	else
3131880d93bSTejun Heo 		vfree(ptr);
314fbf59bc9STejun Heo }
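
/*
 * Illustrative pairing (sketch): because the backend is chosen by
 * size, callers must pass the same size to both halves, e.g.
 *
 *	int *map = pcpu_mem_zalloc(64 * sizeof(*map));
 *	...
 *	pcpu_mem_free(map, 64 * sizeof(*map));
 *
 * as the area map extension code below does.
 */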
315fbf59bc9STejun Heo 
316fbf59bc9STejun Heo /**
317fbf59bc9STejun Heo  * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
318fbf59bc9STejun Heo  * @chunk: chunk of interest
319fbf59bc9STejun Heo  * @oslot: the previous slot it was on
320fbf59bc9STejun Heo  *
321fbf59bc9STejun Heo  * This function is called after an allocation or free changed @chunk.
322fbf59bc9STejun Heo  * New slot according to the changed state is determined and @chunk is
323edcb4639STejun Heo  * moved to the slot.  Note that the reserved chunk is never put on
324edcb4639STejun Heo  * chunk slots.
325ccea34b5STejun Heo  *
326ccea34b5STejun Heo  * CONTEXT:
327ccea34b5STejun Heo  * pcpu_lock.
328fbf59bc9STejun Heo  */
329fbf59bc9STejun Heo static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
330fbf59bc9STejun Heo {
331fbf59bc9STejun Heo 	int nslot = pcpu_chunk_slot(chunk);
332fbf59bc9STejun Heo 
333edcb4639STejun Heo 	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
334fbf59bc9STejun Heo 		if (oslot < nslot)
335fbf59bc9STejun Heo 			list_move(&chunk->list, &pcpu_slot[nslot]);
336fbf59bc9STejun Heo 		else
337fbf59bc9STejun Heo 			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
338fbf59bc9STejun Heo 	}
339fbf59bc9STejun Heo }
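
/*
 * Worked example (illustrative): an allocation that shrinks a chunk's
 * free_size from 1024 bytes (slot 8) to 24 bytes (slot 2) moves the
 * chunk to the tail of slot 2's list via list_move_tail(), while a
 * free that grows free_size moves the chunk to the head of its new,
 * higher slot via list_move().
 */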
340fbf59bc9STejun Heo 
341fbf59bc9STejun Heo /**
342833af842STejun Heo  * pcpu_need_to_extend - determine whether chunk area map needs to be extended
343833af842STejun Heo  * @chunk: chunk of interest
3449f7dcf22STejun Heo  *
345833af842STejun Heo  * Determine whether area map of @chunk needs to be extended to
34625985edcSLucas De Marchi  * accommodate a new allocation.
3479f7dcf22STejun Heo  *
348ccea34b5STejun Heo  * CONTEXT:
349833af842STejun Heo  * pcpu_lock.
350ccea34b5STejun Heo  *
3519f7dcf22STejun Heo  * RETURNS:
352833af842STejun Heo  * New target map allocation length if extension is necessary, 0
353833af842STejun Heo  * otherwise.
3549f7dcf22STejun Heo  */
355833af842STejun Heo static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
3569f7dcf22STejun Heo {
3579f7dcf22STejun Heo 	int new_alloc;
3589f7dcf22STejun Heo 
3599f7dcf22STejun Heo 	if (chunk->map_alloc >= chunk->map_used + 2)
3609f7dcf22STejun Heo 		return 0;
3619f7dcf22STejun Heo 
3629f7dcf22STejun Heo 	new_alloc = PCPU_DFL_MAP_ALLOC;
3639f7dcf22STejun Heo 	while (new_alloc < chunk->map_used + 2)
3649f7dcf22STejun Heo 		new_alloc *= 2;
3659f7dcf22STejun Heo 
366833af842STejun Heo 	return new_alloc;
367ccea34b5STejun Heo }
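
/*
 * Worked example (illustrative): for map_used == 40 and
 * map_alloc == 32, an extension is needed and the loop above doubles
 * PCPU_DFL_MAP_ALLOC until it covers map_used + 2, i.e.
 * 16 -> 32 -> 64, so 64 is returned as the new target length.
 */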
368ccea34b5STejun Heo 
369833af842STejun Heo /**
370833af842STejun Heo  * pcpu_extend_area_map - extend area map of a chunk
371833af842STejun Heo  * @chunk: chunk of interest
372833af842STejun Heo  * @new_alloc: new target allocation length of the area map
373833af842STejun Heo  *
374833af842STejun Heo  * Extend area map of @chunk to have @new_alloc entries.
375833af842STejun Heo  *
376833af842STejun Heo  * CONTEXT:
377833af842STejun Heo  * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
378833af842STejun Heo  *
379833af842STejun Heo  * RETURNS:
380833af842STejun Heo  * 0 on success, -errno on failure.
381ccea34b5STejun Heo  */
382833af842STejun Heo static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
383833af842STejun Heo {
384833af842STejun Heo 	int *old = NULL, *new = NULL;
385833af842STejun Heo 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
386833af842STejun Heo 	unsigned long flags;
3879f7dcf22STejun Heo 
38890459ce0SBob Liu 	new = pcpu_mem_zalloc(new_size);
389833af842STejun Heo 	if (!new)
390833af842STejun Heo 		return -ENOMEM;
391833af842STejun Heo 
392833af842STejun Heo 	/* acquire pcpu_lock and switch to new area map */
393833af842STejun Heo 	spin_lock_irqsave(&pcpu_lock, flags);
394833af842STejun Heo 
395833af842STejun Heo 	if (new_alloc <= chunk->map_alloc)
396833af842STejun Heo 		goto out_unlock;
397833af842STejun Heo 
398833af842STejun Heo 	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
399a002d148SHuang Shijie 	old = chunk->map;
400a002d148SHuang Shijie 
401a002d148SHuang Shijie 	memcpy(new, old, old_size);
4029f7dcf22STejun Heo 
4039f7dcf22STejun Heo 	chunk->map_alloc = new_alloc;
4049f7dcf22STejun Heo 	chunk->map = new;
405833af842STejun Heo 	new = NULL;
406833af842STejun Heo 
407833af842STejun Heo out_unlock:
408833af842STejun Heo 	spin_unlock_irqrestore(&pcpu_lock, flags);
409833af842STejun Heo 
410833af842STejun Heo 	/*
411833af842STejun Heo 	 * pcpu_mem_free() might end up calling vfree() which uses
412833af842STejun Heo 	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
413833af842STejun Heo 	 */
414833af842STejun Heo 	pcpu_mem_free(old, old_size);
415833af842STejun Heo 	pcpu_mem_free(new, new_size);
416833af842STejun Heo 
4179f7dcf22STejun Heo 	return 0;
4189f7dcf22STejun Heo }
4199f7dcf22STejun Heo 
4209f7dcf22STejun Heo /**
421fbf59bc9STejun Heo  * pcpu_split_block - split a map block
422fbf59bc9STejun Heo  * @chunk: chunk of interest
423fbf59bc9STejun Heo  * @i: index of map block to split
424cae3aeb8STejun Heo  * @head: head size in bytes (can be 0)
425cae3aeb8STejun Heo  * @tail: tail size in bytes (can be 0)
426fbf59bc9STejun Heo  *
427fbf59bc9STejun Heo  * Split the @i'th map block into two or three blocks.  If @head is
428fbf59bc9STejun Heo  * non-zero, a @head bytes block is inserted before block @i, moving it
429fbf59bc9STejun Heo  * to @i+1 and reducing its size by @head bytes.
430fbf59bc9STejun Heo  *
431fbf59bc9STejun Heo  * If @tail is non-zero, the target block, which can be @i or @i+1
432fbf59bc9STejun Heo  * depending on @head, is reduced by @tail bytes and a @tail bytes block
433fbf59bc9STejun Heo  * is inserted after the target block.
434fbf59bc9STejun Heo  *
43525985edcSLucas De Marchi  * @chunk->map must have enough free slots to accommodate the split.
436ccea34b5STejun Heo  *
437ccea34b5STejun Heo  * CONTEXT:
438ccea34b5STejun Heo  * pcpu_lock.
439fbf59bc9STejun Heo  */
4409f7dcf22STejun Heo static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
4419f7dcf22STejun Heo 			     int head, int tail)
442fbf59bc9STejun Heo {
443fbf59bc9STejun Heo 	int nr_extra = !!head + !!tail;
444fbf59bc9STejun Heo 
4459f7dcf22STejun Heo 	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
446fbf59bc9STejun Heo 
4479f7dcf22STejun Heo 	/* insert new subblocks */
448fbf59bc9STejun Heo 	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
449fbf59bc9STejun Heo 		sizeof(chunk->map[0]) * (chunk->map_used - i));
450fbf59bc9STejun Heo 	chunk->map_used += nr_extra;
451fbf59bc9STejun Heo 
452fbf59bc9STejun Heo 	if (head) {
453fbf59bc9STejun Heo 		chunk->map[i + 1] = chunk->map[i] - head;
454fbf59bc9STejun Heo 		chunk->map[i++] = head;
455fbf59bc9STejun Heo 	}
456fbf59bc9STejun Heo 	if (tail) {
457fbf59bc9STejun Heo 		chunk->map[i++] -= tail;
458fbf59bc9STejun Heo 		chunk->map[i] = tail;
459fbf59bc9STejun Heo 	}
460fbf59bc9STejun Heo }
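
/*
 * Illustrative example: splitting a 100-byte free block at index @i
 * with @head == 16 and @tail == 20 turns
 *
 *	..., map[i] == 100, ...
 * into
 *	..., map[i] == 16, map[i+1] == 64, map[i+2] == 20, ...
 *
 * leaving the 64-byte middle block for the caller to mark allocated.
 */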
461fbf59bc9STejun Heo 
462fbf59bc9STejun Heo /**
463fbf59bc9STejun Heo  * pcpu_alloc_area - allocate area from a pcpu_chunk
464fbf59bc9STejun Heo  * @chunk: chunk of interest
465cae3aeb8STejun Heo  * @size: wanted size in bytes
466fbf59bc9STejun Heo  * @align: wanted align
467fbf59bc9STejun Heo  *
468fbf59bc9STejun Heo  * Try to allocate @size bytes area aligned at @align from @chunk.
469fbf59bc9STejun Heo  * Note that this function only allocates the offset.  It doesn't
470fbf59bc9STejun Heo  * populate or map the area.
471fbf59bc9STejun Heo  *
4729f7dcf22STejun Heo  * @chunk->map must have at least two free slots.
4739f7dcf22STejun Heo  *
474ccea34b5STejun Heo  * CONTEXT:
475ccea34b5STejun Heo  * pcpu_lock.
476ccea34b5STejun Heo  *
477fbf59bc9STejun Heo  * RETURNS:
4789f7dcf22STejun Heo  * Allocated offset in @chunk on success, -1 if no matching area is
4799f7dcf22STejun Heo  * found.
480fbf59bc9STejun Heo  */
481fbf59bc9STejun Heo static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
482fbf59bc9STejun Heo {
483fbf59bc9STejun Heo 	int oslot = pcpu_chunk_slot(chunk);
484fbf59bc9STejun Heo 	int max_contig = 0;
485fbf59bc9STejun Heo 	int i, off;
486fbf59bc9STejun Heo 
487fbf59bc9STejun Heo 	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
488fbf59bc9STejun Heo 		bool is_last = i + 1 == chunk->map_used;
489fbf59bc9STejun Heo 		int head, tail;
490fbf59bc9STejun Heo 
491fbf59bc9STejun Heo 		/* extra for alignment requirement */
492fbf59bc9STejun Heo 		head = ALIGN(off, align) - off;
493fbf59bc9STejun Heo 		BUG_ON(i == 0 && head != 0);
494fbf59bc9STejun Heo 
495fbf59bc9STejun Heo 		if (chunk->map[i] < 0)
496fbf59bc9STejun Heo 			continue;
497fbf59bc9STejun Heo 		if (chunk->map[i] < head + size) {
498fbf59bc9STejun Heo 			max_contig = max(chunk->map[i], max_contig);
499fbf59bc9STejun Heo 			continue;
500fbf59bc9STejun Heo 		}
501fbf59bc9STejun Heo 
502fbf59bc9STejun Heo 		/*
503fbf59bc9STejun Heo 		 * If head is small or the previous block is free,
504fbf59bc9STejun Heo 		 * merge'em.  Note that 'small' is defined as smaller
505fbf59bc9STejun Heo 		 * than sizeof(int), which is very small but isn't too
506fbf59bc9STejun Heo 		 * uncommon for percpu allocations.
507fbf59bc9STejun Heo 		 */
508fbf59bc9STejun Heo 		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
509fbf59bc9STejun Heo 			if (chunk->map[i - 1] > 0)
510fbf59bc9STejun Heo 				chunk->map[i - 1] += head;
511fbf59bc9STejun Heo 			else {
512fbf59bc9STejun Heo 				chunk->map[i - 1] -= head;
513fbf59bc9STejun Heo 				chunk->free_size -= head;
514fbf59bc9STejun Heo 			}
515fbf59bc9STejun Heo 			chunk->map[i] -= head;
516fbf59bc9STejun Heo 			off += head;
517fbf59bc9STejun Heo 			head = 0;
518fbf59bc9STejun Heo 		}
519fbf59bc9STejun Heo 
520fbf59bc9STejun Heo 		/* if tail is small, just keep it around */
521fbf59bc9STejun Heo 		tail = chunk->map[i] - head - size;
522fbf59bc9STejun Heo 		if (tail < sizeof(int))
523fbf59bc9STejun Heo 			tail = 0;
524fbf59bc9STejun Heo 
525fbf59bc9STejun Heo 		/* split if warranted */
526fbf59bc9STejun Heo 		if (head || tail) {
5279f7dcf22STejun Heo 			pcpu_split_block(chunk, i, head, tail);
528fbf59bc9STejun Heo 			if (head) {
529fbf59bc9STejun Heo 				i++;
530fbf59bc9STejun Heo 				off += head;
531fbf59bc9STejun Heo 				max_contig = max(chunk->map[i - 1], max_contig);
532fbf59bc9STejun Heo 			}
533fbf59bc9STejun Heo 			if (tail)
534fbf59bc9STejun Heo 				max_contig = max(chunk->map[i + 1], max_contig);
535fbf59bc9STejun Heo 		}
536fbf59bc9STejun Heo 
537fbf59bc9STejun Heo 		/* update hint and mark allocated */
538fbf59bc9STejun Heo 		if (is_last)
539fbf59bc9STejun Heo 			chunk->contig_hint = max_contig; /* fully scanned */
540fbf59bc9STejun Heo 		else
541fbf59bc9STejun Heo 			chunk->contig_hint = max(chunk->contig_hint,
542fbf59bc9STejun Heo 						 max_contig);
543fbf59bc9STejun Heo 
544fbf59bc9STejun Heo 		chunk->free_size -= chunk->map[i];
545fbf59bc9STejun Heo 		chunk->map[i] = -chunk->map[i];
546fbf59bc9STejun Heo 
547fbf59bc9STejun Heo 		pcpu_chunk_relocate(chunk, oslot);
548fbf59bc9STejun Heo 		return off;
549fbf59bc9STejun Heo 	}
550fbf59bc9STejun Heo 
551fbf59bc9STejun Heo 	chunk->contig_hint = max_contig;	/* fully scanned */
552fbf59bc9STejun Heo 	pcpu_chunk_relocate(chunk, oslot);
553fbf59bc9STejun Heo 
5549f7dcf22STejun Heo 	/* tell the upper layer that this chunk has no matching area */
5559f7dcf22STejun Heo 	return -1;
556fbf59bc9STejun Heo }
557fbf59bc9STejun Heo 
558fbf59bc9STejun Heo /**
559fbf59bc9STejun Heo  * pcpu_free_area - free area to a pcpu_chunk
560fbf59bc9STejun Heo  * @chunk: chunk of interest
561fbf59bc9STejun Heo  * @freeme: offset of area to free
562fbf59bc9STejun Heo  *
563fbf59bc9STejun Heo  * Free the area starting at @freeme back to @chunk.  Note that this
564fbf59bc9STejun Heo  * function only modifies the allocation map.  It doesn't depopulate
565fbf59bc9STejun Heo  * or unmap the area.
566ccea34b5STejun Heo  *
567ccea34b5STejun Heo  * CONTEXT:
568ccea34b5STejun Heo  * pcpu_lock.
569fbf59bc9STejun Heo  */
570fbf59bc9STejun Heo static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
571fbf59bc9STejun Heo {
572fbf59bc9STejun Heo 	int oslot = pcpu_chunk_slot(chunk);
573fbf59bc9STejun Heo 	int i, off;
574fbf59bc9STejun Heo 
575fbf59bc9STejun Heo 	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
576fbf59bc9STejun Heo 		if (off == freeme)
577fbf59bc9STejun Heo 			break;
578fbf59bc9STejun Heo 	BUG_ON(off != freeme);
579fbf59bc9STejun Heo 	BUG_ON(chunk->map[i] > 0);
580fbf59bc9STejun Heo 
581fbf59bc9STejun Heo 	chunk->map[i] = -chunk->map[i];
582fbf59bc9STejun Heo 	chunk->free_size += chunk->map[i];
583fbf59bc9STejun Heo 
584fbf59bc9STejun Heo 	/* merge with previous? */
585fbf59bc9STejun Heo 	if (i > 0 && chunk->map[i - 1] >= 0) {
586fbf59bc9STejun Heo 		chunk->map[i - 1] += chunk->map[i];
587fbf59bc9STejun Heo 		chunk->map_used--;
588fbf59bc9STejun Heo 		memmove(&chunk->map[i], &chunk->map[i + 1],
589fbf59bc9STejun Heo 			(chunk->map_used - i) * sizeof(chunk->map[0]));
590fbf59bc9STejun Heo 		i--;
591fbf59bc9STejun Heo 	}
592fbf59bc9STejun Heo 	/* merge with next? */
593fbf59bc9STejun Heo 	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
594fbf59bc9STejun Heo 		chunk->map[i] += chunk->map[i + 1];
595fbf59bc9STejun Heo 		chunk->map_used--;
596fbf59bc9STejun Heo 		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
597fbf59bc9STejun Heo 			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
598fbf59bc9STejun Heo 	}
599fbf59bc9STejun Heo 
600fbf59bc9STejun Heo 	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
601fbf59bc9STejun Heo 	pcpu_chunk_relocate(chunk, oslot);
602fbf59bc9STejun Heo }
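
/*
 * Illustrative example: with an area map of { 64, -128, 256 },
 * freeing the allocation at offset 64 flips -128 back to 128 and
 * merges it with both free neighbors, leaving a single { 448 } entry
 * and a contig_hint of at least 448.
 */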
603fbf59bc9STejun Heo 
6046081089fSTejun Heo static struct pcpu_chunk *pcpu_alloc_chunk(void)
6056081089fSTejun Heo {
6066081089fSTejun Heo 	struct pcpu_chunk *chunk;
6076081089fSTejun Heo 
60890459ce0SBob Liu 	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
6096081089fSTejun Heo 	if (!chunk)
6106081089fSTejun Heo 		return NULL;
6116081089fSTejun Heo 
61290459ce0SBob Liu 	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
61390459ce0SBob Liu 						sizeof(chunk->map[0]));
6146081089fSTejun Heo 	if (!chunk->map) {
6156081089fSTejun Heo 		kfree(chunk);
6166081089fSTejun Heo 		return NULL;
6176081089fSTejun Heo 	}
6186081089fSTejun Heo 
6196081089fSTejun Heo 	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
6206081089fSTejun Heo 	chunk->map[chunk->map_used++] = pcpu_unit_size;
6216081089fSTejun Heo 
6226081089fSTejun Heo 	INIT_LIST_HEAD(&chunk->list);
6236081089fSTejun Heo 	chunk->free_size = pcpu_unit_size;
6246081089fSTejun Heo 	chunk->contig_hint = pcpu_unit_size;
6256081089fSTejun Heo 
6266081089fSTejun Heo 	return chunk;
6276081089fSTejun Heo }
6286081089fSTejun Heo 
6296081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk)
6306081089fSTejun Heo {
6316081089fSTejun Heo 	if (!chunk)
6326081089fSTejun Heo 		return;
6336081089fSTejun Heo 	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
634b4916cb1SJoonsoo Kim 	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
6356081089fSTejun Heo }
6366081089fSTejun Heo 
637fbf59bc9STejun Heo /*
6389f645532STejun Heo  * Chunk management implementation.
639fbf59bc9STejun Heo  *
6409f645532STejun Heo  * To allow different implementations, chunk alloc/free and
6419f645532STejun Heo  * [de]population are implemented in a separate file which is pulled
6429f645532STejun Heo  * into this file and compiled together.  The following functions
6439f645532STejun Heo  * should be implemented.
644ccea34b5STejun Heo  *
6459f645532STejun Heo  * pcpu_populate_chunk		- populate the specified range of a chunk
6469f645532STejun Heo  * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
6479f645532STejun Heo  * pcpu_create_chunk		- create a new chunk
6489f645532STejun Heo  * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
6499f645532STejun Heo  * pcpu_addr_to_page		- translate an address to its struct page
6509f645532STejun Heo  * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
651fbf59bc9STejun Heo  */
6529f645532STejun Heo static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
6539f645532STejun Heo static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
6549f645532STejun Heo static struct pcpu_chunk *pcpu_create_chunk(void);
6559f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
6569f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr);
6579f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
658fbf59bc9STejun Heo 
659b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM
660b0c9778bSTejun Heo #include "percpu-km.c"
661b0c9778bSTejun Heo #else
6629f645532STejun Heo #include "percpu-vm.c"
663b0c9778bSTejun Heo #endif
664fbf59bc9STejun Heo 
665fbf59bc9STejun Heo /**
66688999a89STejun Heo  * pcpu_chunk_addr_search - determine chunk containing specified address
66788999a89STejun Heo  * @addr: address for which the chunk needs to be determined.
66888999a89STejun Heo  *
66988999a89STejun Heo  * RETURNS:
67088999a89STejun Heo  * The address of the found chunk.
67188999a89STejun Heo  */
67288999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
67388999a89STejun Heo {
67488999a89STejun Heo 	/* is it in the first chunk? */
67588999a89STejun Heo 	if (pcpu_addr_in_first_chunk(addr)) {
67688999a89STejun Heo 		/* is it in the reserved area? */
67788999a89STejun Heo 		if (pcpu_addr_in_reserved_chunk(addr))
67888999a89STejun Heo 			return pcpu_reserved_chunk;
67988999a89STejun Heo 		return pcpu_first_chunk;
68088999a89STejun Heo 	}
68188999a89STejun Heo 
68288999a89STejun Heo 	/*
68388999a89STejun Heo 	 * The address is relative to unit0 which might be unused and
68488999a89STejun Heo 	 * thus unmapped.  Offset the address to the unit space of the
68588999a89STejun Heo 	 * current processor before looking it up in the vmalloc
68688999a89STejun Heo 	 * space.  Note that any possible cpu id can be used here, so
68788999a89STejun Heo 	 * there's no need to worry about preemption or cpu hotplug.
68888999a89STejun Heo 	 */
68988999a89STejun Heo 	addr += pcpu_unit_offsets[raw_smp_processor_id()];
6909f645532STejun Heo 	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
69188999a89STejun Heo }
69288999a89STejun Heo 
69388999a89STejun Heo /**
694edcb4639STejun Heo  * pcpu_alloc - the percpu allocator
695cae3aeb8STejun Heo  * @size: size of area to allocate in bytes
696fbf59bc9STejun Heo  * @align: alignment of area (max PAGE_SIZE)
697edcb4639STejun Heo  * @reserved: allocate from the reserved chunk if available
698fbf59bc9STejun Heo  *
699ccea34b5STejun Heo  * Allocate percpu area of @size bytes aligned at @align.
700ccea34b5STejun Heo  *
701ccea34b5STejun Heo  * CONTEXT:
702ccea34b5STejun Heo  * Does GFP_KERNEL allocation.
703fbf59bc9STejun Heo  *
704fbf59bc9STejun Heo  * RETURNS:
705fbf59bc9STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
706fbf59bc9STejun Heo  */
70743cf38ebSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
708fbf59bc9STejun Heo {
709f2badb0cSTejun Heo 	static int warn_limit = 10;
710fbf59bc9STejun Heo 	struct pcpu_chunk *chunk;
711f2badb0cSTejun Heo 	const char *err;
712833af842STejun Heo 	int slot, off, new_alloc;
713403a91b1SJiri Kosina 	unsigned long flags;
714f528f0b8SCatalin Marinas 	void __percpu *ptr;
715fbf59bc9STejun Heo 
7168d408b4bSTejun Heo 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
717fbf59bc9STejun Heo 		WARN(true, "illegal size (%zu) or align (%zu) for "
718fbf59bc9STejun Heo 		     "percpu allocation\n", size, align);
719fbf59bc9STejun Heo 		return NULL;
720fbf59bc9STejun Heo 	}
721fbf59bc9STejun Heo 
722ccea34b5STejun Heo 	mutex_lock(&pcpu_alloc_mutex);
723403a91b1SJiri Kosina 	spin_lock_irqsave(&pcpu_lock, flags);
724fbf59bc9STejun Heo 
725edcb4639STejun Heo 	/* serve reserved allocations from the reserved chunk if available */
726edcb4639STejun Heo 	if (reserved && pcpu_reserved_chunk) {
727edcb4639STejun Heo 		chunk = pcpu_reserved_chunk;
728833af842STejun Heo 
729833af842STejun Heo 		if (size > chunk->contig_hint) {
730833af842STejun Heo 			err = "alloc from reserved chunk failed";
731ccea34b5STejun Heo 			goto fail_unlock;
732f2badb0cSTejun Heo 		}
733833af842STejun Heo 
734833af842STejun Heo 		while ((new_alloc = pcpu_need_to_extend(chunk))) {
735833af842STejun Heo 			spin_unlock_irqrestore(&pcpu_lock, flags);
736833af842STejun Heo 			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
737833af842STejun Heo 				err = "failed to extend area map of reserved chunk";
738833af842STejun Heo 				goto fail_unlock_mutex;
739833af842STejun Heo 			}
740833af842STejun Heo 			spin_lock_irqsave(&pcpu_lock, flags);
741833af842STejun Heo 		}
742833af842STejun Heo 
743edcb4639STejun Heo 		off = pcpu_alloc_area(chunk, size, align);
744edcb4639STejun Heo 		if (off >= 0)
745edcb4639STejun Heo 			goto area_found;
746833af842STejun Heo 
747f2badb0cSTejun Heo 		err = "alloc from reserved chunk failed";
748ccea34b5STejun Heo 		goto fail_unlock;
749edcb4639STejun Heo 	}
750edcb4639STejun Heo 
751ccea34b5STejun Heo restart:
752edcb4639STejun Heo 	/* search through normal chunks */
753fbf59bc9STejun Heo 	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
754fbf59bc9STejun Heo 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
755fbf59bc9STejun Heo 			if (size > chunk->contig_hint)
756fbf59bc9STejun Heo 				continue;
757ccea34b5STejun Heo 
758833af842STejun Heo 			new_alloc = pcpu_need_to_extend(chunk);
759833af842STejun Heo 			if (new_alloc) {
760833af842STejun Heo 				spin_unlock_irqrestore(&pcpu_lock, flags);
761833af842STejun Heo 				if (pcpu_extend_area_map(chunk,
762833af842STejun Heo 							 new_alloc) < 0) {
763f2badb0cSTejun Heo 					err = "failed to extend area map";
764833af842STejun Heo 					goto fail_unlock_mutex;
765833af842STejun Heo 				}
766833af842STejun Heo 				spin_lock_irqsave(&pcpu_lock, flags);
767833af842STejun Heo 				/*
768833af842STejun Heo 				 * pcpu_lock has been dropped, need to
769833af842STejun Heo 				 * restart cpu_slot list walking.
770833af842STejun Heo 				 */
771833af842STejun Heo 				goto restart;
772ccea34b5STejun Heo 			}
773ccea34b5STejun Heo 
774fbf59bc9STejun Heo 			off = pcpu_alloc_area(chunk, size, align);
775fbf59bc9STejun Heo 			if (off >= 0)
776fbf59bc9STejun Heo 				goto area_found;
777fbf59bc9STejun Heo 		}
778fbf59bc9STejun Heo 	}
779fbf59bc9STejun Heo 
780fbf59bc9STejun Heo 	/* hmmm... no space left, create a new chunk */
781403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
782ccea34b5STejun Heo 
7836081089fSTejun Heo 	chunk = pcpu_create_chunk();
784f2badb0cSTejun Heo 	if (!chunk) {
785f2badb0cSTejun Heo 		err = "failed to allocate new chunk";
786ccea34b5STejun Heo 		goto fail_unlock_mutex;
787f2badb0cSTejun Heo 	}
788ccea34b5STejun Heo 
789403a91b1SJiri Kosina 	spin_lock_irqsave(&pcpu_lock, flags);
790fbf59bc9STejun Heo 	pcpu_chunk_relocate(chunk, -1);
791ccea34b5STejun Heo 	goto restart;
792fbf59bc9STejun Heo 
793fbf59bc9STejun Heo area_found:
794403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
795ccea34b5STejun Heo 
796fbf59bc9STejun Heo 	/* populate, map and clear the area */
797fbf59bc9STejun Heo 	if (pcpu_populate_chunk(chunk, off, size)) {
798403a91b1SJiri Kosina 		spin_lock_irqsave(&pcpu_lock, flags);
799fbf59bc9STejun Heo 		pcpu_free_area(chunk, off);
800f2badb0cSTejun Heo 		err = "failed to populate";
801ccea34b5STejun Heo 		goto fail_unlock;
802fbf59bc9STejun Heo 	}
803fbf59bc9STejun Heo 
804ccea34b5STejun Heo 	mutex_unlock(&pcpu_alloc_mutex);
805ccea34b5STejun Heo 
806bba174f5STejun Heo 	/* return address relative to base address */
807f528f0b8SCatalin Marinas 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
808f528f0b8SCatalin Marinas 	kmemleak_alloc_percpu(ptr, size);
809f528f0b8SCatalin Marinas 	return ptr;
810ccea34b5STejun Heo 
811ccea34b5STejun Heo fail_unlock:
812403a91b1SJiri Kosina 	spin_unlock_irqrestore(&pcpu_lock, flags);
813ccea34b5STejun Heo fail_unlock_mutex:
814ccea34b5STejun Heo 	mutex_unlock(&pcpu_alloc_mutex);
815f2badb0cSTejun Heo 	if (warn_limit) {
816f2badb0cSTejun Heo 		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
817f2badb0cSTejun Heo 			   "%s\n", size, align, err);
818f2badb0cSTejun Heo 		dump_stack();
819f2badb0cSTejun Heo 		if (!--warn_limit)
820f2badb0cSTejun Heo 			pr_info("PERCPU: limit reached, disable warning\n");
821f2badb0cSTejun Heo 	}
822ccea34b5STejun Heo 	return NULL;
823fbf59bc9STejun Heo }
824edcb4639STejun Heo 
825edcb4639STejun Heo /**
826edcb4639STejun Heo  * __alloc_percpu - allocate dynamic percpu area
827edcb4639STejun Heo  * @size: size of area to allocate in bytes
828edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
829edcb4639STejun Heo  *
8309329ba97STejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align.
8319329ba97STejun Heo  * Might sleep.  Might trigger writeouts.
832edcb4639STejun Heo  *
833ccea34b5STejun Heo  * CONTEXT:
834ccea34b5STejun Heo  * Does GFP_KERNEL allocation.
835ccea34b5STejun Heo  *
836edcb4639STejun Heo  * RETURNS:
837edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
838edcb4639STejun Heo  */
83943cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align)
840edcb4639STejun Heo {
841edcb4639STejun Heo 	return pcpu_alloc(size, align, false);
842edcb4639STejun Heo }
843fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu);
844fbf59bc9STejun Heo 
845edcb4639STejun Heo /**
846edcb4639STejun Heo  * __alloc_reserved_percpu - allocate reserved percpu area
847edcb4639STejun Heo  * @size: size of area to allocate in bytes
848edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
849edcb4639STejun Heo  *
8509329ba97STejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align
8519329ba97STejun Heo  * from reserved percpu area if arch has set it up; otherwise,
8529329ba97STejun Heo  * allocation is served from the same dynamic area.  Might sleep.
8539329ba97STejun Heo  * Might trigger writeouts.
854edcb4639STejun Heo  *
855ccea34b5STejun Heo  * CONTEXT:
856ccea34b5STejun Heo  * Does GFP_KERNEL allocation.
857ccea34b5STejun Heo  *
858edcb4639STejun Heo  * RETURNS:
859edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
860edcb4639STejun Heo  */
86143cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
862edcb4639STejun Heo {
863edcb4639STejun Heo 	return pcpu_alloc(size, align, true);
864edcb4639STejun Heo }
865edcb4639STejun Heo 
866a56dbddfSTejun Heo /**
867a56dbddfSTejun Heo  * pcpu_reclaim - reclaim fully free chunks, workqueue function
868a56dbddfSTejun Heo  * @work: unused
869a56dbddfSTejun Heo  *
870a56dbddfSTejun Heo  * Reclaim all fully free chunks except for the first one.
871ccea34b5STejun Heo  *
872ccea34b5STejun Heo  * CONTEXT:
873ccea34b5STejun Heo  * workqueue context.
874a56dbddfSTejun Heo  */
875a56dbddfSTejun Heo static void pcpu_reclaim(struct work_struct *work)
876fbf59bc9STejun Heo {
877a56dbddfSTejun Heo 	LIST_HEAD(todo);
878a56dbddfSTejun Heo 	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
879a56dbddfSTejun Heo 	struct pcpu_chunk *chunk, *next;
880a56dbddfSTejun Heo 
881ccea34b5STejun Heo 	mutex_lock(&pcpu_alloc_mutex);
882ccea34b5STejun Heo 	spin_lock_irq(&pcpu_lock);
883a56dbddfSTejun Heo 
884a56dbddfSTejun Heo 	list_for_each_entry_safe(chunk, next, head, list) {
8858d408b4bSTejun Heo 		WARN_ON(chunk->immutable);
886a56dbddfSTejun Heo 
887a56dbddfSTejun Heo 		/* spare the first one */
888a56dbddfSTejun Heo 		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
889a56dbddfSTejun Heo 			continue;
890a56dbddfSTejun Heo 
891a56dbddfSTejun Heo 		list_move(&chunk->list, &todo);
892a56dbddfSTejun Heo 	}
893a56dbddfSTejun Heo 
894ccea34b5STejun Heo 	spin_unlock_irq(&pcpu_lock);
895a56dbddfSTejun Heo 
896a56dbddfSTejun Heo 	list_for_each_entry_safe(chunk, next, &todo, list) {
897ce3141a2STejun Heo 		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
8986081089fSTejun Heo 		pcpu_destroy_chunk(chunk);
899fbf59bc9STejun Heo 	}
900971f3918STejun Heo 
901971f3918STejun Heo 	mutex_unlock(&pcpu_alloc_mutex);
902a56dbddfSTejun Heo }
903fbf59bc9STejun Heo 
904fbf59bc9STejun Heo /**
905fbf59bc9STejun Heo  * free_percpu - free percpu area
906fbf59bc9STejun Heo  * @ptr: pointer to area to free
907fbf59bc9STejun Heo  *
908ccea34b5STejun Heo  * Free percpu area @ptr.
909ccea34b5STejun Heo  *
910ccea34b5STejun Heo  * CONTEXT:
911ccea34b5STejun Heo  * Can be called from atomic context.
912fbf59bc9STejun Heo  */
91343cf38ebSTejun Heo void free_percpu(void __percpu *ptr)
914fbf59bc9STejun Heo {
915129182e5SAndrew Morton 	void *addr;
916fbf59bc9STejun Heo 	struct pcpu_chunk *chunk;
917ccea34b5STejun Heo 	unsigned long flags;
918fbf59bc9STejun Heo 	int off;
919fbf59bc9STejun Heo 
920fbf59bc9STejun Heo 	if (!ptr)
921fbf59bc9STejun Heo 		return;
922fbf59bc9STejun Heo 
923f528f0b8SCatalin Marinas 	kmemleak_free_percpu(ptr);
924f528f0b8SCatalin Marinas 
925129182e5SAndrew Morton 	addr = __pcpu_ptr_to_addr(ptr);
926129182e5SAndrew Morton 
927ccea34b5STejun Heo 	spin_lock_irqsave(&pcpu_lock, flags);
928fbf59bc9STejun Heo 
929fbf59bc9STejun Heo 	chunk = pcpu_chunk_addr_search(addr);
930bba174f5STejun Heo 	off = addr - chunk->base_addr;
931fbf59bc9STejun Heo 
932fbf59bc9STejun Heo 	pcpu_free_area(chunk, off);
933fbf59bc9STejun Heo 
934a56dbddfSTejun Heo 	/* if there is more than one fully free chunk, wake up the grim reaper */
935fbf59bc9STejun Heo 	if (chunk->free_size == pcpu_unit_size) {
936fbf59bc9STejun Heo 		struct pcpu_chunk *pos;
937fbf59bc9STejun Heo 
938a56dbddfSTejun Heo 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
939fbf59bc9STejun Heo 			if (pos != chunk) {
940a56dbddfSTejun Heo 				schedule_work(&pcpu_reclaim_work);
941fbf59bc9STejun Heo 				break;
942fbf59bc9STejun Heo 			}
943fbf59bc9STejun Heo 	}
944fbf59bc9STejun Heo 
945ccea34b5STejun Heo 	spin_unlock_irqrestore(&pcpu_lock, flags);
946fbf59bc9STejun Heo }
947fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu);
948fbf59bc9STejun Heo 
9493b034b0dSVivek Goyal /**
95010fad5e4STejun Heo  * is_kernel_percpu_address - test whether address is from static percpu area
95110fad5e4STejun Heo  * @addr: address to test
95210fad5e4STejun Heo  *
95310fad5e4STejun Heo  * Test whether @addr belongs to the in-kernel static percpu area.
95410fad5e4STejun Heo  * Module static percpu areas are not considered.  For those, use
95510fad5e4STejun Heo  * is_module_percpu_address().
95610fad5e4STejun Heo  *
95710fad5e4STejun Heo  * RETURNS:
95810fad5e4STejun Heo  * %true if @addr is from in-kernel static percpu area, %false otherwise.
95910fad5e4STejun Heo  */
96010fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr)
96110fad5e4STejun Heo {
962bbddff05STejun Heo #ifdef CONFIG_SMP
96310fad5e4STejun Heo 	const size_t static_size = __per_cpu_end - __per_cpu_start;
96410fad5e4STejun Heo 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
96510fad5e4STejun Heo 	unsigned int cpu;
96610fad5e4STejun Heo 
96710fad5e4STejun Heo 	for_each_possible_cpu(cpu) {
96810fad5e4STejun Heo 		void *start = per_cpu_ptr(base, cpu);
96910fad5e4STejun Heo 
97010fad5e4STejun Heo 		if ((void *)addr >= start && (void *)addr < start + static_size)
97110fad5e4STejun Heo 			return true;
97210fad5e4STejun Heo 	}
973bbddff05STejun Heo #endif
974bbddff05STejun Heo 	/* on UP, can't distinguish from other static vars, always false */
97510fad5e4STejun Heo 	return false;
97610fad5e4STejun Heo }
97710fad5e4STejun Heo 
97810fad5e4STejun Heo /**
9793b034b0dSVivek Goyal  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
9803b034b0dSVivek Goyal  * @addr: the address to be converted to physical address
9813b034b0dSVivek Goyal  *
9823b034b0dSVivek Goyal  * Given @addr which is a dereferenceable address obtained via one of
9833b034b0dSVivek Goyal  * percpu access macros, this function translates it into its physical
9843b034b0dSVivek Goyal  * address.  The caller is responsible for ensuring @addr stays valid
9853b034b0dSVivek Goyal  * until this function finishes.
9863b034b0dSVivek Goyal  *
98767589c71SDave Young  * The percpu allocator has a special setup for the first chunk, which
98867589c71SDave Young  * currently supports either embedding in the linear address space or
98967589c71SDave Young  * vmalloc mapping, and, from the second chunk on, the backing
99067589c71SDave Young  * allocator (currently either vm or km) provides translation.
99167589c71SDave Young  *
99267589c71SDave Young  * The addr can be translated simply without checking if it falls into
99367589c71SDave Young  * the first chunk.  But the current code reflects better how the
99467589c71SDave Young  * percpu allocator actually works, and the verification can discover
99567589c71SDave Young  * both bugs in the percpu allocator itself and in the callers of
99667589c71SDave Young  * per_cpu_ptr_to_phys().  So we keep the current code.
99767589c71SDave Young  *
9983b034b0dSVivek Goyal  * RETURNS:
9993b034b0dSVivek Goyal  * The physical address for @addr.
10003b034b0dSVivek Goyal  */
10013b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr)
10023b034b0dSVivek Goyal {
10039983b6f0STejun Heo 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
10049983b6f0STejun Heo 	bool in_first_chunk = false;
1005a855b84cSTejun Heo 	unsigned long first_low, first_high;
10069983b6f0STejun Heo 	unsigned int cpu;
10079983b6f0STejun Heo 
10089983b6f0STejun Heo 	/*
1009a855b84cSTejun Heo 	 * The following test on first_low/first_high isn't strictly
10109983b6f0STejun Heo 	 * necessary but will speed up lookups of addresses which
10119983b6f0STejun Heo 	 * aren't in the first chunk.
10129983b6f0STejun Heo 	 */
1013a855b84cSTejun Heo 	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1014a855b84cSTejun Heo 	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
10159983b6f0STejun Heo 				     pcpu_unit_pages);
1016a855b84cSTejun Heo 	if ((unsigned long)addr >= first_low &&
1017a855b84cSTejun Heo 	    (unsigned long)addr < first_high) {
10189983b6f0STejun Heo 		for_each_possible_cpu(cpu) {
10199983b6f0STejun Heo 			void *start = per_cpu_ptr(base, cpu);
10209983b6f0STejun Heo 
10219983b6f0STejun Heo 			if (addr >= start && addr < start + pcpu_unit_size) {
10229983b6f0STejun Heo 				in_first_chunk = true;
10239983b6f0STejun Heo 				break;
10249983b6f0STejun Heo 			}
10259983b6f0STejun Heo 		}
10269983b6f0STejun Heo 	}
10279983b6f0STejun Heo 
10289983b6f0STejun Heo 	if (in_first_chunk) {
1029eac522efSDavid Howells 		if (!is_vmalloc_addr(addr))
10303b034b0dSVivek Goyal 			return __pa(addr);
10313b034b0dSVivek Goyal 		else
10329f57bd4dSEugene Surovegin 			return page_to_phys(vmalloc_to_page(addr)) +
10339f57bd4dSEugene Surovegin 			       offset_in_page(addr);
1034020ec653STejun Heo 	} else
10359f57bd4dSEugene Surovegin 		return page_to_phys(pcpu_addr_to_page(addr)) +
10369f57bd4dSEugene Surovegin 		       offset_in_page(addr);
10373b034b0dSVivek Goyal }
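
/*
 * Illustrative usage sketch (hypothetical names): a driver handing a
 * percpu buffer to hardware would translate each CPU's copy
 * separately, e.g.
 *
 *	phys_addr_t phys = per_cpu_ptr_to_phys(per_cpu_ptr(buf, cpu));
 *
 * where buf is a percpu pointer returned by __alloc_percpu() and cpu
 * is a possible CPU id.
 */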
10383b034b0dSVivek Goyal 
1039fbf59bc9STejun Heo /**
1040fd1e8a1fSTejun Heo  * pcpu_alloc_alloc_info - allocate percpu allocation info
1041fd1e8a1fSTejun Heo  * @nr_groups: the number of groups
1042fd1e8a1fSTejun Heo  * @nr_units: the number of units
1043033e48fbSTejun Heo  *
1044fd1e8a1fSTejun Heo  * Allocate ai which is large enough for @nr_groups groups containing
1045fd1e8a1fSTejun Heo  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1046fd1e8a1fSTejun Heo  * cpu_map array which is long enough for @nr_units and filled with
1047fd1e8a1fSTejun Heo  * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1048fd1e8a1fSTejun Heo  * pointer of other groups.
1049033e48fbSTejun Heo  *
1050033e48fbSTejun Heo  * RETURNS:
1051fd1e8a1fSTejun Heo  * Pointer to the allocated pcpu_alloc_info on success, NULL on
1052fd1e8a1fSTejun Heo  * failure.
1053033e48fbSTejun Heo  */
1054fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1055fd1e8a1fSTejun Heo 						      int nr_units)
1056fd1e8a1fSTejun Heo {
1057fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
1058fd1e8a1fSTejun Heo 	size_t base_size, ai_size;
1059fd1e8a1fSTejun Heo 	void *ptr;
1060fd1e8a1fSTejun Heo 	int unit;
1061fd1e8a1fSTejun Heo 
1062fd1e8a1fSTejun Heo 	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1063fd1e8a1fSTejun Heo 			  __alignof__(ai->groups[0].cpu_map[0]));
1064fd1e8a1fSTejun Heo 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1065fd1e8a1fSTejun Heo 
1066fd1e8a1fSTejun Heo 	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1067fd1e8a1fSTejun Heo 	if (!ptr)
1068fd1e8a1fSTejun Heo 		return NULL;
1069fd1e8a1fSTejun Heo 	ai = ptr;
1070fd1e8a1fSTejun Heo 	ptr += base_size;
1071fd1e8a1fSTejun Heo 
1072fd1e8a1fSTejun Heo 	ai->groups[0].cpu_map = ptr;
1073fd1e8a1fSTejun Heo 
1074fd1e8a1fSTejun Heo 	for (unit = 0; unit < nr_units; unit++)
1075fd1e8a1fSTejun Heo 		ai->groups[0].cpu_map[unit] = NR_CPUS;
1076fd1e8a1fSTejun Heo 
1077fd1e8a1fSTejun Heo 	ai->nr_groups = nr_groups;
1078fd1e8a1fSTejun Heo 	ai->__ai_size = PFN_ALIGN(ai_size);
1079fd1e8a1fSTejun Heo 
1080fd1e8a1fSTejun Heo 	return ai;
1081fd1e8a1fSTejun Heo }
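
/*
 * Illustrative sketch (assuming a flat, single-group configuration
 * with hypothetical nr_units): an arch setup path could build a
 * trivial alloc_info like
 *
 *	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(1, nr_units);
 *
 *	if (ai) {
 *		int unit;
 *
 *		ai->groups[0].nr_units = nr_units;
 *		for (unit = 0; unit < nr_units; unit++)
 *			ai->groups[0].cpu_map[unit] = unit;
 *	}
 *
 * before filling in the size fields and passing it to
 * pcpu_setup_first_chunk().
 */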
1082fd1e8a1fSTejun Heo 
1083fd1e8a1fSTejun Heo /**
1084fd1e8a1fSTejun Heo  * pcpu_free_alloc_info - free percpu allocation info
1085fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info to free
1086fd1e8a1fSTejun Heo  *
1087fd1e8a1fSTejun Heo  * Free @ai which was allocated by pcpu_alloc_alloc_info().
1088fd1e8a1fSTejun Heo  */
1089fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1090fd1e8a1fSTejun Heo {
1091fd1e8a1fSTejun Heo 	free_bootmem(__pa(ai), ai->__ai_size);
1092fd1e8a1fSTejun Heo }
1093fd1e8a1fSTejun Heo 
1094fd1e8a1fSTejun Heo /**
1095fd1e8a1fSTejun Heo  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1096fd1e8a1fSTejun Heo  * @lvl: loglevel
1097fd1e8a1fSTejun Heo  * @ai: allocation info to dump
1098fd1e8a1fSTejun Heo  *
1099fd1e8a1fSTejun Heo  * Print out information about @ai using loglevel @lvl.
1100fd1e8a1fSTejun Heo  */
1101fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
1102fd1e8a1fSTejun Heo 				 const struct pcpu_alloc_info *ai)
1103033e48fbSTejun Heo {
1104fd1e8a1fSTejun Heo 	int group_width = 1, cpu_width = 1, width;
1105033e48fbSTejun Heo 	char empty_str[] = "--------";
1106fd1e8a1fSTejun Heo 	int alloc = 0, alloc_end = 0;
1107fd1e8a1fSTejun Heo 	int group, v;
1108fd1e8a1fSTejun Heo 	int upa, apl;	/* units per alloc, allocs per line */
1109033e48fbSTejun Heo 
1110fd1e8a1fSTejun Heo 	v = ai->nr_groups;
1111033e48fbSTejun Heo 	while (v /= 10)
1112fd1e8a1fSTejun Heo 		group_width++;
1113033e48fbSTejun Heo 
1114fd1e8a1fSTejun Heo 	v = num_possible_cpus();
1115fd1e8a1fSTejun Heo 	while (v /= 10)
1116fd1e8a1fSTejun Heo 		cpu_width++;
1117fd1e8a1fSTejun Heo 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1118033e48fbSTejun Heo 
1119fd1e8a1fSTejun Heo 	upa = ai->alloc_size / ai->unit_size;
1120fd1e8a1fSTejun Heo 	width = upa * (cpu_width + 1) + group_width + 3;
1121fd1e8a1fSTejun Heo 	apl = rounddown_pow_of_two(max(60 / width, 1));
1122033e48fbSTejun Heo 
1123fd1e8a1fSTejun Heo 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1124fd1e8a1fSTejun Heo 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1125fd1e8a1fSTejun Heo 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1126fd1e8a1fSTejun Heo 
1127fd1e8a1fSTejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
1128fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
1129fd1e8a1fSTejun Heo 		int unit = 0, unit_end = 0;
1130fd1e8a1fSTejun Heo 
1131fd1e8a1fSTejun Heo 		BUG_ON(gi->nr_units % upa);
1132fd1e8a1fSTejun Heo 		for (alloc_end += gi->nr_units / upa;
1133fd1e8a1fSTejun Heo 		     alloc < alloc_end; alloc++) {
1134fd1e8a1fSTejun Heo 			if (!(alloc % apl)) {
1135cb129820STejun Heo 				printk(KERN_CONT "\n");
1136fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl);
1137033e48fbSTejun Heo 			}
1138cb129820STejun Heo 			printk(KERN_CONT "[%0*d] ", group_width, group);
1139fd1e8a1fSTejun Heo 
1140fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++)
1141fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS)
1142cb129820STejun Heo 					printk(KERN_CONT "%0*d ", cpu_width,
1143fd1e8a1fSTejun Heo 					       gi->cpu_map[unit]);
1144033e48fbSTejun Heo 				else
1145cb129820STejun Heo 					printk(KERN_CONT "%s ", empty_str);
1146033e48fbSTejun Heo 		}
1147fd1e8a1fSTejun Heo 	}
1148cb129820STejun Heo 	printk(KERN_CONT "\n");
1149033e48fbSTejun Heo }
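
/*
 * For reference, the dump typically looks like the following on a
 * 4-CPU machine with one group and one unit per allocation (the
 * numbers are illustrative, not from a real boot):
 *
 *	pcpu-alloc: s82904 r8192 d23592 u114688 alloc=28*4096
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3
 */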
1150033e48fbSTejun Heo 
1151fbf59bc9STejun Heo /**
11528d408b4bSTejun Heo  * pcpu_setup_first_chunk - initialize the first percpu chunk
1153fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info describing how the percpu area is shaped
115438a6be52STejun Heo  * @base_addr: mapped address
1155fbf59bc9STejun Heo  *
11568d408b4bSTejun Heo  * Initialize the first percpu chunk which contains the kernel static
11578d408b4bSTejun Heo  * percpu area.  This function is to be called from the arch percpu
115838a6be52STejun Heo  * area setup path.
11598d408b4bSTejun Heo  *
1160fd1e8a1fSTejun Heo  * @ai contains all information necessary to initialize the first
1161fd1e8a1fSTejun Heo  * chunk and prime the dynamic percpu allocator.
11628d408b4bSTejun Heo  *
1163fd1e8a1fSTejun Heo  * @ai->static_size is the size of static percpu area.
1164fd1e8a1fSTejun Heo  *
1165fd1e8a1fSTejun Heo  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1166edcb4639STejun Heo  * reserve after the static area in the first chunk.  This reserves
1167edcb4639STejun Heo  * the first chunk such that it's available only through reserved
1168edcb4639STejun Heo  * percpu allocation.  This is primarily used to serve module percpu
1169edcb4639STejun Heo  * static areas on architectures where the addressing model has
1170edcb4639STejun Heo  * limited offset range for symbol relocations to guarantee module
1171edcb4639STejun Heo  * percpu symbols fall inside the relocatable range.
1172edcb4639STejun Heo  *
1173fd1e8a1fSTejun Heo  * @ai->dyn_size determines the number of bytes available for dynamic
1174fd1e8a1fSTejun Heo  * allocation in the first chunk.  The area between @ai->static_size +
1175fd1e8a1fSTejun Heo  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
11766074d5b0STejun Heo  *
1177fd1e8a1fSTejun Heo  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1178fd1e8a1fSTejun Heo  * and equal to or larger than @ai->static_size + @ai->reserved_size +
1179fd1e8a1fSTejun Heo  * @ai->dyn_size.
11808d408b4bSTejun Heo  *
1181fd1e8a1fSTejun Heo  * @ai->atom_size is the allocation atom size and is used as alignment
1182fd1e8a1fSTejun Heo  * for vm areas.
11838d408b4bSTejun Heo  *
1184fd1e8a1fSTejun Heo  * @ai->alloc_size is the allocation size and is always a multiple of
1185fd1e8a1fSTejun Heo  * @ai->atom_size.  This is larger than @ai->atom_size if
1186fd1e8a1fSTejun Heo  * @ai->unit_size is larger than @ai->atom_size.
1187fd1e8a1fSTejun Heo  *
1188fd1e8a1fSTejun Heo  * @ai->nr_groups and @ai->groups describe virtual memory layout of
1189fd1e8a1fSTejun Heo  * percpu areas.  Units which should be colocated are put into the
1190fd1e8a1fSTejun Heo  * same group.  Dynamic VM areas will be allocated according to these
1191fd1e8a1fSTejun Heo  * groupings.  There must be at least one group.
11938d408b4bSTejun Heo  *
119438a6be52STejun Heo  * The caller should have mapped the first chunk at @base_addr and
119538a6be52STejun Heo  * copied static data to each unit.
1196fbf59bc9STejun Heo  *
1197edcb4639STejun Heo  * If the first chunk ends up with both reserved and dynamic areas, it
1198edcb4639STejun Heo  * is served by two chunks - one to serve the core static and reserved
1199edcb4639STejun Heo  * areas and the other for the dynamic area.  They share the same vm
1200edcb4639STejun Heo  * and page map but use different area allocation maps to stay away
1201edcb4639STejun Heo  * from each other.  The latter chunk is circulated in the chunk slots
1202edcb4639STejun Heo  * and is available for dynamic allocation like any other chunk.
1203edcb4639STejun Heo  *
1204fbf59bc9STejun Heo  * RETURNS:
1205fb435d52STejun Heo  * 0 on success, -errno on failure.
1206fbf59bc9STejun Heo  */
1207fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1208fd1e8a1fSTejun Heo 				  void *base_addr)
1209fbf59bc9STejun Heo {
1210635b75fcSTejun Heo 	static char cpus_buf[4096] __initdata;
1211099a19d9STejun Heo 	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1212099a19d9STejun Heo 	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1213fd1e8a1fSTejun Heo 	size_t dyn_size = ai->dyn_size;
1214fd1e8a1fSTejun Heo 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1215edcb4639STejun Heo 	struct pcpu_chunk *schunk, *dchunk = NULL;
12166563297cSTejun Heo 	unsigned long *group_offsets;
12176563297cSTejun Heo 	size_t *group_sizes;
1218fb435d52STejun Heo 	unsigned long *unit_off;
1219fbf59bc9STejun Heo 	unsigned int cpu;
1220fd1e8a1fSTejun Heo 	int *unit_map;
1221fd1e8a1fSTejun Heo 	int group, unit, i;
1222fbf59bc9STejun Heo 
1223635b75fcSTejun Heo 	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1224635b75fcSTejun Heo 
1225635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond)	do {					\
1226635b75fcSTejun Heo 	if (unlikely(cond)) {						\
1227635b75fcSTejun Heo 		pr_emerg("PERCPU: failed to initialize, %s\n", #cond);	\
1228635b75fcSTejun Heo 		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
1229635b75fcSTejun Heo 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1230635b75fcSTejun Heo 		BUG();							\
1231635b75fcSTejun Heo 	}								\
1232635b75fcSTejun Heo } while (0)
1233635b75fcSTejun Heo 
12342f39e637STejun Heo 	/* sanity checks */
1235635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1236bbddff05STejun Heo #ifdef CONFIG_SMP
1237635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(!ai->static_size);
12380415b00dSTejun Heo 	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
1239bbddff05STejun Heo #endif
1240635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(!base_addr);
12410415b00dSTejun Heo 	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
1242635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1243635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1244635b75fcSTejun Heo 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1245099a19d9STejun Heo 	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
12469f645532STejun Heo 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
12478d408b4bSTejun Heo 
12486563297cSTejun Heo 	/* process group information and build config tables accordingly */
12496563297cSTejun Heo 	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
12506563297cSTejun Heo 	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1251fd1e8a1fSTejun Heo 	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1252fb435d52STejun Heo 	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
12532f39e637STejun Heo 
1254fd1e8a1fSTejun Heo 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1255ffe0d5a5STejun Heo 		unit_map[cpu] = UINT_MAX;
1256a855b84cSTejun Heo 
1257a855b84cSTejun Heo 	pcpu_low_unit_cpu = NR_CPUS;
1258a855b84cSTejun Heo 	pcpu_high_unit_cpu = NR_CPUS;
12592f39e637STejun Heo 
1260fd1e8a1fSTejun Heo 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1261fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
12622f39e637STejun Heo 
12636563297cSTejun Heo 		group_offsets[group] = gi->base_offset;
12646563297cSTejun Heo 		group_sizes[group] = gi->nr_units * ai->unit_size;
12656563297cSTejun Heo 
1266fd1e8a1fSTejun Heo 		for (i = 0; i < gi->nr_units; i++) {
1267fd1e8a1fSTejun Heo 			cpu = gi->cpu_map[i];
1268fd1e8a1fSTejun Heo 			if (cpu == NR_CPUS)
1269fd1e8a1fSTejun Heo 				continue;
1270fd1e8a1fSTejun Heo 
1271635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1272635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1273635b75fcSTejun Heo 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1274fd1e8a1fSTejun Heo 
1275fd1e8a1fSTejun Heo 			unit_map[cpu] = unit + i;
1276fb435d52STejun Heo 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1277fb435d52STejun Heo 
1278a855b84cSTejun Heo 			/* determine low/high unit_cpu */
1279a855b84cSTejun Heo 			if (pcpu_low_unit_cpu == NR_CPUS ||
1280a855b84cSTejun Heo 			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1281a855b84cSTejun Heo 				pcpu_low_unit_cpu = cpu;
1282a855b84cSTejun Heo 			if (pcpu_high_unit_cpu == NR_CPUS ||
1283a855b84cSTejun Heo 			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1284a855b84cSTejun Heo 				pcpu_high_unit_cpu = cpu;
12850fc0531eSLinus Torvalds 		}
12860fc0531eSLinus Torvalds 	}
1287fd1e8a1fSTejun Heo 	pcpu_nr_units = unit;
12882f39e637STejun Heo 
12892f39e637STejun Heo 	for_each_possible_cpu(cpu)
1290635b75fcSTejun Heo 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1291635b75fcSTejun Heo 
1292635b75fcSTejun Heo 	/* we're done parsing the input, undefine BUG macro and dump config */
1293635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON
1294bcbea798STejun Heo 	pcpu_dump_alloc_info(KERN_DEBUG, ai);
12952f39e637STejun Heo 
12966563297cSTejun Heo 	pcpu_nr_groups = ai->nr_groups;
12976563297cSTejun Heo 	pcpu_group_offsets = group_offsets;
12986563297cSTejun Heo 	pcpu_group_sizes = group_sizes;
1299fd1e8a1fSTejun Heo 	pcpu_unit_map = unit_map;
1300fb435d52STejun Heo 	pcpu_unit_offsets = unit_off;
13012f39e637STejun Heo 
13022f39e637STejun Heo 	/* determine basic parameters */
1303fd1e8a1fSTejun Heo 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1304d9b55eebSTejun Heo 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
13056563297cSTejun Heo 	pcpu_atom_size = ai->atom_size;
1306ce3141a2STejun Heo 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1307ce3141a2STejun Heo 		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1308cafe8816STejun Heo 
1309d9b55eebSTejun Heo 	/*
1310d9b55eebSTejun Heo 	 * Allocate chunk slots.  The additional last slot is for
1311d9b55eebSTejun Heo 	 * empty chunks.
1312d9b55eebSTejun Heo 	 */
1313d9b55eebSTejun Heo 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1314fbf59bc9STejun Heo 	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1315fbf59bc9STejun Heo 	for (i = 0; i < pcpu_nr_slots; i++)
1316fbf59bc9STejun Heo 		INIT_LIST_HEAD(&pcpu_slot[i]);
1317fbf59bc9STejun Heo 
1318edcb4639STejun Heo 	/*
1319edcb4639STejun Heo 	 * Initialize static chunk.  If reserved_size is zero, the
1320edcb4639STejun Heo 	 * static chunk covers static area + dynamic allocation area
1321edcb4639STejun Heo 	 * in the first chunk.  If reserved_size is not zero, it
1322edcb4639STejun Heo 	 * covers static area + reserved area (mostly used for module
1323edcb4639STejun Heo 	 * static percpu allocation).
1324edcb4639STejun Heo 	 */
13252441d15cSTejun Heo 	schunk = alloc_bootmem(pcpu_chunk_struct_size);
13262441d15cSTejun Heo 	INIT_LIST_HEAD(&schunk->list);
1327bba174f5STejun Heo 	schunk->base_addr = base_addr;
132861ace7faSTejun Heo 	schunk->map = smap;
132961ace7faSTejun Heo 	schunk->map_alloc = ARRAY_SIZE(smap);
133038a6be52STejun Heo 	schunk->immutable = true;
1331ce3141a2STejun Heo 	bitmap_fill(schunk->populated, pcpu_unit_pages);
1332edcb4639STejun Heo 
1333fd1e8a1fSTejun Heo 	if (ai->reserved_size) {
1334fd1e8a1fSTejun Heo 		schunk->free_size = ai->reserved_size;
1335ae9e6bc9STejun Heo 		pcpu_reserved_chunk = schunk;
1336fd1e8a1fSTejun Heo 		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1337edcb4639STejun Heo 	} else {
13382441d15cSTejun Heo 		schunk->free_size = dyn_size;
1339edcb4639STejun Heo 		dyn_size = 0;			/* dynamic area covered */
1340edcb4639STejun Heo 	}
13412441d15cSTejun Heo 	schunk->contig_hint = schunk->free_size;
1342fbf59bc9STejun Heo 
1343fd1e8a1fSTejun Heo 	schunk->map[schunk->map_used++] = -ai->static_size;
134461ace7faSTejun Heo 	if (schunk->free_size)
134561ace7faSTejun Heo 		schunk->map[schunk->map_used++] = schunk->free_size;
134661ace7faSTejun Heo 
1347edcb4639STejun Heo 	/* init dynamic chunk if necessary */
1348edcb4639STejun Heo 	if (dyn_size) {
1349ce3141a2STejun Heo 		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1350edcb4639STejun Heo 		INIT_LIST_HEAD(&dchunk->list);
1351bba174f5STejun Heo 		dchunk->base_addr = base_addr;
1352edcb4639STejun Heo 		dchunk->map = dmap;
1353edcb4639STejun Heo 		dchunk->map_alloc = ARRAY_SIZE(dmap);
135438a6be52STejun Heo 		dchunk->immutable = true;
1355ce3141a2STejun Heo 		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1356edcb4639STejun Heo 
1357edcb4639STejun Heo 		dchunk->contig_hint = dchunk->free_size = dyn_size;
1358edcb4639STejun Heo 		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1359edcb4639STejun Heo 		dchunk->map[dchunk->map_used++] = dchunk->free_size;
1360edcb4639STejun Heo 	}
1361edcb4639STejun Heo 
13622441d15cSTejun Heo 	/* link the first chunk in */
1363ae9e6bc9STejun Heo 	pcpu_first_chunk = dchunk ?: schunk;
1364ae9e6bc9STejun Heo 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1365fbf59bc9STejun Heo 
1366fbf59bc9STejun Heo 	/* we're done */
1367bba174f5STejun Heo 	pcpu_base_addr = base_addr;
1368fb435d52STejun Heo 	return 0;
1369fbf59bc9STejun Heo }
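
/*
 * To make the two-chunk arrangement above concrete, a sketch with
 * arbitrary sizes: for s=64K static, r=8K reserved and d=20K dynamic,
 * the initial allocation maps would be
 *
 *	schunk->map = { -65536, 8192 }    (static used, reserved free)
 *	dchunk->map = { -73728, 20480 }   (static+reserved used, dyn free)
 *
 * i.e. schunk serves only reserved allocations while dchunk, which
 * circulates in the chunk slots, serves the dynamic area.
 */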
137066c3a757STejun Heo 
1371bbddff05STejun Heo #ifdef CONFIG_SMP
1372bbddff05STejun Heo 
137317f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1374f58dc01bSTejun Heo 	[PCPU_FC_AUTO]	= "auto",
1375f58dc01bSTejun Heo 	[PCPU_FC_EMBED]	= "embed",
1376f58dc01bSTejun Heo 	[PCPU_FC_PAGE]	= "page",
1377f58dc01bSTejun Heo };
137866c3a757STejun Heo 
1379f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1380f58dc01bSTejun Heo 
1381f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str)
138266c3a757STejun Heo {
1383*5479c78aSCyrill Gorcunov 	if (!str)
1384*5479c78aSCyrill Gorcunov 		return -EINVAL;
1385*5479c78aSCyrill Gorcunov 
1386f58dc01bSTejun Heo 	if (0)
1387f58dc01bSTejun Heo 		/* nada */;
1388f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1389f58dc01bSTejun Heo 	else if (!strcmp(str, "embed"))
1390f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_EMBED;
1391f58dc01bSTejun Heo #endif
1392f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1393f58dc01bSTejun Heo 	else if (!strcmp(str, "page"))
1394f58dc01bSTejun Heo 		pcpu_chosen_fc = PCPU_FC_PAGE;
1395f58dc01bSTejun Heo #endif
1396f58dc01bSTejun Heo 	else
1397f58dc01bSTejun Heo 		pr_warning("PERCPU: unknown allocator %s specified\n", str);
139866c3a757STejun Heo 
1399f58dc01bSTejun Heo 	return 0;
140066c3a757STejun Heo }
1401f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup);
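
/*
 * The first chunk allocator can thus be overridden from the kernel
 * command line, e.g. booting with "percpu_alloc=page" forces the page
 * allocator on a kernel built with
 * CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK.
 */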
140266c3a757STejun Heo 
14033c9a024fSTejun Heo /*
14043c9a024fSTejun Heo  * pcpu_embed_first_chunk() is used by the generic percpu setup.
14053c9a024fSTejun Heo  * Build it if the arch config requires it or if the generic setup is
14063c9a024fSTejun Heo  * going to be used.
14073c9a024fSTejun Heo  */
140808fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
140908fc4580STejun Heo 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
14103c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK
14113c9a024fSTejun Heo #endif
14123c9a024fSTejun Heo 
14133c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */
14143c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
14153c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK
14163c9a024fSTejun Heo #endif
14173c9a024fSTejun Heo 
14183c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */
14193c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
14203c9a024fSTejun Heo /**
1421fbf59bc9STejun Heo  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1422fbf59bc9STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
1423fbf59bc9STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
1424fbf59bc9STejun Heo  * @atom_size: allocation atom size
1425fbf59bc9STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
1426fbf59bc9STejun Heo  *
1427fbf59bc9STejun Heo  * This function determines grouping of units, their mappings to cpus
1428fbf59bc9STejun Heo  * and other parameters considering needed percpu size, allocation
1429fbf59bc9STejun Heo  * atom size and distances between CPUs.
1430fbf59bc9STejun Heo  *
1431fbf59bc9STejun Heo  * Groups are always multiples of atom size and CPUs which are of
1432fbf59bc9STejun Heo  * LOCAL_DISTANCE both ways are grouped together and share space for
1433fbf59bc9STejun Heo  * units in the same group.  The returned configuration is guaranteed
1434fbf59bc9STejun Heo  * to have CPUs on different nodes in different groups and >=75% usage
1435fbf59bc9STejun Heo  * of allocated virtual address space.
1436fbf59bc9STejun Heo  *
1437fbf59bc9STejun Heo  * RETURNS:
1438fbf59bc9STejun Heo  * On success, pointer to the new allocation_info is returned.  On
1439fbf59bc9STejun Heo  * failure, ERR_PTR value is returned.
1440fbf59bc9STejun Heo  */
1441fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1442fbf59bc9STejun Heo 				size_t reserved_size, size_t dyn_size,
1443fbf59bc9STejun Heo 				size_t atom_size,
1444fbf59bc9STejun Heo 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1445fbf59bc9STejun Heo {
1446fbf59bc9STejun Heo 	static int group_map[NR_CPUS] __initdata;
1447fbf59bc9STejun Heo 	static int group_cnt[NR_CPUS] __initdata;
1448fbf59bc9STejun Heo 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1449fbf59bc9STejun Heo 	int nr_groups = 1, nr_units = 0;
1450fbf59bc9STejun Heo 	size_t size_sum, min_unit_size, alloc_size;
1451fbf59bc9STejun Heo 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1452fbf59bc9STejun Heo 	int last_allocs, group, unit;
1453fbf59bc9STejun Heo 	unsigned int cpu, tcpu;
1454fbf59bc9STejun Heo 	struct pcpu_alloc_info *ai;
1455fbf59bc9STejun Heo 	unsigned int *cpu_map;
1456fbf59bc9STejun Heo 
1457fbf59bc9STejun Heo 	/* this function may be called multiple times */
1458fbf59bc9STejun Heo 	memset(group_map, 0, sizeof(group_map));
1459fbf59bc9STejun Heo 	memset(group_cnt, 0, sizeof(group_cnt));
1460fbf59bc9STejun Heo 
1461fbf59bc9STejun Heo 	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1462fbf59bc9STejun Heo 	size_sum = PFN_ALIGN(static_size + reserved_size +
1463fbf59bc9STejun Heo 			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1464fbf59bc9STejun Heo 	dyn_size = size_sum - static_size - reserved_size;
1465fbf59bc9STejun Heo 
1466fbf59bc9STejun Heo 	/*
1467fbf59bc9STejun Heo 	 * Determine min_unit_size, alloc_size and max_upa such that
1468fbf59bc9STejun Heo 	 * alloc_size is multiple of atom_size and is the smallest
146925985edcSLucas De Marchi 	 * which can accommodate 4k aligned segments which are equal to
1470fbf59bc9STejun Heo 	 * or larger than min_unit_size.
1471fbf59bc9STejun Heo 	 */
1472fbf59bc9STejun Heo 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1473fbf59bc9STejun Heo 
1474fbf59bc9STejun Heo 	alloc_size = roundup(min_unit_size, atom_size);
1475fbf59bc9STejun Heo 	upa = alloc_size / min_unit_size;
1476fbf59bc9STejun Heo 	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1477fbf59bc9STejun Heo 		upa--;
1478fbf59bc9STejun Heo 	max_upa = upa;
1479fbf59bc9STejun Heo 
1480fbf59bc9STejun Heo 	/* group cpus according to their proximity */
1481fbf59bc9STejun Heo 	for_each_possible_cpu(cpu) {
1482fbf59bc9STejun Heo 		group = 0;
1483fbf59bc9STejun Heo 	next_group:
1484fbf59bc9STejun Heo 		for_each_possible_cpu(tcpu) {
1485fbf59bc9STejun Heo 			if (cpu == tcpu)
1486fbf59bc9STejun Heo 				break;
1487fbf59bc9STejun Heo 			if (group_map[tcpu] == group && cpu_distance_fn &&
1488fbf59bc9STejun Heo 			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1489fbf59bc9STejun Heo 			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1490fbf59bc9STejun Heo 				group++;
1491fbf59bc9STejun Heo 				nr_groups = max(nr_groups, group + 1);
1492fbf59bc9STejun Heo 				goto next_group;
1493fbf59bc9STejun Heo 			}
1494fbf59bc9STejun Heo 		}
1495fbf59bc9STejun Heo 		group_map[cpu] = group;
1496fbf59bc9STejun Heo 		group_cnt[group]++;
1497fbf59bc9STejun Heo 	}
1498fbf59bc9STejun Heo 
1499fbf59bc9STejun Heo 	/*
1500fbf59bc9STejun Heo 	 * Expand unit size until address space usage goes over 75%
1501fbf59bc9STejun Heo 	 * and then as much as possible without using more address
1502fbf59bc9STejun Heo 	 * space.
1503fbf59bc9STejun Heo 	 */
1504fbf59bc9STejun Heo 	last_allocs = INT_MAX;
1505fbf59bc9STejun Heo 	for (upa = max_upa; upa; upa--) {
1506fbf59bc9STejun Heo 		int allocs = 0, wasted = 0;
1507fbf59bc9STejun Heo 
1508fbf59bc9STejun Heo 		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1509fbf59bc9STejun Heo 			continue;
1510fbf59bc9STejun Heo 
1511fbf59bc9STejun Heo 		for (group = 0; group < nr_groups; group++) {
1512fbf59bc9STejun Heo 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1513fbf59bc9STejun Heo 			allocs += this_allocs;
1514fbf59bc9STejun Heo 			wasted += this_allocs * upa - group_cnt[group];
1515fbf59bc9STejun Heo 		}
1516fbf59bc9STejun Heo 
1517fbf59bc9STejun Heo 		/*
1518fbf59bc9STejun Heo 		 * Don't accept if wastage is over 1/3.  The
1519fbf59bc9STejun Heo 		 * greater-than comparison ensures upa==1 always
1520fbf59bc9STejun Heo 		 * passes the following check.
1521fbf59bc9STejun Heo 		 */
1522fbf59bc9STejun Heo 		if (wasted > num_possible_cpus() / 3)
1523fbf59bc9STejun Heo 			continue;
1524fbf59bc9STejun Heo 
1525fbf59bc9STejun Heo 		/* and then don't consume more memory */
1526fbf59bc9STejun Heo 		if (allocs > last_allocs)
1527fbf59bc9STejun Heo 			break;
1528fbf59bc9STejun Heo 		last_allocs = allocs;
1529fbf59bc9STejun Heo 		best_upa = upa;
1530fbf59bc9STejun Heo 	}
1531fbf59bc9STejun Heo 	upa = best_upa;
1532fbf59bc9STejun Heo 
1533fbf59bc9STejun Heo 	/* allocate and fill alloc_info */
1534fbf59bc9STejun Heo 	for (group = 0; group < nr_groups; group++)
1535fbf59bc9STejun Heo 		nr_units += roundup(group_cnt[group], upa);
1536fbf59bc9STejun Heo 
1537fbf59bc9STejun Heo 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1538fbf59bc9STejun Heo 	if (!ai)
1539fbf59bc9STejun Heo 		return ERR_PTR(-ENOMEM);
1540fbf59bc9STejun Heo 	cpu_map = ai->groups[0].cpu_map;
1541fbf59bc9STejun Heo 
1542fbf59bc9STejun Heo 	for (group = 0; group < nr_groups; group++) {
1543fbf59bc9STejun Heo 		ai->groups[group].cpu_map = cpu_map;
1544fbf59bc9STejun Heo 		cpu_map += roundup(group_cnt[group], upa);
1545fbf59bc9STejun Heo 	}
1546fbf59bc9STejun Heo 
1547fbf59bc9STejun Heo 	ai->static_size = static_size;
1548fbf59bc9STejun Heo 	ai->reserved_size = reserved_size;
1549fbf59bc9STejun Heo 	ai->dyn_size = dyn_size;
1550fbf59bc9STejun Heo 	ai->unit_size = alloc_size / upa;
1551fbf59bc9STejun Heo 	ai->atom_size = atom_size;
1552fbf59bc9STejun Heo 	ai->alloc_size = alloc_size;
1553fbf59bc9STejun Heo 
1554fbf59bc9STejun Heo 	for (group = 0, unit = 0; group_cnt[group]; group++) {
1555fbf59bc9STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
1556fbf59bc9STejun Heo 
1557fbf59bc9STejun Heo 		/*
1558fbf59bc9STejun Heo 		 * Initialize base_offset as if all groups are located
1559fbf59bc9STejun Heo 		 * back-to-back.  The caller should update this to
1560fbf59bc9STejun Heo 		 * reflect actual allocation.
1561fbf59bc9STejun Heo 		 */
1562fbf59bc9STejun Heo 		gi->base_offset = unit * ai->unit_size;
1563fbf59bc9STejun Heo 
1564fbf59bc9STejun Heo 		for_each_possible_cpu(cpu)
1565fbf59bc9STejun Heo 			if (group_map[cpu] == group)
1566fbf59bc9STejun Heo 				gi->cpu_map[gi->nr_units++] = cpu;
1567fbf59bc9STejun Heo 		gi->nr_units = roundup(gi->nr_units, upa);
1568fbf59bc9STejun Heo 		unit += gi->nr_units;
1569fbf59bc9STejun Heo 	}
1570fbf59bc9STejun Heo 	BUG_ON(unit != nr_units);
1571fbf59bc9STejun Heo 
1572fbf59bc9STejun Heo 	return ai;
1573fbf59bc9STejun Heo }
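
/*
 * A worked example of the upa selection above, with made-up sizes:
 * for size_sum = 56K and atom_size = 2M, min_unit_size is 56K,
 * alloc_size = roundup(56K, 2M) = 2M and the starting upa is
 * 2M / 56K = 36.  36 down through 33 either don't divide 2M evenly or
 * produce units that aren't page aligned, so max_upa becomes 32:
 * 64K units with up to 32 CPUs sharing each 2M allocation.
 */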
15743c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1575fbf59bc9STejun Heo 
15763c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK)
157766c3a757STejun Heo /**
157866c3a757STejun Heo  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
157966c3a757STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
15804ba6ce25STejun Heo  * @dyn_size: minimum free size for dynamic allocation in bytes
1581c8826dd5STejun Heo  * @atom_size: allocation atom size
1582c8826dd5STejun Heo  * @cpu_distance_fn: callback to determine distance between cpus, optional
1583c8826dd5STejun Heo  * @alloc_fn: function to allocate percpu page
158425985edcSLucas De Marchi  * @free_fn: function to free percpu page
158566c3a757STejun Heo  *
158666c3a757STejun Heo  * This is a helper to ease setting up embedded first percpu chunk and
158766c3a757STejun Heo  * can be called where pcpu_setup_first_chunk() is expected.
158866c3a757STejun Heo  *
158966c3a757STejun Heo  * If this function is used to setup the first chunk, it is allocated
1590c8826dd5STejun Heo  * by calling @alloc_fn and used as-is without being mapped into
1591c8826dd5STejun Heo  * vmalloc area.  Allocations are always whole multiples of @atom_size
1592c8826dd5STejun Heo  * aligned to @atom_size.
1593c8826dd5STejun Heo  *
1594c8826dd5STejun Heo  * This enables the first chunk to piggy back on the linear physical
1595c8826dd5STejun Heo  * mapping which often uses larger page size.  Please note that this
1596c8826dd5STejun Heo  * can result in very sparse cpu->unit mapping on NUMA machines thus
1597c8826dd5STejun Heo  * requiring large vmalloc address space.  Don't use this allocator if
1598c8826dd5STejun Heo  * vmalloc space is not orders of magnitude larger than distances
1599c8826dd5STejun Heo  * between node memory addresses (i.e. 32bit NUMA machines).
160066c3a757STejun Heo  *
16014ba6ce25STejun Heo  * @dyn_size specifies the minimum dynamic area size.
160266c3a757STejun Heo  *
160366c3a757STejun Heo  * If the needed size is smaller than the minimum or specified unit
1604c8826dd5STejun Heo  * size, the leftover is returned using @free_fn.
160566c3a757STejun Heo  *
160666c3a757STejun Heo  * RETURNS:
1607fb435d52STejun Heo  * 0 on success, -errno on failure.
160866c3a757STejun Heo  */
16094ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1610c8826dd5STejun Heo 				  size_t atom_size,
1611c8826dd5STejun Heo 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1612c8826dd5STejun Heo 				  pcpu_fc_alloc_fn_t alloc_fn,
1613c8826dd5STejun Heo 				  pcpu_fc_free_fn_t free_fn)
161466c3a757STejun Heo {
1615c8826dd5STejun Heo 	void *base = (void *)ULONG_MAX;
1616c8826dd5STejun Heo 	void **areas = NULL;
1617fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
16186ea529a2STejun Heo 	size_t size_sum, areas_size, max_distance;
1619c8826dd5STejun Heo 	int group, i, rc;
162066c3a757STejun Heo 
1621c8826dd5STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1622c8826dd5STejun Heo 				   cpu_distance_fn);
1623fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
1624fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
162566c3a757STejun Heo 
1626fd1e8a1fSTejun Heo 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1627c8826dd5STejun Heo 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
162866c3a757STejun Heo 
1629c8826dd5STejun Heo 	areas = alloc_bootmem_nopanic(areas_size);
1630c8826dd5STejun Heo 	if (!areas) {
1631fb435d52STejun Heo 		rc = -ENOMEM;
1632c8826dd5STejun Heo 		goto out_free;
1633fa8a7094STejun Heo 	}
163466c3a757STejun Heo 
1635c8826dd5STejun Heo 	/* allocate, copy and determine base address */
1636c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
1637c8826dd5STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
1638c8826dd5STejun Heo 		unsigned int cpu = NR_CPUS;
1639c8826dd5STejun Heo 		void *ptr;
164066c3a757STejun Heo 
1641c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1642c8826dd5STejun Heo 			cpu = gi->cpu_map[i];
1643c8826dd5STejun Heo 		BUG_ON(cpu == NR_CPUS);
1644c8826dd5STejun Heo 
1645c8826dd5STejun Heo 		/* allocate space for the whole group */
1646c8826dd5STejun Heo 		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1647c8826dd5STejun Heo 		if (!ptr) {
1648c8826dd5STejun Heo 			rc = -ENOMEM;
1649c8826dd5STejun Heo 			goto out_free_areas;
1650c8826dd5STejun Heo 		}
1651f528f0b8SCatalin Marinas 		/* kmemleak tracks the percpu allocations separately */
1652f528f0b8SCatalin Marinas 		kmemleak_free(ptr);
1653c8826dd5STejun Heo 		areas[group] = ptr;
1654c8826dd5STejun Heo 
1655c8826dd5STejun Heo 		base = min(ptr, base);
165642b64281STejun Heo 	}
165742b64281STejun Heo 
165842b64281STejun Heo 	/*
165942b64281STejun Heo 	 * Copy data and free unused parts.  This should happen after all
166042b64281STejun Heo 	 * allocations are complete; otherwise, we may end up with
166142b64281STejun Heo 	 * overlapping groups.
166242b64281STejun Heo 	 */
166342b64281STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
166442b64281STejun Heo 		struct pcpu_group_info *gi = &ai->groups[group];
166542b64281STejun Heo 		void *ptr = areas[group];
1666c8826dd5STejun Heo 
1667c8826dd5STejun Heo 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1668c8826dd5STejun Heo 			if (gi->cpu_map[i] == NR_CPUS) {
1669c8826dd5STejun Heo 				/* unused unit, free whole */
1670c8826dd5STejun Heo 				free_fn(ptr, ai->unit_size);
1671c8826dd5STejun Heo 				continue;
1672c8826dd5STejun Heo 			}
1673c8826dd5STejun Heo 			/* copy and return the unused part */
1674fd1e8a1fSTejun Heo 			memcpy(ptr, __per_cpu_load, ai->static_size);
1675c8826dd5STejun Heo 			free_fn(ptr + size_sum, ai->unit_size - size_sum);
1676c8826dd5STejun Heo 		}
167766c3a757STejun Heo 	}
167866c3a757STejun Heo 
1679c8826dd5STejun Heo 	/* base address is now known, determine group base offsets */
16806ea529a2STejun Heo 	max_distance = 0;
16816ea529a2STejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
1682c8826dd5STejun Heo 		ai->groups[group].base_offset = areas[group] - base;
16831a0c3298STejun Heo 		max_distance = max_t(size_t, max_distance,
16841a0c3298STejun Heo 				     ai->groups[group].base_offset);
16856ea529a2STejun Heo 	}
16866ea529a2STejun Heo 	max_distance += ai->unit_size;
16876ea529a2STejun Heo 
16886ea529a2STejun Heo 	/* warn if maximum distance is further than 75% of vmalloc space */
16896ea529a2STejun Heo 	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
16901a0c3298STejun Heo 		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1691787e5b06SMike Frysinger 			   "space 0x%lx\n", max_distance,
1692787e5b06SMike Frysinger 			   (unsigned long)(VMALLOC_END - VMALLOC_START));
16936ea529a2STejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
16946ea529a2STejun Heo 		/* and fail if we have fallback */
16956ea529a2STejun Heo 		rc = -EINVAL;
16966ea529a2STejun Heo 		goto out_free;
16976ea529a2STejun Heo #endif
16986ea529a2STejun Heo 	}
1699c8826dd5STejun Heo 
1700004018e2STejun Heo 	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1701fd1e8a1fSTejun Heo 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1702fd1e8a1fSTejun Heo 		ai->dyn_size, ai->unit_size);
170366c3a757STejun Heo 
1704fb435d52STejun Heo 	rc = pcpu_setup_first_chunk(ai, base);
1705c8826dd5STejun Heo 	goto out_free;
1706c8826dd5STejun Heo 
1707c8826dd5STejun Heo out_free_areas:
1708c8826dd5STejun Heo 	for (group = 0; group < ai->nr_groups; group++)
1709c8826dd5STejun Heo 		free_fn(areas[group],
1710c8826dd5STejun Heo 			ai->groups[group].nr_units * ai->unit_size);
1711c8826dd5STejun Heo out_free:
1712fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
1713c8826dd5STejun Heo 	if (areas)
1714c8826dd5STejun Heo 		free_bootmem(__pa(areas), areas_size);
1715fb435d52STejun Heo 	return rc;
1716d4b95f80STejun Heo }
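
/*
 * A minimal sketch of an arch calling the embed helper on a NUMA
 * machine (callback names are hypothetical; the generic
 * setup_per_cpu_areas() below shows the simple variant with
 * bootmem-backed callbacks and no distance function):
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, 2UL << 20,
 *				    my_cpu_distance_fn,
 *				    my_fc_alloc, my_fc_free);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */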
17173c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */
1718d4b95f80STejun Heo 
17193c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK
1720d4b95f80STejun Heo /**
172100ae4064STejun Heo  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1722d4b95f80STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
1723d4b95f80STejun Heo  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
172425985edcSLucas De Marchi  * @free_fn: function to free percpu page, always called with PAGE_SIZE
1725d4b95f80STejun Heo  * @populate_pte_fn: function to populate pte
1726d4b95f80STejun Heo  *
172700ae4064STejun Heo  * This is a helper to ease setting up page-remapped first percpu
172800ae4064STejun Heo  * chunk and can be called where pcpu_setup_first_chunk() is expected.
1729d4b95f80STejun Heo  *
1730d4b95f80STejun Heo  * This is the basic allocator.  Static percpu area is allocated
1731d4b95f80STejun Heo  * page-by-page into vmalloc area.
1732d4b95f80STejun Heo  *
1733d4b95f80STejun Heo  * RETURNS:
1734fb435d52STejun Heo  * 0 on success, -errno on failure.
1735d4b95f80STejun Heo  */
1736fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size,
1737d4b95f80STejun Heo 				 pcpu_fc_alloc_fn_t alloc_fn,
1738d4b95f80STejun Heo 				 pcpu_fc_free_fn_t free_fn,
1739d4b95f80STejun Heo 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1740d4b95f80STejun Heo {
17418f05a6a6STejun Heo 	static struct vm_struct vm;
1742fd1e8a1fSTejun Heo 	struct pcpu_alloc_info *ai;
174300ae4064STejun Heo 	char psize_str[16];
1744ce3141a2STejun Heo 	int unit_pages;
1745d4b95f80STejun Heo 	size_t pages_size;
1746ce3141a2STejun Heo 	struct page **pages;
1747fb435d52STejun Heo 	int unit, i, j, rc;
1748d4b95f80STejun Heo 
174900ae4064STejun Heo 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
175000ae4064STejun Heo 
17514ba6ce25STejun Heo 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1752fd1e8a1fSTejun Heo 	if (IS_ERR(ai))
1753fd1e8a1fSTejun Heo 		return PTR_ERR(ai);
1754fd1e8a1fSTejun Heo 	BUG_ON(ai->nr_groups != 1);
1755fd1e8a1fSTejun Heo 	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1756fd1e8a1fSTejun Heo 
1757fd1e8a1fSTejun Heo 	unit_pages = ai->unit_size >> PAGE_SHIFT;
1758d4b95f80STejun Heo 
1759d4b95f80STejun Heo 	/* unaligned allocations can't be freed, round up to page size */
1760fd1e8a1fSTejun Heo 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1761fd1e8a1fSTejun Heo 			       sizeof(pages[0]));
1762ce3141a2STejun Heo 	pages = alloc_bootmem(pages_size);
1763d4b95f80STejun Heo 
17648f05a6a6STejun Heo 	/* allocate pages */
1765d4b95f80STejun Heo 	j = 0;
1766fd1e8a1fSTejun Heo 	for (unit = 0; unit < num_possible_cpus(); unit++)
1767ce3141a2STejun Heo 		for (i = 0; i < unit_pages; i++) {
1768fd1e8a1fSTejun Heo 			unsigned int cpu = ai->groups[0].cpu_map[unit];
1769d4b95f80STejun Heo 			void *ptr;
1770d4b95f80STejun Heo 
17713cbc8565STejun Heo 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1772d4b95f80STejun Heo 			if (!ptr) {
177300ae4064STejun Heo 				pr_warning("PERCPU: failed to allocate %s page "
177400ae4064STejun Heo 					   "for cpu%u\n", psize_str, cpu);
1775d4b95f80STejun Heo 				goto enomem;
1776d4b95f80STejun Heo 			}
1777f528f0b8SCatalin Marinas 			/* kmemleak tracks the percpu allocations separately */
1778f528f0b8SCatalin Marinas 			kmemleak_free(ptr);
1779ce3141a2STejun Heo 			pages[j++] = virt_to_page(ptr);
1780d4b95f80STejun Heo 		}
1781d4b95f80STejun Heo 
17828f05a6a6STejun Heo 	/* allocate vm area, map the pages and copy static data */
17838f05a6a6STejun Heo 	vm.flags = VM_ALLOC;
1784fd1e8a1fSTejun Heo 	vm.size = num_possible_cpus() * ai->unit_size;
17858f05a6a6STejun Heo 	vm_area_register_early(&vm, PAGE_SIZE);
17868f05a6a6STejun Heo 
1787fd1e8a1fSTejun Heo 	for (unit = 0; unit < num_possible_cpus(); unit++) {
17881d9d3257STejun Heo 		unsigned long unit_addr =
1789fd1e8a1fSTejun Heo 			(unsigned long)vm.addr + unit * ai->unit_size;
17908f05a6a6STejun Heo 
1791ce3141a2STejun Heo 		for (i = 0; i < unit_pages; i++)
17928f05a6a6STejun Heo 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
17938f05a6a6STejun Heo 
17948f05a6a6STejun Heo 		/* pte already populated, the following shouldn't fail */
1795fb435d52STejun Heo 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1796ce3141a2STejun Heo 				      unit_pages);
1797fb435d52STejun Heo 		if (rc < 0)
1798fb435d52STejun Heo 			panic("failed to map percpu area, err=%d\n", rc);
17998f05a6a6STejun Heo 
18008f05a6a6STejun Heo 		/*
18018f05a6a6STejun Heo 		 * FIXME: Archs with virtual cache should flush local
18028f05a6a6STejun Heo 		 * cache for the linear mapping here - something
18038f05a6a6STejun Heo 		 * equivalent to flush_cache_vmap() on the local cpu.
18048f05a6a6STejun Heo 		 * flush_cache_vmap() can't be used as most supporting
18058f05a6a6STejun Heo 		 * data structures are not set up yet.
18068f05a6a6STejun Heo 		 */
18078f05a6a6STejun Heo 
18088f05a6a6STejun Heo 		/* copy static data */
1809fd1e8a1fSTejun Heo 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
181066c3a757STejun Heo 	}
181166c3a757STejun Heo 
181266c3a757STejun Heo 	/* we're ready, commit */
18131d9d3257STejun Heo 	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1814fd1e8a1fSTejun Heo 		unit_pages, psize_str, vm.addr, ai->static_size,
1815fd1e8a1fSTejun Heo 		ai->reserved_size, ai->dyn_size);
181666c3a757STejun Heo 
1817fb435d52STejun Heo 	rc = pcpu_setup_first_chunk(ai, vm.addr);
1818d4b95f80STejun Heo 	goto out_free_ar;
1819d4b95f80STejun Heo 
1820d4b95f80STejun Heo enomem:
1821d4b95f80STejun Heo 	while (--j >= 0)
1822ce3141a2STejun Heo 		free_fn(page_address(pages[j]), PAGE_SIZE);
1823fb435d52STejun Heo 	rc = -ENOMEM;
1824d4b95f80STejun Heo out_free_ar:
1825ce3141a2STejun Heo 	free_bootmem(__pa(pages), pages_size);
1826fd1e8a1fSTejun Heo 	pcpu_free_alloc_info(ai);
1827fb435d52STejun Heo 	return rc;
182866c3a757STejun Heo }
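
/*
 * A sketch of how an arch might invoke the page helper (names are
 * hypothetical).  The populate_pte callback only needs to make sure
 * the page tables for the given address exist; the pages themselves
 * are mapped by the helper:
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_fc_alloc, my_fc_free,
 *				   my_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */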
18293c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */
1830d4b95f80STejun Heo 
1831bbddff05STejun Heo #ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
18328c4bfc6eSTejun Heo /*
1833bbddff05STejun Heo  * Generic SMP percpu area setup.
1834e74e3962STejun Heo  *
1835e74e3962STejun Heo  * The embedding helper is used because its behavior closely resembles
1836e74e3962STejun Heo  * the original non-dynamic generic percpu area setup.  This is
1837e74e3962STejun Heo  * important because many archs have addressing restrictions and might
1838e74e3962STejun Heo  * fail if the percpu area is located far away from the previous
1839e74e3962STejun Heo  * location.  As an added bonus, in non-NUMA cases, embedding is
1840e74e3962STejun Heo  * generally a good idea TLB-wise because percpu area can piggy back
1841e74e3962STejun Heo  * on the physical linear memory mapping which uses large page
1842e74e3962STejun Heo  * mappings on applicable archs.
1843e74e3962STejun Heo  */
1844e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1845e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset);
1846e74e3962STejun Heo 
1847c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1848c8826dd5STejun Heo 				       size_t align)
1849c8826dd5STejun Heo {
1850c8826dd5STejun Heo 	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1851c8826dd5STejun Heo }
1852c8826dd5STejun Heo 
1853c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1854c8826dd5STejun Heo {
1855c8826dd5STejun Heo 	free_bootmem(__pa(ptr), size);
1856c8826dd5STejun Heo }
1857c8826dd5STejun Heo 
1858e74e3962STejun Heo void __init setup_per_cpu_areas(void)
1859e74e3962STejun Heo {
1860e74e3962STejun Heo 	unsigned long delta;
1861e74e3962STejun Heo 	unsigned int cpu;
1862fb435d52STejun Heo 	int rc;
1863e74e3962STejun Heo 
1864e74e3962STejun Heo 	/*
1865e74e3962STejun Heo 	 * Always reserve area for module percpu variables.  That's
1866e74e3962STejun Heo 	 * what the legacy allocator did.
1867e74e3962STejun Heo 	 */
1868fb435d52STejun Heo 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1869c8826dd5STejun Heo 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1870c8826dd5STejun Heo 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1871fb435d52STejun Heo 	if (rc < 0)
1872bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
1873e74e3962STejun Heo 
1874e74e3962STejun Heo 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1875e74e3962STejun Heo 	for_each_possible_cpu(cpu)
1876fb435d52STejun Heo 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1877e74e3962STejun Heo }
1878e74e3962STejun Heo #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
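
/*
 * Once __per_cpu_offset[] is populated, per-cpu accesses reduce to
 * simple pointer arithmetic.  Conceptually (a sketch; the real
 * accessors live in the percpu headers and may use RELOC_HIDE() or
 * segment registers instead):
 *
 *	#define my_per_cpu_ptr(ptr, cpu) \
 *		((typeof(ptr))((char *)(ptr) + __per_cpu_offset[cpu]))
 */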
1879099a19d9STejun Heo 
1880bbddff05STejun Heo #else	/* CONFIG_SMP */
1881bbddff05STejun Heo 
1882bbddff05STejun Heo /*
1883bbddff05STejun Heo  * UP percpu area setup.
1884bbddff05STejun Heo  *
1885bbddff05STejun Heo  * UP always uses km-based percpu allocator with identity mapping.
1886bbddff05STejun Heo  * Static percpu variables are indistinguishable from the usual static
1887bbddff05STejun Heo  * variables and don't require any special preparation.
1888bbddff05STejun Heo  */
1889bbddff05STejun Heo void __init setup_per_cpu_areas(void)
1890bbddff05STejun Heo {
1891bbddff05STejun Heo 	const size_t unit_size =
1892bbddff05STejun Heo 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
1893bbddff05STejun Heo 					 PERCPU_DYNAMIC_RESERVE));
1894bbddff05STejun Heo 	struct pcpu_alloc_info *ai;
1895bbddff05STejun Heo 	void *fc;
1896bbddff05STejun Heo 
1897bbddff05STejun Heo 	ai = pcpu_alloc_alloc_info(1, 1);
1898bbddff05STejun Heo 	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
1899bbddff05STejun Heo 	if (!ai || !fc)
1900bbddff05STejun Heo 		panic("Failed to allocate memory for percpu areas.");
1901100d13c3SCatalin Marinas 	/* kmemleak tracks the percpu allocations separately */
1902100d13c3SCatalin Marinas 	kmemleak_free(fc);
1903bbddff05STejun Heo 
1904bbddff05STejun Heo 	ai->dyn_size = unit_size;
1905bbddff05STejun Heo 	ai->unit_size = unit_size;
1906bbddff05STejun Heo 	ai->atom_size = unit_size;
1907bbddff05STejun Heo 	ai->alloc_size = unit_size;
1908bbddff05STejun Heo 	ai->groups[0].nr_units = 1;
1909bbddff05STejun Heo 	ai->groups[0].cpu_map[0] = 0;
1910bbddff05STejun Heo 
1911bbddff05STejun Heo 	if (pcpu_setup_first_chunk(ai, fc) < 0)
1912bbddff05STejun Heo 		panic("Failed to initialize percpu areas.");
1913bbddff05STejun Heo }
1914bbddff05STejun Heo 
1915bbddff05STejun Heo #endif	/* CONFIG_SMP */
1916bbddff05STejun Heo 
1917099a19d9STejun Heo /*
1918099a19d9STejun Heo  * First and reserved chunks are initialized with temporary allocation
1919099a19d9STejun Heo  * maps in initdata so that they can be used before slab is online.
1920099a19d9STejun Heo  * This function is called after slab is brought up and replaces those
1921099a19d9STejun Heo  * with properly allocated maps.
1922099a19d9STejun Heo  */
1923099a19d9STejun Heo void __init percpu_init_late(void)
1924099a19d9STejun Heo {
1925099a19d9STejun Heo 	struct pcpu_chunk *target_chunks[] =
1926099a19d9STejun Heo 		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1927099a19d9STejun Heo 	struct pcpu_chunk *chunk;
1928099a19d9STejun Heo 	unsigned long flags;
1929099a19d9STejun Heo 	int i;
1930099a19d9STejun Heo 
1931099a19d9STejun Heo 	for (i = 0; (chunk = target_chunks[i]); i++) {
1932099a19d9STejun Heo 		int *map;
1933099a19d9STejun Heo 		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1934099a19d9STejun Heo 
1935099a19d9STejun Heo 		BUILD_BUG_ON(size > PAGE_SIZE);
1936099a19d9STejun Heo 
193790459ce0SBob Liu 		map = pcpu_mem_zalloc(size);
1938099a19d9STejun Heo 		BUG_ON(!map);
1939099a19d9STejun Heo 
1940099a19d9STejun Heo 		spin_lock_irqsave(&pcpu_lock, flags);
1941099a19d9STejun Heo 		memcpy(map, chunk->map, size);
1942099a19d9STejun Heo 		chunk->map = map;
1943099a19d9STejun Heo 		spin_unlock_irqrestore(&pcpu_lock, flags);
1944099a19d9STejun Heo 	}
1945099a19d9STejun Heo }
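
/*
 * For context: this is expected to run early in boot from the mm
 * bring-up path in init/main.c, right after the slab allocator comes
 * online, so pcpu_mem_zalloc() can hand back properly allocated maps
 * to replace the initdata ones.
 */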
1946