xref: /linux/mm/percpu.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/percpu.c - percpu memory allocator
4  *
5  * Copyright (C) 2009		SUSE Linux Products GmbH
6  * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
7  *
8  * Copyright (C) 2017		Facebook Inc.
9  * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
10  *
11  * The percpu allocator handles both static and dynamic areas.  Percpu
12  * areas are allocated in chunks which are divided into units.  There is
13  * a 1-to-1 mapping for units to possible cpus.  These units are grouped
14  * based on NUMA properties of the machine.
15  *
16  *  c0                           c1                         c2
17  *  -------------------          -------------------        ------------
18  * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
19  *  -------------------  ......  -------------------  ....  ------------
20  *
21  * Allocation is done by offsets into a unit's address space.  I.e., an
22  * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
23  * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
24  * and even sparse.  Access is handled by configuring percpu base
25  * registers according to the cpu to unit mappings and offsetting the
26  * base address using pcpu_unit_size.
27  *
28  * There is special consideration for the first chunk which must handle
29  * the static percpu variables in the kernel image as allocation services
30  * are not online yet.  In short, the first chunk is structured like so:
31  *
32  *                  <Static | [Reserved] | Dynamic>
33  *
34  * The static data is copied from the original section managed by the
35  * linker.  The reserved section, if non-zero, primarily manages static
36  * percpu variables from kernel modules.  Finally, the dynamic section
37  * takes care of normal allocations.
38  *
39  * The allocator organizes chunks into lists according to free size and
40  * memcg-awareness.  To make a percpu allocation memcg-aware the __GFP_ACCOUNT
41  * flag should be passed.  All memcg-aware allocations share one set of
42  * chunks, while unaccounted allocations and allocations performed by
43  * processes belonging to the root memory cgroup use the second set.
44  *
45  * The allocator tries to allocate from the fullest chunk first. Each chunk
46  * is managed by a bitmap with metadata blocks.  The allocation map is updated
47  * on every allocation and free to reflect the current state while the boundary
48  * map is only updated on allocation.  Each metadata block contains
49  * information to help mitigate the need to iterate over large portions
50  * of the bitmap.  The reverse mapping from page to chunk is stored in
51  * the page's index.  Lastly, units are lazily backed and grow in unison.
52  *
53  * There is a unique conversion that goes on here between bytes and bits.
54  * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
55  * tracks the number of pages it is responsible for in nr_pages.  Helper
56  * functions are used to convert from between the bytes, bits, and blocks.
57  * All hints are managed in bits unless explicitly stated.
58  *
59  * To use this allocator, arch code should do the following:
60  *
61  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
62  *   regular address to percpu pointer and back if they need to be
63  *   different from the default
64  *
65  * - use pcpu_setup_first_chunk() during percpu area initialization to
66  *   setup the first chunk containing the kernel static percpu area
67  */
68 
69 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70 
71 #include <linux/bitmap.h>
72 #include <linux/cpumask.h>
73 #include <linux/memblock.h>
74 #include <linux/err.h>
75 #include <linux/list.h>
76 #include <linux/log2.h>
77 #include <linux/mm.h>
78 #include <linux/module.h>
79 #include <linux/mutex.h>
80 #include <linux/percpu.h>
81 #include <linux/pfn.h>
82 #include <linux/slab.h>
83 #include <linux/spinlock.h>
84 #include <linux/vmalloc.h>
85 #include <linux/workqueue.h>
86 #include <linux/kmemleak.h>
87 #include <linux/sched.h>
88 #include <linux/sched/mm.h>
89 #include <linux/memcontrol.h>
90 
91 #include <asm/cacheflush.h>
92 #include <asm/sections.h>
93 #include <asm/tlbflush.h>
94 #include <asm/io.h>
95 
96 #define CREATE_TRACE_POINTS
97 #include <trace/events/percpu.h>
98 
99 #include "percpu-internal.h"
100 
101 /*
102  * The slots are sorted by the size of the biggest contiguous free area.
103  * 1-31 bytes share the same slot.
104  */
105 #define PCPU_SLOT_BASE_SHIFT		5
106 /* chunks in slots below this are subject to being sidelined on failed alloc */
107 #define PCPU_SLOT_FAIL_THRESHOLD	3
108 
109 #define PCPU_EMPTY_POP_PAGES_LOW	2
110 #define PCPU_EMPTY_POP_PAGES_HIGH	4
111 
112 #ifdef CONFIG_SMP
113 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
114 #ifndef __addr_to_pcpu_ptr
115 #define __addr_to_pcpu_ptr(addr)					\
116 	(void __percpu *)((unsigned long)(addr) -			\
117 			  (unsigned long)pcpu_base_addr	+		\
118 			  (unsigned long)__per_cpu_start)
119 #endif
120 #ifndef __pcpu_ptr_to_addr
121 #define __pcpu_ptr_to_addr(ptr)						\
122 	(void __force *)((unsigned long)(ptr) +				\
123 			 (unsigned long)pcpu_base_addr -		\
124 			 (unsigned long)__per_cpu_start)
125 #endif
126 #else	/* CONFIG_SMP */
127 /* on UP, it's always identity mapped */
128 #define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
129 #define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
130 #endif	/* CONFIG_SMP */
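
/*
 * Illustrative sketch (not part of the allocator itself): on SMP the
 * default translation rebases an address between the first chunk's
 * runtime location (pcpu_base_addr) and the linker-provided static
 * area (__per_cpu_start), so the two macros are exact inverses:
 *
 *	void *addr = pcpu_base_addr + off;
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *
 * leaves __pcpu_ptr_to_addr(ptr) == addr.  Per-cpu accessors then
 * conceptually add each cpu's unit offset back onto the translated
 * pointer to reach that cpu's copy.
 */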
131 
132 static int pcpu_unit_pages __ro_after_init;
133 static int pcpu_unit_size __ro_after_init;
134 static int pcpu_nr_units __ro_after_init;
135 static int pcpu_atom_size __ro_after_init;
136 int pcpu_nr_slots __ro_after_init;
137 static int pcpu_free_slot __ro_after_init;
138 int pcpu_sidelined_slot __ro_after_init;
139 int pcpu_to_depopulate_slot __ro_after_init;
140 static size_t pcpu_chunk_struct_size __ro_after_init;
141 
142 /* cpus with the lowest and highest unit addresses */
143 static unsigned int pcpu_low_unit_cpu __ro_after_init;
144 static unsigned int pcpu_high_unit_cpu __ro_after_init;
145 
146 /* the address of the first chunk which starts with the kernel static area */
147 void *pcpu_base_addr __ro_after_init;
148 
149 static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
150 const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */
151 
152 /* group information, used for vm allocation */
153 static int pcpu_nr_groups __ro_after_init;
154 static const unsigned long *pcpu_group_offsets __ro_after_init;
155 static const size_t *pcpu_group_sizes __ro_after_init;
156 
157 /*
158  * The first chunk which always exists.  Note that unlike other
159  * chunks, this one can be allocated and mapped in several different
160  * ways and thus often doesn't live in the vmalloc area.
161  */
162 struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
163 
164 /*
165  * Optional reserved chunk.  This chunk reserves part of the first
166  * chunk and serves it for reserved allocations.  When the reserved
167  * region doesn't exist, the following variable is NULL.
168  */
169 struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
170 
171 DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
172 static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
173 
174 struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
175 
176 /*
177  * The number of empty populated pages, protected by pcpu_lock.
178  * The reserved chunk doesn't contribute to the count.
179  */
180 int pcpu_nr_empty_pop_pages;
181 
182 /*
183  * The number of populated pages in use by the allocator, protected by
184  * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
185  * allocated/deallocated, it is allocated/deallocated in all units of a chunk
186  * and increments/decrements this count by 1).
187  */
188 static unsigned long pcpu_nr_populated;
189 
190 /*
191  * Balance work is used to populate or destroy chunks asynchronously.  We
192  * try to keep the number of populated free pages between
193  * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
194  * empty chunk.
195  */
196 static void pcpu_balance_workfn(struct work_struct *work);
197 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
198 static bool pcpu_async_enabled __read_mostly;
199 static bool pcpu_atomic_alloc_failed;
200 
201 static void pcpu_schedule_balance_work(void)
202 {
203 	if (pcpu_async_enabled)
204 		schedule_work(&pcpu_balance_work);
205 }
206 
207 /**
208  * pcpu_addr_in_chunk - check if the address is served from this chunk
209  * @chunk: chunk of interest
210  * @addr: percpu address
211  *
212  * RETURNS:
213  * True if the address is served from this chunk.
214  */
215 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
216 {
217 	void *start_addr, *end_addr;
218 
219 	if (!chunk)
220 		return false;
221 
222 	start_addr = chunk->base_addr + chunk->start_offset;
223 	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
224 		   chunk->end_offset;
225 
226 	return addr >= start_addr && addr < end_addr;
227 }
228 
229 static int __pcpu_size_to_slot(int size)
230 {
231 	int highbit = fls(size);	/* size is in bytes */
232 	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
233 }
234 
235 static int pcpu_size_to_slot(int size)
236 {
237 	if (size == pcpu_unit_size)
238 		return pcpu_free_slot;
239 	return __pcpu_size_to_slot(size);
240 }
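
/*
 * Illustrative example (not part of the allocator): with
 * PCPU_SLOT_BASE_SHIFT == 5, __pcpu_size_to_slot() maps a size in
 * bytes to a slot via fls():
 *
 *	__pcpu_size_to_slot(12)   == 1	(fls(12)   == 4)
 *	__pcpu_size_to_slot(512)  == 7	(fls(512)  == 10)
 *	__pcpu_size_to_slot(4096) == 10	(fls(4096) == 13)
 *
 * A chunk whose largest contiguous free area equals pcpu_unit_size is
 * placed in pcpu_free_slot instead.
 */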
241 
242 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
243 {
244 	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
245 
246 	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
247 	    chunk_md->contig_hint == 0)
248 		return 0;
249 
250 	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
251 }
252 
253 /* set the pointer to a chunk in a page struct */
254 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
255 {
256 	page->private = (unsigned long)pcpu;
257 }
258 
259 /* obtain pointer to a chunk from a page struct */
260 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
261 {
262 	return (struct pcpu_chunk *)page->private;
263 }
264 
265 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
266 {
267 	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
268 }
269 
270 static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
271 {
272 	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
273 }
274 
275 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
276 				     unsigned int cpu, int page_idx)
277 {
278 	return (unsigned long)chunk->base_addr +
279 	       pcpu_unit_page_offset(cpu, page_idx);
280 }
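
/*
 * Illustrative example (assuming 4K pages): the address of page 3 of
 * cpu 2's unit within a chunk is
 *
 *	pcpu_chunk_addr(chunk, 2, 3)
 *		== (unsigned long)chunk->base_addr
 *		   + pcpu_unit_offsets[2] + (3 << PAGE_SHIFT)
 *
 * i.e. the chunk base plus that cpu's unit offset plus 12288 bytes.
 */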
281 
282 /*
283  * The following are helper functions to access bitmaps and convert
284  * between bitmap offsets and address offsets.
285  */
286 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
287 {
288 	return chunk->alloc_map +
289 	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
290 }
291 
292 static unsigned long pcpu_off_to_block_index(int off)
293 {
294 	return off / PCPU_BITMAP_BLOCK_BITS;
295 }
296 
297 static unsigned long pcpu_off_to_block_off(int off)
298 {
299 	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
300 }
301 
302 static unsigned long pcpu_block_off_to_off(int index, int off)
303 {
304 	return index * PCPU_BITMAP_BLOCK_BITS + off;
305 }
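
/*
 * Illustrative example (assuming 4K pages and a 4 byte
 * PCPU_MIN_ALLOC_SIZE, i.e. PCPU_BITMAP_BLOCK_BITS == 1024): chunk bit
 * offset 2500 decomposes as
 *
 *	pcpu_off_to_block_index(2500) == 2
 *	pcpu_off_to_block_off(2500)   == 452
 *
 * and pcpu_block_off_to_off(2, 452) == 2500 recombines them.
 */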
306 
307 /**
308  * pcpu_check_block_hint - check against the contig hint
309  * @block: block of interest
310  * @bits: size of allocation
311  * @align: alignment of area (max PAGE_SIZE)
312  *
313  * Check to see if the allocation can fit in the block's contig hint.
314  * Note, a chunk uses the same hints as a block so this can also check against
315  * the chunk's contig hint.
316  */
317 static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
318 				  size_t align)
319 {
320 	int bit_off = ALIGN(block->contig_hint_start, align) -
321 		block->contig_hint_start;
322 
323 	return bit_off + bits <= block->contig_hint;
324 }
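
/*
 * For example, a block with contig_hint_start == 3 and contig_hint ==
 * 10 can hold a request of bits == 8 at align == 4: ALIGN(3, 4) == 4,
 * so bit_off == 1 and 1 + 8 <= 10.
 */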
325 
326 /*
327  * pcpu_next_hint - determine which hint to use
328  * @block: block of interest
329  * @alloc_bits: size of allocation
330  *
331  * This determines if we should scan based on the scan_hint or first_free.
332  * In general, we want to scan from first_free to fulfill allocations by
333  * first fit.  However, if we know a scan_hint at position scan_hint_start
334  * cannot fulfill an allocation, we can begin scanning from there knowing
335  * the contig_hint will be our fallback.
336  */
337 static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
338 {
339 	/*
340 	 * The three conditions below determine if we can skip past the
341 	 * scan_hint.  First, does the scan hint exist.  Second, is the
342 	 * contig_hint after the scan_hint (possibly not true iff
343 	 * contig_hint == scan_hint).  Third, is the allocation request
344 	 * larger than the scan_hint.
345 	 */
346 	if (block->scan_hint &&
347 	    block->contig_hint_start > block->scan_hint_start &&
348 	    alloc_bits > block->scan_hint)
349 		return block->scan_hint_start + block->scan_hint;
350 
351 	return block->first_free;
352 }
353 
354 /**
355  * pcpu_next_md_free_region - finds the next hint free area
356  * @chunk: chunk of interest
357  * @bit_off: chunk offset
358  * @bits: size of free area
359  *
360  * Helper function for pcpu_for_each_md_free_region.  It checks
361  * block->contig_hint and performs aggregation across blocks to find the
362  * next hint.  It modifies bit_off and bits in-place to be consumed in the
363  * loop.
364  */
365 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
366 				     int *bits)
367 {
368 	int i = pcpu_off_to_block_index(*bit_off);
369 	int block_off = pcpu_off_to_block_off(*bit_off);
370 	struct pcpu_block_md *block;
371 
372 	*bits = 0;
373 	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
374 	     block++, i++) {
375 		/* handles contig area across blocks */
376 		if (*bits) {
377 			*bits += block->left_free;
378 			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
379 				continue;
380 			return;
381 		}
382 
383 		/*
384 		 * This checks three things.  First is there a contig_hint to
385 		 * check.  Second, have we checked this hint before by
386 		 * comparing the block_off.  Third, is this the same as the
387 		 * right contig hint.  In the last case, it spills over into
388 		 * the next block and should be handled by the contig area
389 		 * across blocks code.
390 		 */
391 		*bits = block->contig_hint;
392 		if (*bits && block->contig_hint_start >= block_off &&
393 		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
394 			*bit_off = pcpu_block_off_to_off(i,
395 					block->contig_hint_start);
396 			return;
397 		}
398 		/* reset to satisfy the second predicate above */
399 		block_off = 0;
400 
401 		*bits = block->right_free;
402 		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
403 	}
404 }
405 
406 /**
407  * pcpu_next_fit_region - finds fit areas for a given allocation request
408  * @chunk: chunk of interest
409  * @alloc_bits: size of allocation
410  * @align: alignment of area (max PAGE_SIZE)
411  * @bit_off: chunk offset
412  * @bits: size of free area
413  *
414  * Finds the next free region that is viable for use with a given size and
415  * alignment.  This only returns if there is a valid area to be used for this
416  * allocation.  block->first_free is returned if the allocation request fits
417  * within the block to see if the request can be fulfilled prior to the contig
418  * hint.
419  */
420 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
421 				 int align, int *bit_off, int *bits)
422 {
423 	int i = pcpu_off_to_block_index(*bit_off);
424 	int block_off = pcpu_off_to_block_off(*bit_off);
425 	struct pcpu_block_md *block;
426 
427 	*bits = 0;
428 	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
429 	     block++, i++) {
430 		/* handles contig area across blocks */
431 		if (*bits) {
432 			*bits += block->left_free;
433 			if (*bits >= alloc_bits)
434 				return;
435 			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
436 				continue;
437 		}
438 
439 		/* check block->contig_hint */
440 		*bits = ALIGN(block->contig_hint_start, align) -
441 			block->contig_hint_start;
442 		/*
443 		 * This uses the block offset to determine if this has been
444 		 * checked in the prior iteration.
445 		 */
446 		if (block->contig_hint &&
447 		    block->contig_hint_start >= block_off &&
448 		    block->contig_hint >= *bits + alloc_bits) {
449 			int start = pcpu_next_hint(block, alloc_bits);
450 
451 			*bits += alloc_bits + block->contig_hint_start -
452 				 start;
453 			*bit_off = pcpu_block_off_to_off(i, start);
454 			return;
455 		}
456 		/* reset to satisfy the second predicate above */
457 		block_off = 0;
458 
459 		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
460 				 align);
461 		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
462 		*bit_off = pcpu_block_off_to_off(i, *bit_off);
463 		if (*bits >= alloc_bits)
464 			return;
465 	}
466 
467 	/* no valid offsets were found - fail condition */
468 	*bit_off = pcpu_chunk_map_bits(chunk);
469 }
470 
471 /*
472  * Metadata free area iterators.  These perform aggregation of free areas
473  * based on the metadata blocks and return the offset @bit_off and size in
474  * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
475  * a fit is found for the allocation request.
476  */
477 #define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
478 	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
479 	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
480 	     (bit_off) += (bits) + 1,					\
481 	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
482 
483 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
484 	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
485 				  &(bits));				      \
486 	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
487 	     (bit_off) += (bits),					      \
488 	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
489 				  &(bits)))
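
/*
 * Usage sketch (illustrative only): with pcpu_lock held, walking each
 * hinted free area of a chunk looks like
 *
 *	int bit_off = 0, bits = 0;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		pr_debug("free area at %d of %d bits\n", bit_off, bits);
 *
 * pcpu_for_each_fit_region() is used the same way but also takes the
 * request size and alignment and only visits candidate areas.
 */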
490 
491 /**
492  * pcpu_mem_zalloc - allocate memory
493  * @size: bytes to allocate
494  * @gfp: allocation flags
495  *
496  * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
497  * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
498  * This is to facilitate passing through whitelisted flags.  The
499  * returned memory is always zeroed.
500  *
501  * RETURNS:
502  * Pointer to the allocated area on success, NULL on failure.
503  */
504 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
505 {
506 	if (WARN_ON_ONCE(!slab_is_available()))
507 		return NULL;
508 
509 	if (size <= PAGE_SIZE)
510 		return kzalloc(size, gfp);
511 	else
512 		return __vmalloc(size, gfp | __GFP_ZERO);
513 }
514 
515 /**
516  * pcpu_mem_free - free memory
517  * @ptr: memory to free
518  *
519  * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
520  */
521 static void pcpu_mem_free(void *ptr)
522 {
523 	kvfree(ptr);
524 }
525 
526 static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
527 			      bool move_front)
528 {
529 	if (chunk != pcpu_reserved_chunk) {
530 		if (move_front)
531 			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
532 		else
533 			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
534 	}
535 }
536 
537 static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
538 {
539 	__pcpu_chunk_move(chunk, slot, true);
540 }
541 
542 /**
543  * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
544  * @chunk: chunk of interest
545  * @oslot: the previous slot it was on
546  *
547  * This function is called after an allocation or free changed @chunk.
548  * New slot according to the changed state is determined and @chunk is
549  * moved to the slot.  Note that the reserved chunk is never put on
550  * chunk slots.
551  *
552  * CONTEXT:
553  * pcpu_lock.
554  */
555 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
556 {
557 	int nslot = pcpu_chunk_slot(chunk);
558 
559 	/* leave isolated chunks in-place */
560 	if (chunk->isolated)
561 		return;
562 
563 	if (oslot != nslot)
564 		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
565 }
566 
567 static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
568 {
569 	lockdep_assert_held(&pcpu_lock);
570 
571 	if (!chunk->isolated) {
572 		chunk->isolated = true;
573 		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
574 	}
575 	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
576 }
577 
578 static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
579 {
580 	lockdep_assert_held(&pcpu_lock);
581 
582 	if (chunk->isolated) {
583 		chunk->isolated = false;
584 		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
585 		pcpu_chunk_relocate(chunk, -1);
586 	}
587 }
588 
589 /*
590  * pcpu_update_empty_pages - update empty page counters
591  * @chunk: chunk of interest
592  * @nr: nr of empty pages
593  *
594  * This is used to keep track of the empty pages based on the premise that
595  * an md_block covers a page.  The hint update functions recognize if a block
596  * becomes full or is broken and calculate the deltas needed to track free pages.
597  */
598 static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
599 {
600 	chunk->nr_empty_pop_pages += nr;
601 	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
602 		pcpu_nr_empty_pop_pages += nr;
603 }
604 
605 /*
606  * pcpu_region_overlap - determines if two regions overlap
607  * @a: start of first region, inclusive
608  * @b: end of first region, exclusive
609  * @x: start of second region, inclusive
610  * @y: end of second region, exclusive
611  *
612  * This is used to determine if the hint region [a, b) overlaps with the
613  * allocated region [x, y).
614  */
615 static inline bool pcpu_region_overlap(int a, int b, int x, int y)
616 {
617 	return (a < y) && (x < b);
618 }
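
/*
 * For example, pcpu_region_overlap(0, 4, 2, 6) is true while
 * pcpu_region_overlap(0, 4, 4, 8) is false, since both regions are
 * half-open.
 */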
619 
620 /**
621  * pcpu_block_update - updates a block given a free area
622  * @block: block of interest
623  * @start: start offset in block
624  * @end: end offset in block
625  *
626  * Updates a block given a known free area.  The region [start, end) is
627  * expected to be the entirety of the free area within a block.  Chooses
628  * the best starting offset if the contig hints are equal.
629  */
630 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
631 {
632 	int contig = end - start;
633 
634 	block->first_free = min(block->first_free, start);
635 	if (start == 0)
636 		block->left_free = contig;
637 
638 	if (end == block->nr_bits)
639 		block->right_free = contig;
640 
641 	if (contig > block->contig_hint) {
642 		/* promote the old contig_hint to be the new scan_hint */
643 		if (start > block->contig_hint_start) {
644 			if (block->contig_hint > block->scan_hint) {
645 				block->scan_hint_start =
646 					block->contig_hint_start;
647 				block->scan_hint = block->contig_hint;
648 			} else if (start < block->scan_hint_start) {
649 				/*
650 				 * The old contig_hint == scan_hint.  But, the
651 				 * new contig is larger so hold the invariant
652 				 * scan_hint_start < contig_hint_start.
653 				 */
654 				block->scan_hint = 0;
655 			}
656 		} else {
657 			block->scan_hint = 0;
658 		}
659 		block->contig_hint_start = start;
660 		block->contig_hint = contig;
661 	} else if (contig == block->contig_hint) {
662 		if (block->contig_hint_start &&
663 		    (!start ||
664 		     __ffs(start) > __ffs(block->contig_hint_start))) {
665 			/* start has a better alignment so use it */
666 			block->contig_hint_start = start;
667 			if (start < block->scan_hint_start &&
668 			    block->contig_hint > block->scan_hint)
669 				block->scan_hint = 0;
670 		} else if (start > block->scan_hint_start ||
671 			   block->contig_hint > block->scan_hint) {
672 			/*
673 			 * Knowing contig == contig_hint, update the scan_hint
674 			 * if it is farther than or larger than the current
675 			 * scan_hint.
676 			 */
677 			block->scan_hint_start = start;
678 			block->scan_hint = contig;
679 		}
680 	} else {
681 		/*
682 		 * The region is smaller than the contig_hint.  So only update
683 		 * the scan_hint if it is larger than or equal and farther than
684 		 * the current scan_hint.
685 		 */
686 		if ((start < block->contig_hint_start &&
687 		     (contig > block->scan_hint ||
688 		      (contig == block->scan_hint &&
689 		       start > block->scan_hint_start)))) {
690 			block->scan_hint_start = start;
691 			block->scan_hint = contig;
692 		}
693 	}
694 }
695 
696 /*
697  * pcpu_block_update_scan - update a block given a free area from a scan
698  * @chunk: chunk of interest
699  * @bit_off: chunk offset
700  * @bits: size of free area
701  *
702  * Finding the final allocation spot first goes through pcpu_find_block_fit()
703  * to find a block that can hold the allocation and then pcpu_alloc_area()
704  * where a scan is used.  When allocations require specific alignments,
705  * we can inadvertently create holes which will not be seen in the alloc
706  * or free paths.
707  *
708  * This takes a given free area hole and updates a block as it may change the
709  * scan_hint.  We need to scan backwards to ensure we don't miss free bits
710  * from alignment.
711  */
712 static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
713 				   int bits)
714 {
715 	int s_off = pcpu_off_to_block_off(bit_off);
716 	int e_off = s_off + bits;
717 	int s_index, l_bit;
718 	struct pcpu_block_md *block;
719 
720 	if (e_off > PCPU_BITMAP_BLOCK_BITS)
721 		return;
722 
723 	s_index = pcpu_off_to_block_index(bit_off);
724 	block = chunk->md_blocks + s_index;
725 
726 	/* scan backwards in case of alignment skipping free bits */
727 	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
728 	s_off = (s_off == l_bit) ? 0 : l_bit + 1;
729 
730 	pcpu_block_update(block, s_off, e_off);
731 }
732 
733 /**
734  * pcpu_chunk_refresh_hint - updates metadata about a chunk
735  * @chunk: chunk of interest
736  * @full_scan: if we should scan from the beginning
737  *
738  * Iterates over the metadata blocks to find the largest contig area.
739  * A full scan can be avoided on the allocation path as this is triggered
740  * if we broke the contig_hint.  In doing so, the scan_hint will be before
741  * the contig_hint or after if the scan_hint == contig_hint.  This cannot
742  * be prevented on freeing as we want to find the largest area possibly
743  * spanning blocks.
744  */
745 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
746 {
747 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
748 	int bit_off, bits;
749 
750 	/* promote scan_hint to contig_hint */
751 	if (!full_scan && chunk_md->scan_hint) {
752 		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
753 		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
754 		chunk_md->contig_hint = chunk_md->scan_hint;
755 		chunk_md->scan_hint = 0;
756 	} else {
757 		bit_off = chunk_md->first_free;
758 		chunk_md->contig_hint = 0;
759 	}
760 
761 	bits = 0;
762 	pcpu_for_each_md_free_region(chunk, bit_off, bits)
763 		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
764 }
765 
766 /**
767  * pcpu_block_refresh_hint - refresh a block's metadata from its alloc map
768  * @chunk: chunk of interest
769  * @index: index of the metadata block
770  *
771  * Scans over the block beginning at first_free and updates the block
772  * metadata accordingly.
773  */
774 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
775 {
776 	struct pcpu_block_md *block = chunk->md_blocks + index;
777 	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
778 	unsigned int start, end;	/* region start, region end */
779 
780 	/* promote scan_hint to contig_hint */
781 	if (block->scan_hint) {
782 		start = block->scan_hint_start + block->scan_hint;
783 		block->contig_hint_start = block->scan_hint_start;
784 		block->contig_hint = block->scan_hint;
785 		block->scan_hint = 0;
786 	} else {
787 		start = block->first_free;
788 		block->contig_hint = 0;
789 	}
790 
791 	block->right_free = 0;
792 
793 	/* iterate over free areas and update the contig hints */
794 	for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
795 		pcpu_block_update(block, start, end);
796 }
797 
798 /**
799  * pcpu_block_update_hint_alloc - update hint on allocation path
800  * @chunk: chunk of interest
801  * @bit_off: chunk offset
802  * @bits: size of request
803  *
804  * Updates metadata for the allocation path.  The metadata only has to be
805  * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
806  * scans are required if the block's contig hint is broken.
807  */
808 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
809 					 int bits)
810 {
811 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
812 	int nr_empty_pages = 0;
813 	struct pcpu_block_md *s_block, *e_block, *block;
814 	int s_index, e_index;	/* block indexes of the allocated area */
815 	int s_off, e_off;	/* block offsets of the allocated area */
816 
817 	/*
818 	 * Calculate per block offsets.
819 	 * The calculation uses an inclusive range, but the resulting offsets
820 	 * are [start, end).  e_index always points to the last block in the
821 	 * range.
822 	 */
823 	s_index = pcpu_off_to_block_index(bit_off);
824 	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
825 	s_off = pcpu_off_to_block_off(bit_off);
826 	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
827 
828 	s_block = chunk->md_blocks + s_index;
829 	e_block = chunk->md_blocks + e_index;
830 
831 	/*
832 	 * Update s_block.
833 	 */
834 	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
835 		nr_empty_pages++;
836 
837 	/*
838 	 * block->first_free must be updated if the allocation takes its place.
839 	 * If the allocation breaks the contig_hint, a scan is required to
840 	 * restore this hint.
841 	 */
842 	if (s_off == s_block->first_free)
843 		s_block->first_free = find_next_zero_bit(
844 					pcpu_index_alloc_map(chunk, s_index),
845 					PCPU_BITMAP_BLOCK_BITS,
846 					s_off + bits);
847 
848 	if (pcpu_region_overlap(s_block->scan_hint_start,
849 				s_block->scan_hint_start + s_block->scan_hint,
850 				s_off,
851 				s_off + bits))
852 		s_block->scan_hint = 0;
853 
854 	if (pcpu_region_overlap(s_block->contig_hint_start,
855 				s_block->contig_hint_start +
856 				s_block->contig_hint,
857 				s_off,
858 				s_off + bits)) {
859 		/* block contig hint is broken - scan to fix it */
860 		if (!s_off)
861 			s_block->left_free = 0;
862 		pcpu_block_refresh_hint(chunk, s_index);
863 	} else {
864 		/* update left and right contig manually */
865 		s_block->left_free = min(s_block->left_free, s_off);
866 		if (s_index == e_index)
867 			s_block->right_free = min_t(int, s_block->right_free,
868 					PCPU_BITMAP_BLOCK_BITS - e_off);
869 		else
870 			s_block->right_free = 0;
871 	}
872 
873 	/*
874 	 * Update e_block.
875 	 */
876 	if (s_index != e_index) {
877 		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
878 			nr_empty_pages++;
879 
880 		/*
881 		 * When the allocation is across blocks, the end is along
882 		 * the left part of the e_block.
883 		 */
884 		e_block->first_free = find_next_zero_bit(
885 				pcpu_index_alloc_map(chunk, e_index),
886 				PCPU_BITMAP_BLOCK_BITS, e_off);
887 
888 		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
889 			/* end is block aligned; reset e_block via the loop below */
890 			e_block++;
891 		} else {
892 			if (e_off > e_block->scan_hint_start)
893 				e_block->scan_hint = 0;
894 
895 			e_block->left_free = 0;
896 			if (e_off > e_block->contig_hint_start) {
897 				/* contig hint is broken - scan to fix it */
898 				pcpu_block_refresh_hint(chunk, e_index);
899 			} else {
900 				e_block->right_free =
901 					min_t(int, e_block->right_free,
902 					      PCPU_BITMAP_BLOCK_BITS - e_off);
903 			}
904 		}
905 
906 		/* update in-between md_blocks */
907 		nr_empty_pages += (e_index - s_index - 1);
908 		for (block = s_block + 1; block < e_block; block++) {
909 			block->scan_hint = 0;
910 			block->contig_hint = 0;
911 			block->left_free = 0;
912 			block->right_free = 0;
913 		}
914 	}
915 
916 	/*
917 	 * If the allocation is not atomic, some blocks may not be
918 	 * populated with pages, while we account it here.  The number
919 	 * of pages will be added back with pcpu_chunk_populated()
920 	 * when populating pages.
921 	 */
922 	if (nr_empty_pages)
923 		pcpu_update_empty_pages(chunk, -nr_empty_pages);
924 
925 	if (pcpu_region_overlap(chunk_md->scan_hint_start,
926 				chunk_md->scan_hint_start +
927 				chunk_md->scan_hint,
928 				bit_off,
929 				bit_off + bits))
930 		chunk_md->scan_hint = 0;
931 
932 	/*
933 	 * The only time a full chunk scan is required is if the chunk
934 	 * contig hint is broken.  Otherwise, it means a smaller space
935 	 * was used and therefore the chunk contig hint is still correct.
936 	 */
937 	if (pcpu_region_overlap(chunk_md->contig_hint_start,
938 				chunk_md->contig_hint_start +
939 				chunk_md->contig_hint,
940 				bit_off,
941 				bit_off + bits))
942 		pcpu_chunk_refresh_hint(chunk, false);
943 }
944 
945 /**
946  * pcpu_block_update_hint_free - updates the block hints on the free path
947  * @chunk: chunk of interest
948  * @bit_off: chunk offset
949  * @bits: size of request
950  *
951  * Updates metadata for the free path.  This avoids a blind block
952  * refresh by making use of the block contig hints.  If this fails, it scans
953  * forward and backward to determine the extent of the free area.  This is
954  * capped at the boundary of blocks.
955  *
956  * A chunk update is triggered if a page becomes free, a block becomes free,
957  * or the free spans across blocks.  This tradeoff is to minimize iterating
958  * over the block metadata to update chunk_md->contig_hint.
959  * chunk_md->contig_hint may be off by up to a page, but it will never be more
960  * than the available space.  If the contig hint is contained in one block, it
961  * will be accurate.
962  */
963 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
964 					int bits)
965 {
966 	int nr_empty_pages = 0;
967 	struct pcpu_block_md *s_block, *e_block, *block;
968 	int s_index, e_index;	/* block indexes of the freed allocation */
969 	int s_off, e_off;	/* block offsets of the freed allocation */
970 	int start, end;		/* start and end of the whole free area */
971 
972 	/*
973 	 * Calculate per block offsets.
974 	 * The calculation uses an inclusive range, but the resulting offsets
975 	 * are [start, end).  e_index always points to the last block in the
976 	 * range.
977 	 */
978 	s_index = pcpu_off_to_block_index(bit_off);
979 	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
980 	s_off = pcpu_off_to_block_off(bit_off);
981 	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
982 
983 	s_block = chunk->md_blocks + s_index;
984 	e_block = chunk->md_blocks + e_index;
985 
986 	/*
987 	 * Check if the freed area aligns with the block->contig_hint.
988 	 * If it does, then the scan to find the beginning/end of the
989 	 * larger free area can be avoided.
990 	 *
991 	 * start and end refer to beginning and end of the free area
992 	 * within each their respective blocks.  This is not necessarily
993 	 * the entire free area as it may span blocks past the beginning
994 	 * or end of the block.
995 	 */
996 	start = s_off;
997 	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
998 		start = s_block->contig_hint_start;
999 	} else {
1000 		/*
1001 		 * Scan backwards to find the extent of the free area.
1002 		 * find_last_bit() returns the size it was passed when no set bit
1003 		 * is found, so if start is returned, no allocated bit precedes it
1004 		 * and everything before start in this block is free.
1005 		 */
1006 		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1007 					  start);
1008 		start = (start == l_bit) ? 0 : l_bit + 1;
1009 	}
1010 
1011 	end = e_off;
1012 	if (e_off == e_block->contig_hint_start)
1013 		end = e_block->contig_hint_start + e_block->contig_hint;
1014 	else
1015 		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1016 				    PCPU_BITMAP_BLOCK_BITS, end);
1017 
1018 	/* update s_block */
1019 	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
1020 	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1021 		nr_empty_pages++;
1022 	pcpu_block_update(s_block, start, e_off);
1023 
1024 	/* the free spans blocks - also update e_block and the blocks in between */
1025 	if (s_index != e_index) {
1026 		/* update e_block */
1027 		if (end == PCPU_BITMAP_BLOCK_BITS)
1028 			nr_empty_pages++;
1029 		pcpu_block_update(e_block, 0, end);
1030 
1031 		/* reset md_blocks in the middle */
1032 		nr_empty_pages += (e_index - s_index - 1);
1033 		for (block = s_block + 1; block < e_block; block++) {
1034 			block->first_free = 0;
1035 			block->scan_hint = 0;
1036 			block->contig_hint_start = 0;
1037 			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1038 			block->left_free = PCPU_BITMAP_BLOCK_BITS;
1039 			block->right_free = PCPU_BITMAP_BLOCK_BITS;
1040 		}
1041 	}
1042 
1043 	if (nr_empty_pages)
1044 		pcpu_update_empty_pages(chunk, nr_empty_pages);
1045 
1046 	/*
1047 	 * Refresh chunk metadata when the free makes a block free or spans
1048 	 * across blocks.  The contig_hint may be off by up to a page, but if
1049 	 * the contig_hint is contained in a block, it will be accurate with
1050 	 * the else condition below.
1051 	 */
1052 	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1053 		pcpu_chunk_refresh_hint(chunk, true);
1054 	else
1055 		pcpu_block_update(&chunk->chunk_md,
1056 				  pcpu_block_off_to_off(s_index, start),
1057 				  end);
1058 }
1059 
1060 /**
1061  * pcpu_is_populated - determines if the region is populated
1062  * @chunk: chunk of interest
1063  * @bit_off: chunk offset
1064  * @bits: size of area
1065  * @next_off: return value for the next offset to start searching
1066  *
1067  * For atomic allocations, check if the backing pages are populated.
1068  *
1069  * RETURNS:
1070  * true if the backing pages are populated, false otherwise.
1071  * @next_off is set so pcpu_find_block_fit() can skip over unpopulated blocks.
1072  */
1073 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1074 			      int *next_off)
1075 {
1076 	unsigned int start, end;
1077 
1078 	start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1079 	end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
1080 
1081 	start = find_next_zero_bit(chunk->populated, end, start);
1082 	if (start >= end)
1083 		return true;
1084 
1085 	end = find_next_bit(chunk->populated, end, start + 1);
1086 
1087 	*next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
1088 	return false;
1089 }
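
/*
 * Illustrative example (assuming 4K pages and a 4 byte
 * PCPU_MIN_ALLOC_SIZE): a request at bit_off 1000 for 100 bits covers
 * the byte range [4000, 4400), i.e. pages [0, 2) of the unit, so both
 * pages must be populated for an atomic allocation to use this area.
 */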
1090 
1091 /**
1092  * pcpu_find_block_fit - finds the block index to start searching
1093  * @chunk: chunk of interest
1094  * @alloc_bits: size of request in allocation units
1095  * @align: alignment of area (max PAGE_SIZE bytes)
1096  * @pop_only: use populated regions only
1097  *
1098  * Given a chunk and an allocation spec, find the offset to begin searching
1099  * for a free region.  This iterates over the bitmap metadata blocks to
1100  * find an offset that will be guaranteed to fit the requirements.  It is
1101  * not quite first fit because, if the allocation does not fit in the contig
1102  * hint of a block or chunk, that block or chunk is skipped.  This errs on the
1103  * side of caution to prevent excess iteration.  Poor alignment can cause the
1104  * allocator to skip over blocks and chunks that have valid free areas.
1105  *
1106  * RETURNS:
1107  * The offset in the bitmap to begin searching.
1108  * -1 if no offset is found.
1109  */
1110 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1111 			       size_t align, bool pop_only)
1112 {
1113 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1114 	int bit_off, bits, next_off;
1115 
1116 	/*
1117 	 * This is an optimization to prevent scanning by assuming that, if the
1118 	 * allocation cannot fit in the global hint, there is memory pressure
1119 	 * and a new chunk would be created soon anyway.
1120 	 */
1121 	if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
1122 		return -1;
1123 
1124 	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1125 	bits = 0;
1126 	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1127 		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1128 						   &next_off))
1129 			break;
1130 
1131 		bit_off = next_off;
1132 		bits = 0;
1133 	}
1134 
1135 	if (bit_off == pcpu_chunk_map_bits(chunk))
1136 		return -1;
1137 
1138 	return bit_off;
1139 }
1140 
1141 /*
1142  * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1143  * @map: the address to base the search on
1144  * @size: the bitmap size in bits
1145  * @start: the bitnumber to start searching at
1146  * @nr: the number of zeroed bits we're looking for
1147  * @align_mask: alignment mask for zero area
1148  * @largest_off: offset of the largest area skipped
1149  * @largest_bits: size of the largest area skipped
1150  *
1151  * The @align_mask should be one less than a power of 2.
1152  *
1153  * This is a modified version of bitmap_find_next_zero_area_off() to remember
1154  * the largest area that was skipped.  This is imperfect, but in general is
1155  * good enough.  The largest remembered region is the largest failed region
1156  * seen.  This does not include anything we possibly skipped due to alignment.
1157  * pcpu_block_update_scan() does scan backwards to try and recover what was
1158  * lost to alignment.  While this can cause scanning to miss earlier possible
1159  * free areas, smaller allocations will eventually fill those holes.
1160  */
1161 static unsigned long pcpu_find_zero_area(unsigned long *map,
1162 					 unsigned long size,
1163 					 unsigned long start,
1164 					 unsigned long nr,
1165 					 unsigned long align_mask,
1166 					 unsigned long *largest_off,
1167 					 unsigned long *largest_bits)
1168 {
1169 	unsigned long index, end, i, area_off, area_bits;
1170 again:
1171 	index = find_next_zero_bit(map, size, start);
1172 
1173 	/* Align allocation */
1174 	index = __ALIGN_MASK(index, align_mask);
1175 	area_off = index;
1176 
1177 	end = index + nr;
1178 	if (end > size)
1179 		return end;
1180 	i = find_next_bit(map, end, index);
1181 	if (i < end) {
1182 		area_bits = i - area_off;
1183 		/* remember largest unused area with best alignment */
1184 		if (area_bits > *largest_bits ||
1185 		    (area_bits == *largest_bits && *largest_off &&
1186 		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1187 			*largest_off = area_off;
1188 			*largest_bits = area_bits;
1189 		}
1190 
1191 		start = i + 1;
1192 		goto again;
1193 	}
1194 	return index;
1195 }
1196 
1197 /**
1198  * pcpu_alloc_area - allocates an area from a pcpu_chunk
1199  * @chunk: chunk of interest
1200  * @alloc_bits: size of request in allocation units
1201  * @align: alignment of area (max PAGE_SIZE)
1202  * @start: bit_off to start searching
1203  *
1204  * This function takes in a @start offset to begin searching to fit an
1205  * allocation of @alloc_bits with alignment @align.  It needs to scan
1206  * the allocation map because if it fits within the block's contig hint,
1207  * @start will be block->first_free. This is an attempt to fill the
1208  * allocation prior to breaking the contig hint.  The allocation and
1209  * boundary maps are updated accordingly if it confirms a valid
1210  * free area.
1211  *
1212  * RETURNS:
1213  * Allocated addr offset in @chunk on success.
1214  * -1 if no matching area is found.
1215  */
1216 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1217 			   size_t align, int start)
1218 {
1219 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1220 	size_t align_mask = (align) ? (align - 1) : 0;
1221 	unsigned long area_off = 0, area_bits = 0;
1222 	int bit_off, end, oslot;
1223 
1224 	lockdep_assert_held(&pcpu_lock);
1225 
1226 	oslot = pcpu_chunk_slot(chunk);
1227 
1228 	/*
1229 	 * Search to find a fit.
1230 	 */
1231 	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1232 		    pcpu_chunk_map_bits(chunk));
1233 	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1234 				      align_mask, &area_off, &area_bits);
1235 	if (bit_off >= end)
1236 		return -1;
1237 
1238 	if (area_bits)
1239 		pcpu_block_update_scan(chunk, area_off, area_bits);
1240 
1241 	/* update alloc map */
1242 	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1243 
1244 	/* update boundary map */
1245 	set_bit(bit_off, chunk->bound_map);
1246 	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1247 	set_bit(bit_off + alloc_bits, chunk->bound_map);
1248 
1249 	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1250 
1251 	/* update first free bit */
1252 	if (bit_off == chunk_md->first_free)
1253 		chunk_md->first_free = find_next_zero_bit(
1254 					chunk->alloc_map,
1255 					pcpu_chunk_map_bits(chunk),
1256 					bit_off + alloc_bits);
1257 
1258 	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1259 
1260 	pcpu_chunk_relocate(chunk, oslot);
1261 
1262 	return bit_off * PCPU_MIN_ALLOC_SIZE;
1263 }
1264 
1265 /**
1266  * pcpu_free_area - frees the corresponding offset
1267  * @chunk: chunk of interest
1268  * @off: addr offset into chunk
1269  *
1270  * This function determines the size of an allocation to free using
1271  * the boundary bitmap and clears the allocation map.
1272  *
1273  * RETURNS:
1274  * Number of freed bytes.
1275  */
1276 static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1277 {
1278 	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1279 	int bit_off, bits, end, oslot, freed;
1280 
1281 	lockdep_assert_held(&pcpu_lock);
1282 	pcpu_stats_area_dealloc(chunk);
1283 
1284 	oslot = pcpu_chunk_slot(chunk);
1285 
1286 	bit_off = off / PCPU_MIN_ALLOC_SIZE;
1287 
1288 	/* find end index */
1289 	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1290 			    bit_off + 1);
1291 	bits = end - bit_off;
1292 	bitmap_clear(chunk->alloc_map, bit_off, bits);
1293 
1294 	freed = bits * PCPU_MIN_ALLOC_SIZE;
1295 
1296 	/* update metadata */
1297 	chunk->free_bytes += freed;
1298 
1299 	/* update first free bit */
1300 	chunk_md->first_free = min(chunk_md->first_free, bit_off);
1301 
1302 	pcpu_block_update_hint_free(chunk, bit_off, bits);
1303 
1304 	pcpu_chunk_relocate(chunk, oslot);
1305 
1306 	return freed;
1307 }
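
/*
 * Illustrative example of the two bitmaps above (assuming a 4 byte
 * PCPU_MIN_ALLOC_SIZE): after pcpu_alloc_area() places a 16-bit
 * allocation at bit 32, bits [32, 48) of alloc_map are set and
 * bound_map has bits 32 and 48 set with (32, 48) clear.
 * pcpu_free_area(chunk, 32 * PCPU_MIN_ALLOC_SIZE) then finds bit 48 as
 * the next bound_map bit and frees exactly those 16 bits (64 bytes).
 */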
1308 
1309 static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1310 {
1311 	block->scan_hint = 0;
1312 	block->contig_hint = nr_bits;
1313 	block->left_free = nr_bits;
1314 	block->right_free = nr_bits;
1315 	block->first_free = 0;
1316 	block->nr_bits = nr_bits;
1317 }
1318 
1319 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1320 {
1321 	struct pcpu_block_md *md_block;
1322 
1323 	/* init the chunk's block */
1324 	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1325 
1326 	for (md_block = chunk->md_blocks;
1327 	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1328 	     md_block++)
1329 		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1330 }
1331 
1332 /**
1333  * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1334  * @tmp_addr: the start of the region served
1335  * @map_size: size of the region served
1336  *
1337  * This is responsible for creating the chunks that serve the first chunk.  The
1338  * base_addr is page aligned down of @tmp_addr while the region end is page
1339  * aligned up.  Offsets are kept track of to determine the region served. All
1340  * this is done to appease the bitmap allocator in avoiding partial blocks.
1341  *
1342  * RETURNS:
1343  * Chunk serving the region at @tmp_addr of @map_size.
1344  */
1345 static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1346 							 int map_size)
1347 {
1348 	struct pcpu_chunk *chunk;
1349 	unsigned long aligned_addr;
1350 	int start_offset, offset_bits, region_size, region_bits;
1351 	size_t alloc_size;
1352 
1353 	/* region calculations */
1354 	aligned_addr = tmp_addr & PAGE_MASK;
1355 
1356 	start_offset = tmp_addr - aligned_addr;
1357 	region_size = ALIGN(start_offset + map_size, PAGE_SIZE);
1358 
1359 	/* allocate chunk */
1360 	alloc_size = struct_size(chunk, populated,
1361 				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1362 	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1363 	if (!chunk)
1364 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1365 		      alloc_size);
1366 
1367 	INIT_LIST_HEAD(&chunk->list);
1368 
1369 	chunk->base_addr = (void *)aligned_addr;
1370 	chunk->start_offset = start_offset;
1371 	chunk->end_offset = region_size - chunk->start_offset - map_size;
1372 
1373 	chunk->nr_pages = region_size >> PAGE_SHIFT;
1374 	region_bits = pcpu_chunk_map_bits(chunk);
1375 
1376 	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1377 	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1378 	if (!chunk->alloc_map)
1379 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1380 		      alloc_size);
1381 
1382 	alloc_size =
1383 		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1384 	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1385 	if (!chunk->bound_map)
1386 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1387 		      alloc_size);
1388 
1389 	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1390 	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1391 	if (!chunk->md_blocks)
1392 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1393 		      alloc_size);
1394 
1395 #ifdef NEED_PCPUOBJ_EXT
1396 	/* first chunk is free to use */
1397 	chunk->obj_exts = NULL;
1398 #endif
1399 	pcpu_init_md_blocks(chunk);
1400 
1401 	/* manage populated page bitmap */
1402 	chunk->immutable = true;
1403 	bitmap_fill(chunk->populated, chunk->nr_pages);
1404 	chunk->nr_populated = chunk->nr_pages;
1405 	chunk->nr_empty_pop_pages = chunk->nr_pages;
1406 
1407 	chunk->free_bytes = map_size;
1408 
1409 	if (chunk->start_offset) {
1410 		/* hide the beginning of the bitmap */
1411 		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1412 		bitmap_set(chunk->alloc_map, 0, offset_bits);
1413 		set_bit(0, chunk->bound_map);
1414 		set_bit(offset_bits, chunk->bound_map);
1415 
1416 		chunk->chunk_md.first_free = offset_bits;
1417 
1418 		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1419 	}
1420 
1421 	if (chunk->end_offset) {
1422 		/* hide the end of the bitmap */
1423 		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1424 		bitmap_set(chunk->alloc_map,
1425 			   pcpu_chunk_map_bits(chunk) - offset_bits,
1426 			   offset_bits);
1427 		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1428 			chunk->bound_map);
1429 		set_bit(region_bits, chunk->bound_map);
1430 
1431 		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1432 					     - offset_bits, offset_bits);
1433 	}
1434 
1435 	return chunk;
1436 }
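
/*
 * Illustrative example of the region calculation above (4K pages): for
 * tmp_addr ending in 0x200 and map_size == 20480, aligned_addr drops
 * the low 0x200, start_offset == 0x200, region_size == ALIGN(0x200 +
 * 20480, 4096) == 24576 and end_offset == 24576 - 0x200 - 20480 ==
 * 3584, giving a 6 page chunk that serves only the middle region.
 */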
1437 
1438 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1439 {
1440 	struct pcpu_chunk *chunk;
1441 	int region_bits;
1442 
1443 	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1444 	if (!chunk)
1445 		return NULL;
1446 
1447 	INIT_LIST_HEAD(&chunk->list);
1448 	chunk->nr_pages = pcpu_unit_pages;
1449 	region_bits = pcpu_chunk_map_bits(chunk);
1450 
1451 	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1452 					   sizeof(chunk->alloc_map[0]), gfp);
1453 	if (!chunk->alloc_map)
1454 		goto alloc_map_fail;
1455 
1456 	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1457 					   sizeof(chunk->bound_map[0]), gfp);
1458 	if (!chunk->bound_map)
1459 		goto bound_map_fail;
1460 
1461 	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1462 					   sizeof(chunk->md_blocks[0]), gfp);
1463 	if (!chunk->md_blocks)
1464 		goto md_blocks_fail;
1465 
1466 #ifdef NEED_PCPUOBJ_EXT
1467 	if (need_pcpuobj_ext()) {
1468 		chunk->obj_exts =
1469 			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1470 					sizeof(struct pcpuobj_ext), gfp);
1471 		if (!chunk->obj_exts)
1472 			goto objcg_fail;
1473 	}
1474 #endif
1475 
1476 	pcpu_init_md_blocks(chunk);
1477 
1478 	/* init metadata */
1479 	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1480 
1481 	return chunk;
1482 
1483 #ifdef NEED_PCPUOBJ_EXT
1484 objcg_fail:
1485 	pcpu_mem_free(chunk->md_blocks);
1486 #endif
1487 md_blocks_fail:
1488 	pcpu_mem_free(chunk->bound_map);
1489 bound_map_fail:
1490 	pcpu_mem_free(chunk->alloc_map);
1491 alloc_map_fail:
1492 	pcpu_mem_free(chunk);
1493 
1494 	return NULL;
1495 }
1496 
1497 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1498 {
1499 	if (!chunk)
1500 		return;
1501 #ifdef NEED_PCPUOBJ_EXT
1502 	pcpu_mem_free(chunk->obj_exts);
1503 #endif
1504 	pcpu_mem_free(chunk->md_blocks);
1505 	pcpu_mem_free(chunk->bound_map);
1506 	pcpu_mem_free(chunk->alloc_map);
1507 	pcpu_mem_free(chunk);
1508 }
1509 
1510 /**
1511  * pcpu_chunk_populated - post-population bookkeeping
1512  * @chunk: pcpu_chunk which got populated
1513  * @page_start: the start page
1514  * @page_end: the end page
1515  *
1516  * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1517  * the bookkeeping information accordingly.  Must be called after each
1518  * successful population.
1519  */
1520 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1521 				 int page_end)
1522 {
1523 	int nr = page_end - page_start;
1524 
1525 	lockdep_assert_held(&pcpu_lock);
1526 
1527 	bitmap_set(chunk->populated, page_start, nr);
1528 	chunk->nr_populated += nr;
1529 	pcpu_nr_populated += nr;
1530 
1531 	pcpu_update_empty_pages(chunk, nr);
1532 }
1533 
1534 /**
1535  * pcpu_chunk_depopulated - post-depopulation bookkeeping
1536  * @chunk: pcpu_chunk which got depopulated
1537  * @page_start: the start page
1538  * @page_end: the end page
1539  *
1540  * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1541  * Update the bookkeeping information accordingly.  Must be called after
1542  * each successful depopulation.
1543  */
1544 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1545 				   int page_start, int page_end)
1546 {
1547 	int nr = page_end - page_start;
1548 
1549 	lockdep_assert_held(&pcpu_lock);
1550 
1551 	bitmap_clear(chunk->populated, page_start, nr);
1552 	chunk->nr_populated -= nr;
1553 	pcpu_nr_populated -= nr;
1554 
1555 	pcpu_update_empty_pages(chunk, -nr);
1556 }
1557 
1558 /*
1559  * Chunk management implementation.
1560  *
1561  * To allow different implementations, chunk alloc/free and
1562  * [de]population are implemented in a separate file which is pulled
1563  * into this file and compiled together.  The following functions
1564  * should be implemented.
1565  *
1566  * pcpu_populate_chunk		- populate the specified range of a chunk
1567  * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
1568  * pcpu_post_unmap_tlb_flush	- flush tlb for the specified range of a chunk
1569  * pcpu_create_chunk		- create a new chunk
1570  * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
1571  * pcpu_addr_to_page		- translate address to the corresponding struct page
1572  * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
1573  */
1574 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1575 			       int page_start, int page_end, gfp_t gfp);
1576 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1577 				  int page_start, int page_end);
1578 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1579 				      int page_start, int page_end);
1580 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1581 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1582 static struct page *pcpu_addr_to_page(void *addr);
1583 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1584 
1585 #ifdef CONFIG_NEED_PER_CPU_KM
1586 #include "percpu-km.c"
1587 #else
1588 #include "percpu-vm.c"
1589 #endif
1590 
1591 /**
1592  * pcpu_chunk_addr_search - determine chunk containing specified address
1593  * @addr: address for which the chunk needs to be determined.
1594  *
1595  * This is an internal function that handles all but static allocations.
1596  * Static percpu address values should never be passed into the allocator.
1597  *
1598  * RETURNS:
1599  * The address of the found chunk.
1600  */
1601 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1602 {
1603 	/* is it in the dynamic region (first chunk)? */
1604 	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1605 		return pcpu_first_chunk;
1606 
1607 	/* is it in the reserved region? */
1608 	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1609 		return pcpu_reserved_chunk;
1610 
1611 	/*
1612 	 * The address is relative to unit0 which might be unused and
1613 	 * thus unmapped.  Offset the address to the unit space of the
1614 	 * current processor before looking it up in the vmalloc
1615 	 * space.  Note that any possible cpu id can be used here, so
1616 	 * there's no need to worry about preemption or cpu hotplug.
1617 	 */
1618 	addr += pcpu_unit_offsets[raw_smp_processor_id()];
1619 	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1620 }
1621 
1622 #ifdef CONFIG_MEMCG
1623 static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1624 				      struct obj_cgroup **objcgp)
1625 {
1626 	struct obj_cgroup *objcg;
1627 
1628 	if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT))
1629 		return true;
1630 
1631 	objcg = current_obj_cgroup();
1632 	if (!objcg)
1633 		return true;
1634 
1635 	if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size)))
1636 		return false;
1637 
1638 	*objcgp = objcg;
1639 	return true;
1640 }
1641 
1642 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1643 				       struct pcpu_chunk *chunk, int off,
1644 				       size_t size)
1645 {
1646 	if (!objcg)
1647 		return;
1648 
1649 	if (likely(chunk && chunk->obj_exts)) {
1650 		obj_cgroup_get(objcg);
1651 		chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = objcg;
1652 
1653 		rcu_read_lock();
1654 		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1655 				pcpu_obj_full_size(size));
1656 		rcu_read_unlock();
1657 	} else {
1658 		obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
1659 	}
1660 }
1661 
1662 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1663 {
1664 	struct obj_cgroup *objcg;
1665 
1666 	if (unlikely(!chunk->obj_exts))
1667 		return;
1668 
1669 	objcg = chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup;
1670 	if (!objcg)
1671 		return;
1672 	chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = NULL;
1673 
1674 	obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
1675 
1676 	rcu_read_lock();
1677 	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1678 			-pcpu_obj_full_size(size));
1679 	rcu_read_unlock();
1680 
1681 	obj_cgroup_put(objcg);
1682 }
1683 
1684 #else /* CONFIG_MEMCG */
1685 static bool
1686 pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1687 {
1688 	return true;
1689 }
1690 
1691 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1692 				       struct pcpu_chunk *chunk, int off,
1693 				       size_t size)
1694 {
1695 }
1696 
1697 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1698 {
1699 }
1700 #endif /* CONFIG_MEMCG */
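
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the hooks
 * above only charge an allocation when __GFP_ACCOUNT is set, in which case the
 * owning cgroup is charged pcpu_obj_full_size(size).  A caller opting in could
 * look like (struct foo_stats is made up):
 *
 *	struct foo_stats __percpu *stats;
 *
 *	stats = alloc_percpu_gfp(struct foo_stats,
 *				 GFP_KERNEL | __GFP_ACCOUNT);
 *	if (!stats)
 *		return -ENOMEM;
 */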
1701 
1702 #ifdef CONFIG_MEM_ALLOC_PROFILING
1703 static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
1704 				      size_t size)
1705 {
1706 	if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts)) {
1707 		alloc_tag_add(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag,
1708 			      current->alloc_tag, size);
1709 	}
1710 }
1711 
1712 static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1713 {
1714 	if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts))
1715 		alloc_tag_sub(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag, size);
1716 }
1717 #else
1718 static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
1719 				      size_t size)
1720 {
1721 }
1722 
1723 static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1724 {
1725 }
1726 #endif
1727 
1728 /**
1729  * pcpu_alloc - the percpu allocator
1730  * @size: size of area to allocate in bytes
1731  * @align: alignment of area (max PAGE_SIZE)
1732  * @reserved: allocate from the reserved chunk if available
1733  * @gfp: allocation flags
1734  *
1735  * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
1736  * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1737  * then no warning will be triggered on invalid or failed allocation
1738  * requests.
1739  *
1740  * RETURNS:
1741  * Percpu pointer to the allocated area on success, NULL on failure.
1742  */
1743 void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
1744 				 gfp_t gfp)
1745 {
1746 	gfp_t pcpu_gfp;
1747 	bool is_atomic;
1748 	bool do_warn;
1749 	struct obj_cgroup *objcg = NULL;
1750 	static int warn_limit = 10;
1751 	struct pcpu_chunk *chunk, *next;
1752 	const char *err;
1753 	int slot, off, cpu, ret;
1754 	unsigned long flags;
1755 	void __percpu *ptr;
1756 	size_t bits, bit_align;
1757 
1758 	gfp = current_gfp_context(gfp);
1759 	/* whitelisted flags that can be passed to the backing allocators */
1760 	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1761 	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1762 	do_warn = !(gfp & __GFP_NOWARN);
1763 
1764 	/*
1765 	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1766 	 * therefore alignment must be a minimum of that many bytes.
1767 	 * Rounding the size up can introduce internal fragmentation of up
1768 	 * to PCPU_MIN_ALLOC_SIZE - 1 bytes.
1769 	 */
1770 	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1771 		align = PCPU_MIN_ALLOC_SIZE;
1772 
1773 	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1774 	bits = size >> PCPU_MIN_ALLOC_SHIFT;
1775 	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
1776 
1777 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1778 		     !is_power_of_2(align))) {
1779 		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1780 		     size, align);
1781 		return NULL;
1782 	}
1783 
1784 	if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
1785 		return NULL;
1786 
1787 	if (!is_atomic) {
1788 		/*
1789 		 * pcpu_balance_workfn() allocates memory under this mutex,
1790 		 * and it may wait for memory reclaim. Allow current task
1791 		 * to become OOM victim, in case of memory pressure.
1792 		 */
1793 		if (gfp & __GFP_NOFAIL) {
1794 			mutex_lock(&pcpu_alloc_mutex);
1795 		} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1796 			pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1797 			return NULL;
1798 		}
1799 	}
1800 
1801 	spin_lock_irqsave(&pcpu_lock, flags);
1802 
1803 	/* serve reserved allocations from the reserved chunk if available */
1804 	if (reserved && pcpu_reserved_chunk) {
1805 		chunk = pcpu_reserved_chunk;
1806 
1807 		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1808 		if (off < 0) {
1809 			err = "alloc from reserved chunk failed";
1810 			goto fail_unlock;
1811 		}
1812 
1813 		off = pcpu_alloc_area(chunk, bits, bit_align, off);
1814 		if (off >= 0)
1815 			goto area_found;
1816 
1817 		err = "alloc from reserved chunk failed";
1818 		goto fail_unlock;
1819 	}
1820 
1821 restart:
1822 	/* search through normal chunks */
1823 	for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
1824 		list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1825 					 list) {
1826 			off = pcpu_find_block_fit(chunk, bits, bit_align,
1827 						  is_atomic);
1828 			if (off < 0) {
1829 				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1830 					pcpu_chunk_move(chunk, 0);
1831 				continue;
1832 			}
1833 
1834 			off = pcpu_alloc_area(chunk, bits, bit_align, off);
1835 			if (off >= 0) {
1836 				pcpu_reintegrate_chunk(chunk);
1837 				goto area_found;
1838 			}
1839 		}
1840 	}
1841 
1842 	spin_unlock_irqrestore(&pcpu_lock, flags);
1843 
1844 	if (is_atomic) {
1845 		err = "atomic alloc failed, no space left";
1846 		goto fail;
1847 	}
1848 
1849 	/* No space left.  Create a new chunk. */
1850 	if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1851 		chunk = pcpu_create_chunk(pcpu_gfp);
1852 		if (!chunk) {
1853 			err = "failed to allocate new chunk";
1854 			goto fail;
1855 		}
1856 
1857 		spin_lock_irqsave(&pcpu_lock, flags);
1858 		pcpu_chunk_relocate(chunk, -1);
1859 	} else {
1860 		spin_lock_irqsave(&pcpu_lock, flags);
1861 	}
1862 
1863 	goto restart;
1864 
1865 area_found:
1866 	pcpu_stats_area_alloc(chunk, size);
1867 
1868 	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1869 		pcpu_schedule_balance_work();
1870 
1871 	spin_unlock_irqrestore(&pcpu_lock, flags);
1872 
1873 	/* populate if not all pages are already there */
1874 	if (!is_atomic) {
1875 		unsigned int page_end, rs, re;
1876 
1877 		rs = PFN_DOWN(off);
1878 		page_end = PFN_UP(off + size);
1879 
1880 		for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
1881 			WARN_ON(chunk->immutable);
1882 
1883 			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1884 
1885 			spin_lock_irqsave(&pcpu_lock, flags);
1886 			if (ret) {
1887 				pcpu_free_area(chunk, off);
1888 				err = "failed to populate";
1889 				goto fail_unlock;
1890 			}
1891 			pcpu_chunk_populated(chunk, rs, re);
1892 			spin_unlock_irqrestore(&pcpu_lock, flags);
1893 		}
1894 
1895 		mutex_unlock(&pcpu_alloc_mutex);
1896 	}
1897 
1898 	/* clear the areas and return address relative to base address */
1899 	for_each_possible_cpu(cpu)
1900 		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1901 
1902 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1903 	kmemleak_alloc_percpu(ptr, size, gfp);
1904 
1905 	trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
1906 				  chunk->base_addr, off, ptr,
1907 				  pcpu_obj_full_size(size), gfp);
1908 
1909 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1910 
1911 	pcpu_alloc_tag_alloc_hook(chunk, off, size);
1912 
1913 	return ptr;
1914 
1915 fail_unlock:
1916 	spin_unlock_irqrestore(&pcpu_lock, flags);
1917 fail:
1918 	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1919 
1920 	if (do_warn && warn_limit) {
1921 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1922 			size, align, is_atomic, err);
1923 		if (!is_atomic)
1924 			dump_stack();
1925 		if (!--warn_limit)
1926 			pr_info("limit reached, disable warning\n");
1927 	}
1928 
1929 	if (is_atomic) {
1930 		/* see the flag handling in pcpu_balance_workfn() */
1931 		pcpu_atomic_alloc_failed = true;
1932 		pcpu_schedule_balance_work();
1933 	} else {
1934 		mutex_unlock(&pcpu_alloc_mutex);
1935 	}
1936 
1937 	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1938 
1939 	return NULL;
1940 }
1941 EXPORT_SYMBOL_GPL(pcpu_alloc_noprof);
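
/*
 * Illustrative usage sketch (hypothetical driver, not part of this file).
 * Callers normally reach pcpu_alloc_noprof() through the alloc_percpu*()
 * wrappers in include/linux/percpu.h and release the area with free_percpu():
 *
 *	unsigned long __percpu *hits;
 *	unsigned long total = 0;
 *	unsigned int cpu;
 *
 *	hits = alloc_percpu(unsigned long);
 *	if (!hits)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(*hits);			// fast path, no locking needed
 *
 *	for_each_possible_cpu(cpu)		// occasional summation
 *		total += *per_cpu_ptr(hits, cpu);
 *
 *	free_percpu(hits);			// NULL-safe
 */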
1942 
1943 /**
1944  * pcpu_balance_free - manage the amount of free chunks
1945  * @empty_only: free chunks only if there are no populated pages
1946  *
1947  * If empty_only is %false, reclaim all fully free chunks regardless of the
1948  * number of populated pages.  Otherwise, only reclaim chunks that have no
1949  * populated pages.
1950  *
1951  * CONTEXT:
1952  * pcpu_lock (can be dropped temporarily)
1953  */
1954 static void pcpu_balance_free(bool empty_only)
1955 {
1956 	LIST_HEAD(to_free);
1957 	struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
1958 	struct pcpu_chunk *chunk, *next;
1959 
1960 	lockdep_assert_held(&pcpu_lock);
1961 
1962 	/*
1963 	 * There's no reason to keep around multiple unused chunks and VM
1964 	 * areas can be scarce.  Destroy all free chunks except for one.
1965 	 */
1966 	list_for_each_entry_safe(chunk, next, free_head, list) {
1967 		WARN_ON(chunk->immutable);
1968 
1969 		/* spare the first one */
1970 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1971 			continue;
1972 
1973 		if (!empty_only || chunk->nr_empty_pop_pages == 0)
1974 			list_move(&chunk->list, &to_free);
1975 	}
1976 
1977 	if (list_empty(&to_free))
1978 		return;
1979 
1980 	spin_unlock_irq(&pcpu_lock);
1981 	list_for_each_entry_safe(chunk, next, &to_free, list) {
1982 		unsigned int rs, re;
1983 
1984 		for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
1985 			pcpu_depopulate_chunk(chunk, rs, re);
1986 			spin_lock_irq(&pcpu_lock);
1987 			pcpu_chunk_depopulated(chunk, rs, re);
1988 			spin_unlock_irq(&pcpu_lock);
1989 		}
1990 		pcpu_destroy_chunk(chunk);
1991 		cond_resched();
1992 	}
1993 	spin_lock_irq(&pcpu_lock);
1994 }
1995 
1996 /**
1997  * pcpu_balance_populated - manage the amount of populated pages
1998  *
1999  * Maintain a certain amount of populated pages to satisfy atomic allocations.
2000  * It is possible that this is called when physical memory is scarce causing
2001  * OOM killer to be triggered.  We should avoid doing so until an actual
2002  * allocation causes the failure as it is possible that requests can be
2003  * serviced from already backed regions.
2004  *
2005  * CONTEXT:
2006  * pcpu_lock (can be dropped temporarily)
2007  */
2008 static void pcpu_balance_populated(void)
2009 {
2010 	/* gfp flags passed to underlying allocators */
2011 	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
2012 	struct pcpu_chunk *chunk;
2013 	int slot, nr_to_pop, ret;
2014 
2015 	lockdep_assert_held(&pcpu_lock);
2016 
2017 	/*
2018 	 * Ensure there are a certain number of free populated pages for
2019 	 * atomic allocs.  Fill up from the most packed so that atomic
2020 	 * allocs don't increase fragmentation.  If atomic allocation
2021 	 * failed previously, always populate the maximum amount.  This
2022 	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
2023 	 * failing indefinitely; however, large atomic allocs are not
2024 	 * something we support properly and can be highly unreliable and
2025 	 * inefficient.
2026 	 */
2027 retry_pop:
2028 	if (pcpu_atomic_alloc_failed) {
2029 		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
2030 		/* best effort anyway, don't worry about synchronization */
2031 		pcpu_atomic_alloc_failed = false;
2032 	} else {
2033 		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2034 				  pcpu_nr_empty_pop_pages,
2035 				  0, PCPU_EMPTY_POP_PAGES_HIGH);
2036 	}
2037 
2038 	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
2039 		unsigned int nr_unpop = 0, rs, re;
2040 
2041 		if (!nr_to_pop)
2042 			break;
2043 
2044 		list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
2045 			nr_unpop = chunk->nr_pages - chunk->nr_populated;
2046 			if (nr_unpop)
2047 				break;
2048 		}
2049 
2050 		if (!nr_unpop)
2051 			continue;
2052 
2053 		/* @chunk can't go away while pcpu_alloc_mutex is held */
2054 		for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
2055 			int nr = min_t(int, re - rs, nr_to_pop);
2056 
2057 			spin_unlock_irq(&pcpu_lock);
2058 			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2059 			cond_resched();
2060 			spin_lock_irq(&pcpu_lock);
2061 			if (!ret) {
2062 				nr_to_pop -= nr;
2063 				pcpu_chunk_populated(chunk, rs, rs + nr);
2064 			} else {
2065 				nr_to_pop = 0;
2066 			}
2067 
2068 			if (!nr_to_pop)
2069 				break;
2070 		}
2071 	}
2072 
2073 	if (nr_to_pop) {
2074 		/* ran out of chunks to populate, create a new one and retry */
2075 		spin_unlock_irq(&pcpu_lock);
2076 		chunk = pcpu_create_chunk(gfp);
2077 		cond_resched();
2078 		spin_lock_irq(&pcpu_lock);
2079 		if (chunk) {
2080 			pcpu_chunk_relocate(chunk, -1);
2081 			goto retry_pop;
2082 		}
2083 	}
2084 }
2085 
2086 /**
2087  * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
2088  *
2089  * Scan over chunks in the depopulate list and try to release unused populated
2090  * pages back to the system.  Depopulated chunks are sidelined to prevent
2091  * repopulating these pages unless required.  Fully free chunks are reintegrated
2092  * and freed accordingly (1 is kept around).  If we drop below the empty
2093  * populated pages threshold, reintegrate the chunk if it has empty free pages.
2094  * Each chunk is scanned in the reverse order to keep populated pages close to
2095  * the beginning of the chunk.
2096  *
2097  * CONTEXT:
2098  * pcpu_lock (can be dropped temporarily)
2099  *
2100  */
2101 static void pcpu_reclaim_populated(void)
2102 {
2103 	struct pcpu_chunk *chunk;
2104 	struct pcpu_block_md *block;
2105 	int freed_page_start, freed_page_end;
2106 	int i, end;
2107 	bool reintegrate;
2108 
2109 	lockdep_assert_held(&pcpu_lock);
2110 
2111 	/*
2112 	 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2113 	 * longer discoverable to allocations which may populate pages.  The only
2114 	 * other accessor is the free path, which only returns the area back to
2115 	 * the allocator without touching the populated bitmap.
2116 	 */
2117 	while ((chunk = list_first_entry_or_null(
2118 			&pcpu_chunk_lists[pcpu_to_depopulate_slot],
2119 			struct pcpu_chunk, list))) {
2120 		WARN_ON(chunk->immutable);
2121 
2122 		/*
2123 		 * Scan chunk's pages in the reverse order to keep populated
2124 		 * pages close to the beginning of the chunk.
2125 		 */
2126 		freed_page_start = chunk->nr_pages;
2127 		freed_page_end = 0;
2128 		reintegrate = false;
2129 		for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2130 			/* no more work to do */
2131 			if (chunk->nr_empty_pop_pages == 0)
2132 				break;
2133 
2134 			/* reintegrate chunk to prevent atomic alloc failures */
2135 			if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
2136 				reintegrate = true;
2137 				break;
2138 			}
2139 
2140 			/*
2141 			 * If the page is empty and populated, start or
2142 			 * extend the (i, end) range.  If i == 0, decrease
2143 			 * i and perform the depopulation to cover the last
2144 			 * (first) page in the chunk.
2145 			 */
2146 			block = chunk->md_blocks + i;
2147 			if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2148 			    test_bit(i, chunk->populated)) {
2149 				if (end == -1)
2150 					end = i;
2151 				if (i > 0)
2152 					continue;
2153 				i--;
2154 			}
2155 
2156 			/* depopulate if there is an active range */
2157 			if (end == -1)
2158 				continue;
2159 
2160 			spin_unlock_irq(&pcpu_lock);
2161 			pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2162 			cond_resched();
2163 			spin_lock_irq(&pcpu_lock);
2164 
2165 			pcpu_chunk_depopulated(chunk, i + 1, end + 1);
2166 			freed_page_start = min(freed_page_start, i + 1);
2167 			freed_page_end = max(freed_page_end, end + 1);
2168 
2169 			/* reset the range and continue */
2170 			end = -1;
2171 		}
2172 
2173 		/* batch tlb flush per chunk to amortize cost */
2174 		if (freed_page_start < freed_page_end) {
2175 			spin_unlock_irq(&pcpu_lock);
2176 			pcpu_post_unmap_tlb_flush(chunk,
2177 						  freed_page_start,
2178 						  freed_page_end);
2179 			cond_resched();
2180 			spin_lock_irq(&pcpu_lock);
2181 		}
2182 
2183 		if (reintegrate || chunk->free_bytes == pcpu_unit_size)
2184 			pcpu_reintegrate_chunk(chunk);
2185 		else
2186 			list_move_tail(&chunk->list,
2187 				       &pcpu_chunk_lists[pcpu_sidelined_slot]);
2188 	}
2189 }
2190 
2191 /**
2192  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
2193  * @work: unused
2194  *
2195  * For each chunk type, manage the number of fully free chunks and the number of
2196  * populated pages.  An important thing to consider is when pages are freed and
2197  * how they contribute to the global counts.
2198  */
2199 static void pcpu_balance_workfn(struct work_struct *work)
2200 {
2201 	/*
2202 	 * pcpu_balance_free() is called twice because the first time we may
2203 	 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2204 	 * to grow other chunks.  This then gives pcpu_reclaim_populated() time
2205 	 * to move fully free chunks to the active list to be freed if
2206 	 * appropriate.
2207 	 */
2208 	mutex_lock(&pcpu_alloc_mutex);
2209 	spin_lock_irq(&pcpu_lock);
2210 
2211 	pcpu_balance_free(false);
2212 	pcpu_reclaim_populated();
2213 	pcpu_balance_populated();
2214 	pcpu_balance_free(true);
2215 
2216 	spin_unlock_irq(&pcpu_lock);
2217 	mutex_unlock(&pcpu_alloc_mutex);
2218 }
2219 
2220 /**
2221  * free_percpu - free percpu area
2222  * @ptr: pointer to area to free
2223  *
2224  * Free percpu area @ptr.
2225  *
2226  * CONTEXT:
2227  * Can be called from atomic context.
2228  */
2229 void free_percpu(void __percpu *ptr)
2230 {
2231 	void *addr;
2232 	struct pcpu_chunk *chunk;
2233 	unsigned long flags;
2234 	int size, off;
2235 	bool need_balance = false;
2236 
2237 	if (!ptr)
2238 		return;
2239 
2240 	kmemleak_free_percpu(ptr);
2241 
2242 	addr = __pcpu_ptr_to_addr(ptr);
2243 	chunk = pcpu_chunk_addr_search(addr);
2244 	off = addr - chunk->base_addr;
2245 
2246 	spin_lock_irqsave(&pcpu_lock, flags);
2247 	size = pcpu_free_area(chunk, off);
2248 
2249 	pcpu_alloc_tag_free_hook(chunk, off, size);
2250 
2251 	pcpu_memcg_free_hook(chunk, off, size);
2252 
2253 	/*
2254 	 * If there is more than one fully free chunk, wake up the grim reaper.
2255 	 * If the chunk is isolated, it may be in the process of being
2256 	 * reclaimed.  Let reclaim manage cleaning up of that chunk.
2257 	 */
2258 	if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
2259 		struct pcpu_chunk *pos;
2260 
2261 		list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
2262 			if (pos != chunk) {
2263 				need_balance = true;
2264 				break;
2265 			}
2266 	} else if (pcpu_should_reclaim_chunk(chunk)) {
2267 		pcpu_isolate_chunk(chunk);
2268 		need_balance = true;
2269 	}
2270 
2271 	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2272 
2273 	spin_unlock_irqrestore(&pcpu_lock, flags);
2274 
2275 	if (need_balance)
2276 		pcpu_schedule_balance_work();
2277 }
2278 EXPORT_SYMBOL_GPL(free_percpu);
2279 
2280 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2281 {
2282 #ifdef CONFIG_SMP
2283 	const size_t static_size = __per_cpu_end - __per_cpu_start;
2284 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2285 	unsigned int cpu;
2286 
2287 	for_each_possible_cpu(cpu) {
2288 		void *start = per_cpu_ptr(base, cpu);
2289 		void *va = (void *)addr;
2290 
2291 		if (va >= start && va < start + static_size) {
2292 			if (can_addr) {
2293 				*can_addr = (unsigned long) (va - start);
2294 				*can_addr += (unsigned long)
2295 					per_cpu_ptr(base, get_boot_cpu_id());
2296 			}
2297 			return true;
2298 		}
2299 	}
2300 #endif
2301 	/* on UP, can't distinguish from other static vars, always false */
2302 	return false;
2303 }
2304 
2305 /**
2306  * is_kernel_percpu_address - test whether address is from static percpu area
2307  * @addr: address to test
2308  *
2309  * Test whether @addr belongs to the in-kernel static percpu area.  Module
2310  * static percpu areas are not considered.  For those, use
2311  * is_module_percpu_address().
2312  *
2313  * RETURNS:
2314  * %true if @addr is from in-kernel static percpu area, %false otherwise.
2315  */
2316 bool is_kernel_percpu_address(unsigned long addr)
2317 {
2318 	return __is_kernel_percpu_address(addr, NULL);
2319 }
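
/*
 * Usage sketch (hypothetical caller, not part of this file): debugging or
 * pointer-scanning code typically uses this to avoid treating static percpu
 * addresses as ordinary kernel pointers:
 *
 *	if (is_kernel_percpu_address(addr))
 *		return;		// handled via the percpu allocator instead
 */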
2320 
2321 /**
2322  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2323  * @addr: the address to be converted to physical address
2324  *
2325  * Given @addr, a dereferenceable address obtained via one of the percpu
2326  * access macros, this function translates it into its physical
2327  * address.  The caller is responsible for ensuring @addr stays valid
2328  * until this function finishes.
2329  *
2330  * The percpu allocator has a special setup for the first chunk, which is
2331  * currently either embedded in the linear address space or vmalloc mapped.
2332  * For every chunk after the first, the backing allocator (currently either
2333  * vm or km) provides the translation.
2334  *
2335  * The address could be translated without checking whether it falls into
2336  * the first chunk, but the current code better reflects how the percpu
2337  * allocator actually works, and the verification can catch bugs both in
2338  * the percpu allocator itself and in per_cpu_ptr_to_phys() callers.  So
2339  * we keep the current code.
2340  *
2341  * RETURNS:
2342  * The physical address for @addr.
2343  */
2344 phys_addr_t per_cpu_ptr_to_phys(void *addr)
2345 {
2346 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2347 	bool in_first_chunk = false;
2348 	unsigned long first_low, first_high;
2349 	unsigned int cpu;
2350 
2351 	/*
2352 	 * The following test on unit_low/high isn't strictly
2353 	 * necessary but will speed up lookups of addresses which
2354 	 * aren't in the first chunk.
2355 	 *
2356 	 * The address check is against full chunk sizes.  pcpu_base_addr
2357 	 * points to the beginning of the first chunk including the
2358 	 * static region.  Assumes good intent as the first chunk may
2359 	 * not be full (ie. < pcpu_unit_pages in size).
2360 	 */
2361 	first_low = (unsigned long)pcpu_base_addr +
2362 		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2363 	first_high = (unsigned long)pcpu_base_addr +
2364 		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2365 	if ((unsigned long)addr >= first_low &&
2366 	    (unsigned long)addr < first_high) {
2367 		for_each_possible_cpu(cpu) {
2368 			void *start = per_cpu_ptr(base, cpu);
2369 
2370 			if (addr >= start && addr < start + pcpu_unit_size) {
2371 				in_first_chunk = true;
2372 				break;
2373 			}
2374 		}
2375 	}
2376 
2377 	if (in_first_chunk) {
2378 		if (!is_vmalloc_addr(addr))
2379 			return __pa(addr);
2380 		else
2381 			return page_to_phys(vmalloc_to_page(addr)) +
2382 			       offset_in_page(addr);
2383 	} else
2384 		return page_to_phys(pcpu_addr_to_page(addr)) +
2385 		       offset_in_page(addr);
2386 }
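
/*
 * Usage sketch (hypothetical, not part of this file): the argument must be a
 * dereferenceable per-cpu address, e.g. one obtained with per_cpu_ptr(), not
 * the raw __percpu cookie returned by the allocator (struct foo is made up):
 *
 *	struct foo __percpu *p = alloc_percpu(struct foo);
 *	phys_addr_t phys;
 *
 *	phys = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 */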
2387 
2388 /**
2389  * pcpu_alloc_alloc_info - allocate percpu allocation info
2390  * @nr_groups: the number of groups
2391  * @nr_units: the number of units
2392  *
2393  * Allocate ai which is large enough for @nr_groups groups containing
2394  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
2395  * cpu_map array which is long enough for @nr_units and filled with
2396  * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
2397  * pointer of other groups.
2398  *
2399  * RETURNS:
2400  * Pointer to the allocated pcpu_alloc_info on success, NULL on
2401  * failure.
2402  */
2403 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2404 						      int nr_units)
2405 {
2406 	struct pcpu_alloc_info *ai;
2407 	size_t base_size, ai_size;
2408 	void *ptr;
2409 	int unit;
2410 
2411 	base_size = ALIGN(struct_size(ai, groups, nr_groups),
2412 			  __alignof__(ai->groups[0].cpu_map[0]));
2413 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2414 
2415 	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2416 	if (!ptr)
2417 		return NULL;
2418 	ai = ptr;
2419 	ptr += base_size;
2420 
2421 	ai->groups[0].cpu_map = ptr;
2422 
2423 	for (unit = 0; unit < nr_units; unit++)
2424 		ai->groups[0].cpu_map[unit] = NR_CPUS;
2425 
2426 	ai->nr_groups = nr_groups;
2427 	ai->__ai_size = PFN_ALIGN(ai_size);
2428 
2429 	return ai;
2430 }
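
/*
 * Sketch of the expected follow-up by the caller (group_cnt[] and upa here
 * are illustrative placeholders): carve each group's cpu_map out of the
 * groups[0].cpu_map array allocated above, exactly as pcpu_build_alloc_info()
 * does further below:
 *
 *	cpu_map = ai->groups[0].cpu_map;
 *	for (group = 0; group < nr_groups; group++) {
 *		ai->groups[group].cpu_map = cpu_map;
 *		cpu_map += roundup(group_cnt[group], upa);
 *	}
 */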
2431 
2432 /**
2433  * pcpu_free_alloc_info - free percpu allocation info
2434  * @ai: pcpu_alloc_info to free
2435  *
2436  * Free @ai which was allocated by pcpu_alloc_alloc_info().
2437  */
2438 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2439 {
2440 	memblock_free(ai, ai->__ai_size);
2441 }
2442 
2443 /**
2444  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2445  * @lvl: loglevel
2446  * @ai: allocation info to dump
2447  *
2448  * Print out information about @ai using loglevel @lvl.
2449  */
2450 static void pcpu_dump_alloc_info(const char *lvl,
2451 				 const struct pcpu_alloc_info *ai)
2452 {
2453 	int group_width = 1, cpu_width = 1, width;
2454 	char empty_str[] = "--------";
2455 	int alloc = 0, alloc_end = 0;
2456 	int group, v;
2457 	int upa, apl;	/* units per alloc, allocs per line */
2458 
2459 	v = ai->nr_groups;
2460 	while (v /= 10)
2461 		group_width++;
2462 
2463 	v = num_possible_cpus();
2464 	while (v /= 10)
2465 		cpu_width++;
2466 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2467 
2468 	upa = ai->alloc_size / ai->unit_size;
2469 	width = upa * (cpu_width + 1) + group_width + 3;
2470 	apl = rounddown_pow_of_two(max(60 / width, 1));
2471 
2472 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2473 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2474 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2475 
2476 	for (group = 0; group < ai->nr_groups; group++) {
2477 		const struct pcpu_group_info *gi = &ai->groups[group];
2478 		int unit = 0, unit_end = 0;
2479 
2480 		BUG_ON(gi->nr_units % upa);
2481 		for (alloc_end += gi->nr_units / upa;
2482 		     alloc < alloc_end; alloc++) {
2483 			if (!(alloc % apl)) {
2484 				pr_cont("\n");
2485 				printk("%spcpu-alloc: ", lvl);
2486 			}
2487 			pr_cont("[%0*d] ", group_width, group);
2488 
2489 			for (unit_end += upa; unit < unit_end; unit++)
2490 				if (gi->cpu_map[unit] != NR_CPUS)
2491 					pr_cont("%0*d ",
2492 						cpu_width, gi->cpu_map[unit]);
2493 				else
2494 					pr_cont("%s ", empty_str);
2495 		}
2496 	}
2497 	pr_cont("\n");
2498 }
2499 
2500 /**
2501  * pcpu_setup_first_chunk - initialize the first percpu chunk
2502  * @ai: pcpu_alloc_info describing how the percpu area is shaped
2503  * @base_addr: mapped address
2504  *
2505  * Initialize the first percpu chunk which contains the kernel static
2506  * percpu area.  This function is to be called from arch percpu area
2507  * setup path.
2508  *
2509  * @ai contains all information necessary to initialize the first
2510  * chunk and prime the dynamic percpu allocator.
2511  *
2512  * @ai->static_size is the size of static percpu area.
2513  *
2514  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2515  * reserve after the static area in the first chunk.  This reserves
2516  * the first chunk such that it's available only through reserved
2517  * percpu allocation.  This is primarily used to serve module percpu
2518  * static areas on architectures where the addressing model has
2519  * limited offset range for symbol relocations to guarantee module
2520  * percpu symbols fall inside the relocatable range.
2521  *
2522  * @ai->dyn_size determines the number of bytes available for dynamic
2523  * allocation in the first chunk.  The area between @ai->static_size +
2524  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2525  *
2526  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2527  * and equal to or larger than @ai->static_size + @ai->reserved_size +
2528  * @ai->dyn_size.
2529  *
2530  * @ai->atom_size is the allocation atom size and used as alignment
2531  * for vm areas.
2532  *
2533  * @ai->alloc_size is the allocation size and always multiple of
2534  * @ai->atom_size.  This is larger than @ai->atom_size if
2535  * @ai->unit_size is larger than @ai->atom_size.
2536  *
2537  * @ai->nr_groups and @ai->groups describe virtual memory layout of
2538  * percpu areas.  Units which should be colocated are put into the
2539  * same group.  Dynamic VM areas will be allocated according to these
2540  * groupings.  If @ai->nr_groups is zero, a single group containing
2541  * all units is assumed.
2542  *
2543  * The caller should have mapped the first chunk at @base_addr and
2544  * copied static data to each unit.
2545  *
2546  * The first chunk will always contain a static and a dynamic region.
2547  * However, the static region is not managed by any chunk.  If the first
2548  * chunk also contains a reserved region, it is served by two chunks -
2549  * one for the reserved region and one for the dynamic region.  They
2550  * share the same vm, but use offset regions in the area allocation map.
2551  * The chunk serving the dynamic region is circulated in the chunk slots
2552  * and available for dynamic allocation like any other chunk.
2553  */
2554 void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2555 				   void *base_addr)
2556 {
2557 	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2558 	size_t static_size, dyn_size;
2559 	unsigned long *group_offsets;
2560 	size_t *group_sizes;
2561 	unsigned long *unit_off;
2562 	unsigned int cpu;
2563 	int *unit_map;
2564 	int group, unit, i;
2565 	unsigned long tmp_addr;
2566 	size_t alloc_size;
2567 
2568 #define PCPU_SETUP_BUG_ON(cond)	do {					\
2569 	if (unlikely(cond)) {						\
2570 		pr_emerg("failed to initialize, %s\n", #cond);		\
2571 		pr_emerg("cpu_possible_mask=%*pb\n",			\
2572 			 cpumask_pr_args(cpu_possible_mask));		\
2573 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
2574 		BUG();							\
2575 	}								\
2576 } while (0)
2577 
2578 	/* sanity checks */
2579 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2580 #ifdef CONFIG_SMP
2581 	PCPU_SETUP_BUG_ON(!ai->static_size);
2582 	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2583 #endif
2584 	PCPU_SETUP_BUG_ON(!base_addr);
2585 	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2586 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2587 	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2588 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2589 	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2590 	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2591 	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2592 	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2593 			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2594 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2595 
2596 	/* process group information and build config tables accordingly */
2597 	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2598 	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2599 	if (!group_offsets)
2600 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2601 		      alloc_size);
2602 
2603 	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2604 	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2605 	if (!group_sizes)
2606 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2607 		      alloc_size);
2608 
2609 	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2610 	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2611 	if (!unit_map)
2612 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2613 		      alloc_size);
2614 
2615 	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2616 	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2617 	if (!unit_off)
2618 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2619 		      alloc_size);
2620 
2621 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2622 		unit_map[cpu] = UINT_MAX;
2623 
2624 	pcpu_low_unit_cpu = NR_CPUS;
2625 	pcpu_high_unit_cpu = NR_CPUS;
2626 
2627 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2628 		const struct pcpu_group_info *gi = &ai->groups[group];
2629 
2630 		group_offsets[group] = gi->base_offset;
2631 		group_sizes[group] = gi->nr_units * ai->unit_size;
2632 
2633 		for (i = 0; i < gi->nr_units; i++) {
2634 			cpu = gi->cpu_map[i];
2635 			if (cpu == NR_CPUS)
2636 				continue;
2637 
2638 			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2639 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2640 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2641 
2642 			unit_map[cpu] = unit + i;
2643 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2644 
2645 			/* determine low/high unit_cpu */
2646 			if (pcpu_low_unit_cpu == NR_CPUS ||
2647 			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2648 				pcpu_low_unit_cpu = cpu;
2649 			if (pcpu_high_unit_cpu == NR_CPUS ||
2650 			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2651 				pcpu_high_unit_cpu = cpu;
2652 		}
2653 	}
2654 	pcpu_nr_units = unit;
2655 
2656 	for_each_possible_cpu(cpu)
2657 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2658 
2659 	/* we're done parsing the input, undefine BUG macro and dump config */
2660 #undef PCPU_SETUP_BUG_ON
2661 	pcpu_dump_alloc_info(KERN_DEBUG, ai);
2662 
2663 	pcpu_nr_groups = ai->nr_groups;
2664 	pcpu_group_offsets = group_offsets;
2665 	pcpu_group_sizes = group_sizes;
2666 	pcpu_unit_map = unit_map;
2667 	pcpu_unit_offsets = unit_off;
2668 
2669 	/* determine basic parameters */
2670 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2671 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2672 	pcpu_atom_size = ai->atom_size;
2673 	pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
2674 					     BITS_TO_LONGS(pcpu_unit_pages));
2675 
2676 	pcpu_stats_save_ai(ai);
2677 
2678 	/*
2679 	 * Allocate chunk slots.  The slots after the active slots are:
2680 	 *   sidelined_slot - isolated, depopulated chunks
2681 	 *   free_slot - fully free chunks
2682 	 *   to_depopulate_slot - isolated, chunks to depopulate
2683 	 */
2684 	pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2685 	pcpu_free_slot = pcpu_sidelined_slot + 1;
2686 	pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2687 	pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
2688 	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2689 					  sizeof(pcpu_chunk_lists[0]),
2690 					  SMP_CACHE_BYTES);
2691 	if (!pcpu_chunk_lists)
2692 		panic("%s: Failed to allocate %zu bytes\n", __func__,
2693 		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
2694 
2695 	for (i = 0; i < pcpu_nr_slots; i++)
2696 		INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
2697 
2698 	/*
2699 	 * The end of the static region needs to be aligned with the
2700 	 * minimum allocation size as this offsets the reserved and
2701 	 * dynamic region.  The first chunk ends page aligned because the
2702 	 * dynamic region was expanded; therefore, the dynamic region can be
2703 	 * shrunk to compensate while still staying above the
2704 	 * configured sizes.
2705 	 */
2706 	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2707 	dyn_size = ai->dyn_size - (static_size - ai->static_size);
2708 
2709 	/*
2710 	 * Initialize first chunk:
2711 	 * This chunk is broken up into 3 parts:
2712 	 *		< static | [reserved] | dynamic >
2713 	 * - static - there is no backing chunk because these allocations can
2714 	 *   never be freed.
2715 	 * - reserved (pcpu_reserved_chunk) - exists primarily to serve
2716 	 *   allocations from module load.
2717 	 * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first
2718 	 *   chunk.
2719 	 */
2720 	tmp_addr = (unsigned long)base_addr + static_size;
2721 	if (ai->reserved_size)
2722 		pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr,
2723 						ai->reserved_size);
2724 	tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
2725 	pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size);
2726 
2727 	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2728 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2729 
2730 	/* include all regions of the first chunk */
2731 	pcpu_nr_populated += PFN_DOWN(size_sum);
2732 
2733 	pcpu_stats_chunk_alloc();
2734 	trace_percpu_create_chunk(base_addr);
2735 
2736 	/* we're done */
2737 	pcpu_base_addr = base_addr;
2738 }
2739 
2740 #ifdef CONFIG_SMP
2741 
2742 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2743 	[PCPU_FC_AUTO]	= "auto",
2744 	[PCPU_FC_EMBED]	= "embed",
2745 	[PCPU_FC_PAGE]	= "page",
2746 };
2747 
2748 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2749 
2750 static int __init percpu_alloc_setup(char *str)
2751 {
2752 	if (!str)
2753 		return -EINVAL;
2754 
2755 	if (0)
2756 		/* nada */;
2757 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2758 	else if (!strcmp(str, "embed"))
2759 		pcpu_chosen_fc = PCPU_FC_EMBED;
2760 #endif
2761 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2762 	else if (!strcmp(str, "page"))
2763 		pcpu_chosen_fc = PCPU_FC_PAGE;
2764 #endif
2765 	else
2766 		pr_warn("unknown allocator %s specified\n", str);
2767 
2768 	return 0;
2769 }
2770 early_param("percpu_alloc", percpu_alloc_setup);
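
/*
 * Example (kernel command line): the first chunk allocator can be chosen at
 * boot, limited to what the architecture config actually provides:
 *
 *	percpu_alloc=embed
 *	percpu_alloc=page
 */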
2771 
2772 /*
2773  * pcpu_embed_first_chunk() is used by the generic percpu setup.
2774  * Build it if the arch config needs it or if the generic setup is
2775  * going to be used.
2776  */
2777 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2778 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2779 #define BUILD_EMBED_FIRST_CHUNK
2780 #endif
2781 
2782 /* build pcpu_page_first_chunk() iff needed by the arch config */
2783 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2784 #define BUILD_PAGE_FIRST_CHUNK
2785 #endif
2786 
2787 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2788 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2789 /**
2790  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2791  * @reserved_size: the size of reserved percpu area in bytes
2792  * @dyn_size: minimum free size for dynamic allocation in bytes
2793  * @atom_size: allocation atom size
2794  * @cpu_distance_fn: callback to determine distance between cpus, optional
2795  *
2796  * This function determines grouping of units, their mappings to cpus
2797  * and other parameters considering needed percpu size, allocation
2798  * atom size and distances between CPUs.
2799  *
2800  * Groups are always multiples of atom size and CPUs which are of
2801  * LOCAL_DISTANCE both ways are grouped together and share space for
2802  * units in the same group.  The returned configuration is guaranteed
2803  * to have CPUs on different nodes in different groups and >=75% usage
2804  * of allocated virtual address space.
2805  *
2806  * RETURNS:
2807  * On success, pointer to the new allocation_info is returned.  On
2808  * failure, ERR_PTR value is returned.
2809  */
2810 static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
2811 				size_t reserved_size, size_t dyn_size,
2812 				size_t atom_size,
2813 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2814 {
2815 	static int group_map[NR_CPUS] __initdata;
2816 	static int group_cnt[NR_CPUS] __initdata;
2817 	static struct cpumask mask __initdata;
2818 	const size_t static_size = __per_cpu_end - __per_cpu_start;
2819 	int nr_groups = 1, nr_units = 0;
2820 	size_t size_sum, min_unit_size, alloc_size;
2821 	int upa, max_upa, best_upa;	/* units_per_alloc */
2822 	int last_allocs, group, unit;
2823 	unsigned int cpu, tcpu;
2824 	struct pcpu_alloc_info *ai;
2825 	unsigned int *cpu_map;
2826 
2827 	/* this function may be called multiple times */
2828 	memset(group_map, 0, sizeof(group_map));
2829 	memset(group_cnt, 0, sizeof(group_cnt));
2830 	cpumask_clear(&mask);
2831 
2832 	/* calculate size_sum and ensure dyn_size is enough for early alloc */
2833 	size_sum = PFN_ALIGN(static_size + reserved_size +
2834 			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2835 	dyn_size = size_sum - static_size - reserved_size;
2836 
2837 	/*
2838 	 * Determine min_unit_size, alloc_size and max_upa such that
2839 	 * alloc_size is a multiple of atom_size and is the smallest
2840 	 * which can accommodate 4k aligned segments which are equal to
2841 	 * or larger than min_unit_size.
2842 	 */
2843 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2844 
2845 	/* determine the maximum # of units that can fit in an allocation */
2846 	alloc_size = roundup(min_unit_size, atom_size);
2847 	upa = alloc_size / min_unit_size;
2848 	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2849 		upa--;
2850 	max_upa = upa;
2851 
2852 	cpumask_copy(&mask, cpu_possible_mask);
2853 
2854 	/* group cpus according to their proximity */
2855 	for (group = 0; !cpumask_empty(&mask); group++) {
2856 		/* pop the group's first cpu */
2857 		cpu = cpumask_first(&mask);
2858 		group_map[cpu] = group;
2859 		group_cnt[group]++;
2860 		cpumask_clear_cpu(cpu, &mask);
2861 
2862 		for_each_cpu(tcpu, &mask) {
2863 			if (!cpu_distance_fn ||
2864 			    (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2865 			     cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2866 				group_map[tcpu] = group;
2867 				group_cnt[group]++;
2868 				cpumask_clear_cpu(tcpu, &mask);
2869 			}
2870 		}
2871 	}
2872 	nr_groups = group;
2873 
2874 	/*
2875 	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2876 	 * Expand the unit_size until we use >= 75% of the units allocated.
2877 	 * This is related to atom_size, which could be much larger than the unit_size.
2878 	 */
2879 	last_allocs = INT_MAX;
2880 	best_upa = 0;
2881 	for (upa = max_upa; upa; upa--) {
2882 		int allocs = 0, wasted = 0;
2883 
2884 		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2885 			continue;
2886 
2887 		for (group = 0; group < nr_groups; group++) {
2888 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2889 			allocs += this_allocs;
2890 			wasted += this_allocs * upa - group_cnt[group];
2891 		}
2892 
2893 		/*
2894 		 * Don't accept if wastage is over 1/3.  The
2895 		 * greater-than comparison ensures upa==1 always
2896 		 * passes the following check.
2897 		 */
2898 		if (wasted > num_possible_cpus() / 3)
2899 			continue;
2900 
2901 		/* and then don't consume more memory */
2902 		if (allocs > last_allocs)
2903 			break;
2904 		last_allocs = allocs;
2905 		best_upa = upa;
2906 	}
2907 	BUG_ON(!best_upa);
2908 	upa = best_upa;
2909 
2910 	/* allocate and fill alloc_info */
2911 	for (group = 0; group < nr_groups; group++)
2912 		nr_units += roundup(group_cnt[group], upa);
2913 
2914 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2915 	if (!ai)
2916 		return ERR_PTR(-ENOMEM);
2917 	cpu_map = ai->groups[0].cpu_map;
2918 
2919 	for (group = 0; group < nr_groups; group++) {
2920 		ai->groups[group].cpu_map = cpu_map;
2921 		cpu_map += roundup(group_cnt[group], upa);
2922 	}
2923 
2924 	ai->static_size = static_size;
2925 	ai->reserved_size = reserved_size;
2926 	ai->dyn_size = dyn_size;
2927 	ai->unit_size = alloc_size / upa;
2928 	ai->atom_size = atom_size;
2929 	ai->alloc_size = alloc_size;
2930 
2931 	for (group = 0, unit = 0; group < nr_groups; group++) {
2932 		struct pcpu_group_info *gi = &ai->groups[group];
2933 
2934 		/*
2935 		 * Initialize base_offset as if all groups are located
2936 		 * back-to-back.  The caller should update this to
2937 		 * reflect actual allocation.
2938 		 */
2939 		gi->base_offset = unit * ai->unit_size;
2940 
2941 		for_each_possible_cpu(cpu)
2942 			if (group_map[cpu] == group)
2943 				gi->cpu_map[gi->nr_units++] = cpu;
2944 		gi->nr_units = roundup(gi->nr_units, upa);
2945 		unit += gi->nr_units;
2946 	}
2947 	BUG_ON(unit != nr_units);
2948 
2949 	return ai;
2950 }
2951 
2952 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
2953 				   pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
2954 {
2955 	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
2956 #ifdef CONFIG_NUMA
2957 	int node = NUMA_NO_NODE;
2958 	void *ptr;
2959 
2960 	if (cpu_to_nd_fn)
2961 		node = cpu_to_nd_fn(cpu);
2962 
2963 	if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
2964 		ptr = memblock_alloc_from(size, align, goal);
2965 		pr_info("cpu %d has no node %d or node-local memory\n",
2966 			cpu, node);
2967 		pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
2968 			 cpu, size, (u64)__pa(ptr));
2969 	} else {
2970 		ptr = memblock_alloc_try_nid(size, align, goal,
2971 					     MEMBLOCK_ALLOC_ACCESSIBLE,
2972 					     node);
2973 
2974 		pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
2975 			 cpu, size, node, (u64)__pa(ptr));
2976 	}
2977 	return ptr;
2978 #else
2979 	return memblock_alloc_from(size, align, goal);
2980 #endif
2981 }
2982 
2983 static void __init pcpu_fc_free(void *ptr, size_t size)
2984 {
2985 	memblock_free(ptr, size);
2986 }
2987 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2988 
2989 #if defined(BUILD_EMBED_FIRST_CHUNK)
2990 /**
2991  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2992  * @reserved_size: the size of reserved percpu area in bytes
2993  * @dyn_size: minimum free size for dynamic allocation in bytes
2994  * @atom_size: allocation atom size
2995  * @cpu_distance_fn: callback to determine distance between cpus, optional
2996  * @cpu_to_nd_fn: callback to convert cpu to its node, optional
2997  *
2998  * This is a helper to ease setting up embedded first percpu chunk and
2999  * can be called where pcpu_setup_first_chunk() is expected.
3000  *
3001  * If this function is used to set up the first chunk, it is allocated
3002  * by calling pcpu_fc_alloc and used as-is without being mapped into the
3003  * vmalloc area.  Allocations are always whole multiples of @atom_size
3004  * aligned to @atom_size.
3005  *
3006  * This enables the first chunk to piggy back on the linear physical
3007  * mapping, which often uses larger page sizes.  Please note that this
3008  * can result in very sparse cpu->unit mapping on NUMA machines thus
3009  * requiring large vmalloc address space.  Don't use this allocator if
3010  * vmalloc space is not orders of magnitude larger than distances
3011  * between node memory addresses (ie. 32bit NUMA machines).
3012  *
3013  * @dyn_size specifies the minimum dynamic area size.
3014  *
3015  * If the needed size is smaller than the minimum or specified unit
3016  * size, the leftover is returned using pcpu_fc_free.
3017  *
3018  * RETURNS:
3019  * 0 on success, -errno on failure.
3020  */
3021 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
3022 				  size_t atom_size,
3023 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
3024 				  pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3025 {
3026 	void *base = (void *)ULONG_MAX;
3027 	void **areas = NULL;
3028 	struct pcpu_alloc_info *ai;
3029 	size_t size_sum, areas_size;
3030 	unsigned long max_distance;
3031 	int group, i, highest_group, rc = 0;
3032 
3033 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
3034 				   cpu_distance_fn);
3035 	if (IS_ERR(ai))
3036 		return PTR_ERR(ai);
3037 
3038 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
3039 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
3040 
3041 	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
3042 	if (!areas) {
3043 		rc = -ENOMEM;
3044 		goto out_free;
3045 	}
3046 
3047 	/* allocate, copy and determine base address & max_distance */
3048 	highest_group = 0;
3049 	for (group = 0; group < ai->nr_groups; group++) {
3050 		struct pcpu_group_info *gi = &ai->groups[group];
3051 		unsigned int cpu = NR_CPUS;
3052 		void *ptr;
3053 
3054 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
3055 			cpu = gi->cpu_map[i];
3056 		BUG_ON(cpu == NR_CPUS);
3057 
3058 		/* allocate space for the whole group */
3059 		ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
3060 		if (!ptr) {
3061 			rc = -ENOMEM;
3062 			goto out_free_areas;
3063 		}
3064 		/* kmemleak tracks the percpu allocations separately */
3065 		kmemleak_ignore_phys(__pa(ptr));
3066 		areas[group] = ptr;
3067 
3068 		base = min(ptr, base);
3069 		if (ptr > areas[highest_group])
3070 			highest_group = group;
3071 	}
3072 	max_distance = areas[highest_group] - base;
3073 	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
3074 
3075 	/* warn if maximum distance is further than 75% of vmalloc space */
3076 	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
3077 		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
3078 				max_distance, VMALLOC_TOTAL);
3079 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
3080 		/* and fail if we have fallback */
3081 		rc = -EINVAL;
3082 		goto out_free_areas;
3083 #endif
3084 	}
3085 
3086 	/*
3087 	 * Copy data and free unused parts.  This should happen after all
3088 	 * allocations are complete; otherwise, we may end up with
3089 	 * overlapping groups.
3090 	 */
3091 	for (group = 0; group < ai->nr_groups; group++) {
3092 		struct pcpu_group_info *gi = &ai->groups[group];
3093 		void *ptr = areas[group];
3094 
3095 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3096 			if (gi->cpu_map[i] == NR_CPUS) {
3097 				/* unused unit, free whole */
3098 				pcpu_fc_free(ptr, ai->unit_size);
3099 				continue;
3100 			}
3101 			/* copy and return the unused part */
3102 			memcpy(ptr, __per_cpu_load, ai->static_size);
3103 			pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
3104 		}
3105 	}
3106 
3107 	/* base address is now known, determine group base offsets */
3108 	for (group = 0; group < ai->nr_groups; group++) {
3109 		ai->groups[group].base_offset = areas[group] - base;
3110 	}
3111 
3112 	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
3113 		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3114 		ai->dyn_size, ai->unit_size);
3115 
3116 	pcpu_setup_first_chunk(ai, base);
3117 	goto out_free;
3118 
3119 out_free_areas:
3120 	for (group = 0; group < ai->nr_groups; group++)
3121 		if (areas[group])
3122 			pcpu_fc_free(areas[group],
3123 				ai->groups[group].nr_units * ai->unit_size);
3124 out_free:
3125 	pcpu_free_alloc_info(ai);
3126 	if (areas)
3127 		memblock_free(areas, areas_size);
3128 	return rc;
3129 }
3130 #endif /* BUILD_EMBED_FIRST_CHUNK */
3131 
3132 #ifdef BUILD_PAGE_FIRST_CHUNK
3133 #include <asm/pgalloc.h>
3134 
3135 #ifndef P4D_TABLE_SIZE
3136 #define P4D_TABLE_SIZE PAGE_SIZE
3137 #endif
3138 
3139 #ifndef PUD_TABLE_SIZE
3140 #define PUD_TABLE_SIZE PAGE_SIZE
3141 #endif
3142 
3143 #ifndef PMD_TABLE_SIZE
3144 #define PMD_TABLE_SIZE PAGE_SIZE
3145 #endif
3146 
3147 #ifndef PTE_TABLE_SIZE
3148 #define PTE_TABLE_SIZE PAGE_SIZE
3149 #endif
3150 void __init __weak pcpu_populate_pte(unsigned long addr)
3151 {
3152 	pgd_t *pgd = pgd_offset_k(addr);
3153 	p4d_t *p4d;
3154 	pud_t *pud;
3155 	pmd_t *pmd;
3156 
3157 	if (pgd_none(*pgd)) {
3158 		p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
3159 		if (!p4d)
3160 			goto err_alloc;
3161 		pgd_populate(&init_mm, pgd, p4d);
3162 	}
3163 
3164 	p4d = p4d_offset(pgd, addr);
3165 	if (p4d_none(*p4d)) {
3166 		pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
3167 		if (!pud)
3168 			goto err_alloc;
3169 		p4d_populate(&init_mm, p4d, pud);
3170 	}
3171 
3172 	pud = pud_offset(p4d, addr);
3173 	if (pud_none(*pud)) {
3174 		pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
3175 		if (!pmd)
3176 			goto err_alloc;
3177 		pud_populate(&init_mm, pud, pmd);
3178 	}
3179 
3180 	pmd = pmd_offset(pud, addr);
3181 	if (!pmd_present(*pmd)) {
3182 		pte_t *new;
3183 
3184 		new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
3185 		if (!new)
3186 			goto err_alloc;
3187 		pmd_populate_kernel(&init_mm, pmd, new);
3188 	}
3189 
3190 	return;
3191 
3192 err_alloc:
3193 	panic("%s: Failed to allocate memory\n", __func__);
3194 }
3195 
3196 /**
3197  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3198  * @reserved_size: the size of reserved percpu area in bytes
3199  * @cpu_to_nd_fn: callback to convert cpu to its node, optional
3200  *
3201  * This is a helper to ease setting up page-remapped first percpu
3202  * chunk and can be called where pcpu_setup_first_chunk() is expected.
3203  *
3204  * This is the basic allocator.  The static percpu area is allocated
3205  * page-by-page into the vmalloc area.
3206  *
3207  * RETURNS:
3208  * 0 on success, -errno on failure.
3209  */
3210 int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3211 {
3212 	static struct vm_struct vm;
3213 	struct pcpu_alloc_info *ai;
3214 	char psize_str[16];
3215 	int unit_pages;
3216 	size_t pages_size;
3217 	struct page **pages;
3218 	int unit, i, j, rc = 0;
3219 	int upa;
3220 	int nr_g0_units;
3221 
3222 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
3223 
3224 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3225 	if (IS_ERR(ai))
3226 		return PTR_ERR(ai);
3227 	BUG_ON(ai->nr_groups != 1);
3228 	upa = ai->alloc_size/ai->unit_size;
3229 	nr_g0_units = roundup(num_possible_cpus(), upa);
3230 	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
3231 		pcpu_free_alloc_info(ai);
3232 		return -EINVAL;
3233 	}
3234 
3235 	unit_pages = ai->unit_size >> PAGE_SHIFT;
3236 
3237 	/* unaligned allocations can't be freed, round up to page size */
3238 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3239 			       sizeof(pages[0]));
3240 	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3241 	if (!pages)
3242 		panic("%s: Failed to allocate %zu bytes\n", __func__,
3243 		      pages_size);
3244 
3245 	/* allocate pages */
3246 	j = 0;
3247 	for (unit = 0; unit < num_possible_cpus(); unit++) {
3248 		unsigned int cpu = ai->groups[0].cpu_map[unit];
3249 		for (i = 0; i < unit_pages; i++) {
3250 			void *ptr;
3251 
3252 			ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
3253 			if (!ptr) {
3254 				pr_warn("failed to allocate %s page for cpu%u\n",
3255 						psize_str, cpu);
3256 				goto enomem;
3257 			}
3258 			/* kmemleak tracks the percpu allocations separately */
3259 			kmemleak_ignore_phys(__pa(ptr));
3260 			pages[j++] = virt_to_page(ptr);
3261 		}
3262 	}
3263 
3264 	/* allocate vm area, map the pages and copy static data */
3265 	vm.flags = VM_ALLOC;
3266 	vm.size = num_possible_cpus() * ai->unit_size;
3267 	vm_area_register_early(&vm, PAGE_SIZE);
3268 
3269 	for (unit = 0; unit < num_possible_cpus(); unit++) {
3270 		unsigned long unit_addr =
3271 			(unsigned long)vm.addr + unit * ai->unit_size;
3272 
3273 		for (i = 0; i < unit_pages; i++)
3274 			pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));
3275 
3276 		/* pte already populated, the following shouldn't fail */
3277 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3278 				      unit_pages);
3279 		if (rc < 0)
3280 			panic("failed to map percpu area, err=%d\n", rc);
3281 
3282 		flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
3283 
3284 		/* copy static data */
3285 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3286 	}
3287 
3288 	/* we're ready, commit */
3289 	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
3290 		unit_pages, psize_str, ai->static_size,
3291 		ai->reserved_size, ai->dyn_size);
3292 
3293 	pcpu_setup_first_chunk(ai, vm.addr);
3294 	goto out_free_ar;
3295 
3296 enomem:
3297 	while (--j >= 0)
3298 		pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
3299 	rc = -ENOMEM;
3300 out_free_ar:
3301 	memblock_free(pages, pages_size);
3302 	pcpu_free_alloc_info(ai);
3303 	return rc;
3304 }
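
/*
 * Illustrative sketch (compiled out): roughly how an architecture might
 * wire pcpu_page_first_chunk() up from its own setup_per_cpu_areas(),
 * mirroring the generic embed-based setup further down in this file.  The
 * early_cpu_to_node() callback name is an assumption here; callers pass
 * whatever early cpu-to-node lookup they have, or NULL when NUMA placement
 * does not matter.
 */
#if 0
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas (err=%d).", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif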
3305 #endif /* BUILD_PAGE_FIRST_CHUNK */
3306 
3307 #ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
3308 /*
3309  * Generic SMP percpu area setup.
3310  *
3311  * The embedding helper is used because its behavior closely resembles
3312  * the original non-dynamic generic percpu area setup.  This is
3313  * important because many archs have addressing restrictions and might
3314  * fail if the percpu area is located far away from the previous
3315  * location.  As an added bonus, in non-NUMA cases, embedding is
3316  * generally a good idea TLB-wise because the percpu area can piggyback
3317  * on the physical linear memory mapping which uses large page
3318  * mappings on applicable archs.
3319  */
3320 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3321 EXPORT_SYMBOL(__per_cpu_offset);
3322 
3323 void __init setup_per_cpu_areas(void)
3324 {
3325 	unsigned long delta;
3326 	unsigned int cpu;
3327 	int rc;
3328 
3329 	/*
3330 	 * Always reserve area for module percpu variables.  That's
3331 	 * what the legacy allocator did.
3332 	 */
3333 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
3334 				    PAGE_SIZE, NULL, NULL);
3335 	if (rc < 0)
3336 		panic("Failed to initialize percpu areas.");
3337 
3338 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3339 	for_each_possible_cpu(cpu)
3340 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3341 }
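
/*
 * With the offsets above in place, reaching a static percpu variable for a
 * given cpu is plain pointer arithmetic: per_cpu_ptr(&var, cpu) works out
 * to roughly &var + __per_cpu_offset[cpu] on this generic setup.
 */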
3342 #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3343 
3344 #else	/* CONFIG_SMP */
3345 
3346 /*
3347  * UP percpu area setup.
3348  *
3349  * UP always uses km-based percpu allocator with identity mapping.
3350  * Static percpu variables are indistinguishable from the usual static
3351  * variables and don't require any special preparation.
3352  */
3353 void __init setup_per_cpu_areas(void)
3354 {
3355 	const size_t unit_size =
3356 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3357 					 PERCPU_DYNAMIC_RESERVE));
3358 	struct pcpu_alloc_info *ai;
3359 	void *fc;
3360 
3361 	ai = pcpu_alloc_alloc_info(1, 1);
3362 	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3363 	if (!ai || !fc)
3364 		panic("Failed to allocate memory for percpu areas.");
3365 	/* kmemleak tracks the percpu allocations separately */
3366 	kmemleak_ignore_phys(__pa(fc));
3367 
3368 	ai->dyn_size = unit_size;
3369 	ai->unit_size = unit_size;
3370 	ai->atom_size = unit_size;
3371 	ai->alloc_size = unit_size;
3372 	ai->groups[0].nr_units = 1;
3373 	ai->groups[0].cpu_map[0] = 0;
3374 
3375 	pcpu_setup_first_chunk(ai, fc);
3376 	pcpu_free_alloc_info(ai);
3377 }
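
/*
 * Rough size illustration (numbers hypothetical): if PCPU_MIN_UNIT_SIZE
 * were 32 KiB and PERCPU_DYNAMIC_RESERVE 28 KiB, the expression above
 * would evaluate to roundup_pow_of_two(max(32K, 28K)) = 32 KiB for the
 * single UP unit.
 */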
3378 
3379 #endif	/* CONFIG_SMP */
3380 
3381 /*
3382  * pcpu_nr_pages - calculate total number of populated backing pages
3383  *
3384  * This reflects the number of pages populated to back chunks.  Metadata is
3385  * excluded from the number exposed in meminfo as the number of backing pages
3386  * scales with the number of cpus and can quickly outweigh the memory used for
3387  * metadata.  It also keeps this calculation nice and simple.
3388  *
3389  * RETURNS:
3390  * Total number of populated backing pages in use by the allocator.
3391  */
3392 unsigned long pcpu_nr_pages(void)
3393 {
3394 	return pcpu_nr_populated * pcpu_nr_units;
3395 }
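
/*
 * Worked example (numbers made up): with 16 units and 24 populated pages
 * per unit, this returns 16 * 24 = 384 pages, i.e. 1.5 MiB of backing
 * memory with 4 KiB pages.
 */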
3396 
3397 /*
3398  * The percpu allocator is initialized early during boot when neither slab nor
3399  * workqueue is available.  Plug async management until everything is up
3400  * and running.
3401  */
3402 static int __init percpu_enable_async(void)
3403 {
3404 	pcpu_async_enabled = true;
3405 	return 0;
3406 }
3407 subsys_initcall(percpu_enable_async);
3408