/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is a percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks in the vmalloc area.
 * Each chunk consists of num_possible_cpus() units and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).  The
 * unit size grows as necessary and all units grow or shrink in
 * unison.  When a chunk is filled up, another chunk is allocated, ie.
 * in the vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers pcpu_unit_size apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative one an allocated region.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
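 *
 * For example (numbers purely illustrative), a fresh chunk with a 64k
 * unit starts with map = { 65536 }; after a 4k allocation followed by
 * a 512 byte one the map reads { -4096, -512, 60928 }, with
 * map_used == 3.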
 *
 * To use this allocator, arch code should do the following.
 *
 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   a regular address to a percpu pointer and back, if they need to
 *   be different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif

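/*
 * A sketch of the default translation above, with made-up addresses:
 * if pcpu_base_addr == 0xffffc20000000000 and __per_cpu_start ==
 * 0xffffffff80500000, an allocation at vmalloc address
 * 0xffffc20000001000 yields the percpu pointer 0xffffffff80501000,
 * and __pcpu_ptr_to_addr() undoes exactly that offset.
 */
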
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The size of the
 * reserved portion is in pcpu_reserved_chunk_limit.  When the
 * reserved area doesn't exist, the following variables contain NULL
 * and 0 respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks and chunk->page arrays.
 * The latter is a spinlock and protects the index data structures -
 * chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
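
/*
 * A rough feel for the mapping above (assuming the usual 1-based
 * fls()): each power-of-two size range gets its own list slot, e.g.
 * __pcpu_size_to_slot(4) == 1 and __pcpu_size_to_slot(1024) == 8,
 * while a completely free chunk is parked in the last slot via the
 * pcpu_unit_size special case in pcpu_size_to_slot().
 */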

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

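/*
 * Quick sketch of the layout implied by the helpers above
 * (illustrative numbers): with pcpu_unit_pages == 16, page 3 of cpu 2
 * lives at chunk->page[2 * 16 + 3], i.e. index 35, and its address is
 * chunk->vm->addr + (35 << PAGE_SHIFT).
 */
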
static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * The new slot according to the changed state is determined and
 * @chunk is moved to the slot.  Note that the reserved chunk is never
 * put on chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->vm->addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}

/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend the area map of @chunk so that it can accommodate an
 * allocation.  A single allocation can split an area into three
 * areas, so this function makes sure that @chunk->map has at least
 * two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if the area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to the new area map.  Only a
	 * free could have happened in between, so map_used couldn't
	 * have grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	/* pcpu_lock was dropped and reacquired, tell the caller */
	return 1;
}

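/*
 * Illustrative growth sequence (not from a real trace): a chunk whose
 * map was created with the default 16 entries and has reached
 * map_used == 15 no longer has two spare slots, so the next
 * allocation attempt switches it to a freshly allocated 32-entry map,
 * then 64 and so on, doubling as needed.
 */
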
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, a @head bytes block is inserted before block @i, moving
 * it to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and a @tail bytes
 * block is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

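/*
 * Worked example (illustrative numbers): splitting a free block
 * map[i] == 512 with head == 64 and tail == 192 turns { ..., 512, ... }
 * into { ..., 64, 256, 192, ... }; the caller then marks the middle
 * 256 byte area allocated by negating its entry.
 */
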
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme in @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

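/*
 * Worked example (illustrative numbers): with map == { -64, -256, 704 },
 * freeing the area at offset 64 flips -256 back to 256 and merges it
 * with the free 704 byte neighbour, leaving map == { -64, 960 }.
 */
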
/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush_tlb: whether to flush tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * The vcache is always flushed before unmapping; if @flush_tlb is
 * true, the tlb is flushed afterwards.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush_tlb)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
			   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush_tlb)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The vcache is flushed before unmapping; if @flush is
 * true, the tlb is flushed afterwards as well.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       pcpu_chunk_pagep(chunk, cpu, page_start),
				       page_end - page_start);
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
			pcpu_set_page_chunk(*pagep, chunk);
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		/* bail out instead of dereferencing a NULL map below */
		kfree(chunk);
		return NULL;
	}
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	return __addr_to_pcpu_ptr(chunk->vm->addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

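/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	int *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (cnt) {
 *		(*per_cpu_ptr(cnt, get_cpu()))++;
 *		put_cpu();
 *	}
 *	...
 *	free_percpu(cnt);
 */
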
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);
	mutex_unlock(&pcpu_alloc_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
	}
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  If @unit_size is specified,
 * it can also return pages after the static area.  NULL return
 * indicates end of pages for the cpu.  Note that @get_page_fn() must
 * return the same number of pages for all cpus.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @dyn_size, if non-negative, determines the number of bytes
 * available for dynamic allocation in the first chunk.  Specifying a
 * non-negative value makes percpu leave alone the area beyond
 * @static_size + @reserved_size + @dyn_size.
 *
 * @unit_size, if non-negative, specifies the unit size and must be
 * aligned to PAGE_SIZE and equal to or larger than @static_size +
 * @reserved_size + @dyn_size (when @dyn_size is non-negative).
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it.  percpu must not mess
 * with the chunk.  Note that @base_addr with negative @unit_size or
 * non-NULL @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
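 * For example (sizes purely illustrative), with @static_size == 8k,
 * @reserved_size == 4k and @dyn_size == 20k, each unit of the first
 * chunk is laid out as
 *
 *	| static 8k | reserved 4k | dynamic 20k |
 *
 * with the first 12k served by the static/reserved chunk and the 20k
 * dynamic tail by the separate dynamic chunk described above.
 *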
1037fbf59bc9STejun Heo  * RETURNS:
1038fbf59bc9STejun Heo  * The determined pcpu_unit_size which can be used to initialize
1039fbf59bc9STejun Heo  * percpu access.
1040fbf59bc9STejun Heo  */
10418d408b4bSTejun Heo size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
1042edcb4639STejun Heo 				     size_t static_size, size_t reserved_size,
10436074d5b0STejun Heo 				     ssize_t dyn_size, ssize_t unit_size,
1044cafe8816STejun Heo 				     void *base_addr,
1045d4b95f80STejun Heo 				     pcpu_fc_populate_pte_fn_t populate_pte_fn)
1046fbf59bc9STejun Heo {
10472441d15cSTejun Heo 	static struct vm_struct first_vm;
1048edcb4639STejun Heo 	static int smap[2], dmap[2];
10496074d5b0STejun Heo 	size_t size_sum = static_size + reserved_size +
10506074d5b0STejun Heo 			  (dyn_size >= 0 ? dyn_size : 0);
1051edcb4639STejun Heo 	struct pcpu_chunk *schunk, *dchunk = NULL;
1052fbf59bc9STejun Heo 	unsigned int cpu;
10538d408b4bSTejun Heo 	int nr_pages;
1054fbf59bc9STejun Heo 	int err, i;
1055fbf59bc9STejun Heo 
10568d408b4bSTejun Heo 	/* santiy checks */
1057edcb4639STejun Heo 	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1058edcb4639STejun Heo 		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
10598d408b4bSTejun Heo 	BUG_ON(!static_size);
1060cafe8816STejun Heo 	if (unit_size >= 0) {
10616074d5b0STejun Heo 		BUG_ON(unit_size < size_sum);
10628d408b4bSTejun Heo 		BUG_ON(unit_size & ~PAGE_MASK);
10636074d5b0STejun Heo 		BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
10646074d5b0STejun Heo 	} else
1065cafe8816STejun Heo 		BUG_ON(base_addr);
10668d408b4bSTejun Heo 	BUG_ON(base_addr && populate_pte_fn);
1067fbf59bc9STejun Heo 
1068cafe8816STejun Heo 	if (unit_size >= 0)
10698d408b4bSTejun Heo 		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
10708d408b4bSTejun Heo 	else
10718d408b4bSTejun Heo 		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
10726074d5b0STejun Heo 					PFN_UP(size_sum));
10738d408b4bSTejun Heo 
1074d9b55eebSTejun Heo 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1075fbf59bc9STejun Heo 	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
1076fbf59bc9STejun Heo 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
1077cb83b42eSTejun Heo 		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
1078fbf59bc9STejun Heo 
1079cafe8816STejun Heo 	if (dyn_size < 0)
1080edcb4639STejun Heo 		dyn_size = pcpu_unit_size - static_size - reserved_size;
1081cafe8816STejun Heo 
1082d9b55eebSTejun Heo 	/*
1083d9b55eebSTejun Heo 	 * Allocate chunk slots.  The additional last slot is for
1084d9b55eebSTejun Heo 	 * empty chunks.
1085d9b55eebSTejun Heo 	 */
1086d9b55eebSTejun Heo 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1087fbf59bc9STejun Heo 	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1088fbf59bc9STejun Heo 	for (i = 0; i < pcpu_nr_slots; i++)
1089fbf59bc9STejun Heo 		INIT_LIST_HEAD(&pcpu_slot[i]);
1090fbf59bc9STejun Heo 
1091edcb4639STejun Heo 	/*
1092edcb4639STejun Heo 	 * Initialize static chunk.  If reserved_size is zero, the
1093edcb4639STejun Heo 	 * static chunk covers static area + dynamic allocation area
1094edcb4639STejun Heo 	 * in the first chunk.  If reserved_size is not zero, it
1095edcb4639STejun Heo 	 * covers static area + reserved area (mostly used for module
1096edcb4639STejun Heo 	 * static percpu allocation).
1097edcb4639STejun Heo 	 */
10982441d15cSTejun Heo 	schunk = alloc_bootmem(pcpu_chunk_struct_size);
10992441d15cSTejun Heo 	INIT_LIST_HEAD(&schunk->list);
11002441d15cSTejun Heo 	schunk->vm = &first_vm;
110161ace7faSTejun Heo 	schunk->map = smap;
110261ace7faSTejun Heo 	schunk->map_alloc = ARRAY_SIZE(smap);
11033e24aa58STejun Heo 	schunk->page = schunk->page_ar;
1104edcb4639STejun Heo 
1105edcb4639STejun Heo 	if (reserved_size) {
1106edcb4639STejun Heo 		schunk->free_size = reserved_size;
1107ae9e6bc9STejun Heo 		pcpu_reserved_chunk = schunk;
1108ae9e6bc9STejun Heo 		pcpu_reserved_chunk_limit = static_size + reserved_size;
1109edcb4639STejun Heo 	} else {
11102441d15cSTejun Heo 		schunk->free_size = dyn_size;
1111edcb4639STejun Heo 		dyn_size = 0;			/* dynamic area covered */
1112edcb4639STejun Heo 	}
11132441d15cSTejun Heo 	schunk->contig_hint = schunk->free_size;
1114fbf59bc9STejun Heo 
111561ace7faSTejun Heo 	schunk->map[schunk->map_used++] = -static_size;
111661ace7faSTejun Heo 	if (schunk->free_size)
111761ace7faSTejun Heo 		schunk->map[schunk->map_used++] = schunk->free_size;
111861ace7faSTejun Heo 
1119edcb4639STejun Heo 	/* init dynamic chunk if necessary */
1120edcb4639STejun Heo 	if (dyn_size) {
1121edcb4639STejun Heo 		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
1122edcb4639STejun Heo 		INIT_LIST_HEAD(&dchunk->list);
1123edcb4639STejun Heo 		dchunk->vm = &first_vm;
1124edcb4639STejun Heo 		dchunk->map = dmap;
1125edcb4639STejun Heo 		dchunk->map_alloc = ARRAY_SIZE(dmap);
1126edcb4639STejun Heo 		dchunk->page = schunk->page_ar;	/* share page map with schunk */
1127edcb4639STejun Heo 
1128edcb4639STejun Heo 		dchunk->contig_hint = dchunk->free_size = dyn_size;
1129edcb4639STejun Heo 		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1130edcb4639STejun Heo 		dchunk->map[dchunk->map_used++] = dchunk->free_size;
1131edcb4639STejun Heo 	}
1132edcb4639STejun Heo 
11338d408b4bSTejun Heo 	/* allocate vm address */
11342441d15cSTejun Heo 	first_vm.flags = VM_ALLOC;
11352441d15cSTejun Heo 	first_vm.size = pcpu_chunk_size;
11368d408b4bSTejun Heo 
11378d408b4bSTejun Heo 	if (!base_addr)
11382441d15cSTejun Heo 		vm_area_register_early(&first_vm, PAGE_SIZE);
11398d408b4bSTejun Heo 	else {
11408d408b4bSTejun Heo 		/*
11418d408b4bSTejun Heo 		 * Pages already mapped.  No need to remap into
1142edcb4639STejun Heo 		 * vmalloc area.  In this case the first chunks can't
1143edcb4639STejun Heo 		 * be mapped or unmapped by percpu and are marked
11448d408b4bSTejun Heo 		 * immutable.
11458d408b4bSTejun Heo 		 */
11462441d15cSTejun Heo 		first_vm.addr = base_addr;
11472441d15cSTejun Heo 		schunk->immutable = true;
1148edcb4639STejun Heo 		if (dchunk)
1149edcb4639STejun Heo 			dchunk->immutable = true;
1150fbf59bc9STejun Heo 	}
1151fbf59bc9STejun Heo 
11528d408b4bSTejun Heo 	/* assign pages */
11538d408b4bSTejun Heo 	nr_pages = -1;
11548d408b4bSTejun Heo 	for_each_possible_cpu(cpu) {
11558d408b4bSTejun Heo 		for (i = 0; i < pcpu_unit_pages; i++) {
11568d408b4bSTejun Heo 			struct page *page = get_page_fn(cpu, i);
11578d408b4bSTejun Heo 
11588d408b4bSTejun Heo 			if (!page)
11598d408b4bSTejun Heo 				break;
11602441d15cSTejun Heo 			*pcpu_chunk_pagep(schunk, cpu, i) = page;
11618d408b4bSTejun Heo 		}
11628d408b4bSTejun Heo 
116361ace7faSTejun Heo 		BUG_ON(i < PFN_UP(static_size));
11648d408b4bSTejun Heo 
11658d408b4bSTejun Heo 		if (nr_pages < 0)
11668d408b4bSTejun Heo 			nr_pages = i;
11678d408b4bSTejun Heo 		else
11688d408b4bSTejun Heo 			BUG_ON(nr_pages != i);
11698d408b4bSTejun Heo 	}
11708d408b4bSTejun Heo 
11718d408b4bSTejun Heo 	/* map them */
11728d408b4bSTejun Heo 	if (populate_pte_fn) {
11738d408b4bSTejun Heo 		for_each_possible_cpu(cpu)
11748d408b4bSTejun Heo 			for (i = 0; i < nr_pages; i++)
11752441d15cSTejun Heo 				populate_pte_fn(pcpu_chunk_addr(schunk,
11768d408b4bSTejun Heo 								cpu, i));
11778d408b4bSTejun Heo 
11782441d15cSTejun Heo 		err = pcpu_map(schunk, 0, nr_pages);
1179fbf59bc9STejun Heo 		if (err)
11808d408b4bSTejun Heo 			panic("failed to set up static percpu area, err=%d\n",
11818d408b4bSTejun Heo 			      err);
11828d408b4bSTejun Heo 	}
1183fbf59bc9STejun Heo 
11842441d15cSTejun Heo 	/* link the first chunk in */
1185ae9e6bc9STejun Heo 	pcpu_first_chunk = dchunk ?: schunk;
1186ae9e6bc9STejun Heo 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1187fbf59bc9STejun Heo 
1188fbf59bc9STejun Heo 	/* we're done */
11892441d15cSTejun Heo 	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
1190fbf59bc9STejun Heo 	return pcpu_unit_size;
1191fbf59bc9STejun Heo }
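
/*
 * For illustration (hypothetical sizes): a boot with static_size =
 * 64k, reserved_size = 8k and dyn_size = 20k ends up with
 *
 *	smap[] = { -64k, 8k }	(static allocated, reserved free)
 *	dmap[] = { -72k, 20k }	(static + reserved skipped, dyn free)
 *
 * following the chunk->map convention: negative entries mark
 * allocated regions, positive entries mark free ones.
 */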
119266c3a757STejun Heo 
1193*8c4bfc6eSTejun Heo static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size,
1194*8c4bfc6eSTejun Heo 				 ssize_t *dyn_sizep)
1195*8c4bfc6eSTejun Heo {
1196*8c4bfc6eSTejun Heo 	size_t size_sum;
1197*8c4bfc6eSTejun Heo 
1198*8c4bfc6eSTejun Heo 	size_sum = PFN_ALIGN(static_size + reserved_size +
1199*8c4bfc6eSTejun Heo 			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
1200*8c4bfc6eSTejun Heo 	if (*dyn_sizep != 0)
1201*8c4bfc6eSTejun Heo 		*dyn_sizep = size_sum - static_size - reserved_size;
1202*8c4bfc6eSTejun Heo 
1203*8c4bfc6eSTejun Heo 	return size_sum;
1204*8c4bfc6eSTejun Heo }
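
/*
 * A worked example of the above, assuming 4k pages and hypothetical
 * sizes: with static_size = 107k, reserved_size = 8k and *dyn_sizep =
 * 20k, size_sum = PFN_ALIGN(135k) = 136k and *dyn_sizep is bumped to
 * 21k, absorbing the alignment slack.  With *dyn_sizep = -1 (auto),
 * size_sum = PFN_ALIGN(115k) = 116k and *dyn_sizep becomes 1k - just
 * the slack.  Only *dyn_sizep == 0 is left untouched.
 */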
1205*8c4bfc6eSTejun Heo 
120666c3a757STejun Heo /*
120766c3a757STejun Heo  * Embedding first chunk setup helper.
120866c3a757STejun Heo  */
120966c3a757STejun Heo static void *pcpue_ptr __initdata;
121066c3a757STejun Heo static size_t pcpue_size __initdata;
121166c3a757STejun Heo static size_t pcpue_unit_size __initdata;
121266c3a757STejun Heo 
121366c3a757STejun Heo static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
121466c3a757STejun Heo {
121566c3a757STejun Heo 	size_t off = (size_t)pageno << PAGE_SHIFT;
121666c3a757STejun Heo 
121766c3a757STejun Heo 	if (off >= pcpue_size)
121866c3a757STejun Heo 		return NULL;
121966c3a757STejun Heo 
122066c3a757STejun Heo 	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
122166c3a757STejun Heo }
122266c3a757STejun Heo 
122366c3a757STejun Heo /**
122466c3a757STejun Heo  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
122566c3a757STejun Heo  * @static_size: the size of static percpu area in bytes
122666c3a757STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
122766c3a757STejun Heo  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
122866c3a757STejun Heo  *
122966c3a757STejun Heo  * This is a helper to ease setting up the embedded first percpu chunk
123066c3a757STejun Heo  * can be called where pcpu_setup_first_chunk() is expected.
123166c3a757STejun Heo  *
123266c3a757STejun Heo  * If this function is used to set up the first chunk, it is
123366c3a757STejun Heo  * allocated as a contiguous area using the bootmem allocator and used
123466c3a757STejun Heo  * as-is without being mapped into the vmalloc area.  This enables the
123566c3a757STejun Heo  * first chunk to piggyback on the linear physical mapping, which
123666c3a757STejun Heo  * often uses a larger page size.
123766c3a757STejun Heo  *
123866c3a757STejun Heo  * When @dyn_size is positive, the dynamic area might be larger than
1239788e5abcSTejun Heo  * specified in order to fill up page alignment.  When @dyn_size is
1240788e5abcSTejun Heo  * auto (-1), it is made just big enough to fill the page alignment
1241788e5abcSTejun Heo  * slack after the static and reserved areas.
124266c3a757STejun Heo  *
124366c3a757STejun Heo  * If the needed size is smaller than the minimum or specified unit
124466c3a757STejun Heo  * size, the leftover is returned to the bootmem allocator.
124566c3a757STejun Heo  *
124666c3a757STejun Heo  * RETURNS:
124766c3a757STejun Heo  * The determined pcpu_unit_size which can be used to initialize
124866c3a757STejun Heo  * percpu access on success, -errno on failure.
124966c3a757STejun Heo  */
125066c3a757STejun Heo ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
1251788e5abcSTejun Heo 				      ssize_t dyn_size)
125266c3a757STejun Heo {
1253fa8a7094STejun Heo 	size_t chunk_size;
125466c3a757STejun Heo 	unsigned int cpu;
125566c3a757STejun Heo 
125666c3a757STejun Heo 	/* determine parameters and allocate */
1257*8c4bfc6eSTejun Heo 	pcpue_size = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
125866c3a757STejun Heo 
125966c3a757STejun Heo 	pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
1260fa8a7094STejun Heo 	chunk_size = pcpue_unit_size * num_possible_cpus();
1261fa8a7094STejun Heo 
1262fa8a7094STejun Heo 	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
1263fa8a7094STejun Heo 					    __pa(MAX_DMA_ADDRESS));
1264fa8a7094STejun Heo 	if (!pcpue_ptr) {
1265fa8a7094STejun Heo 		pr_warning("PERCPU: failed to allocate %zu bytes for "
1266fa8a7094STejun Heo 			   "embedding\n", chunk_size);
126766c3a757STejun Heo 		return -ENOMEM;
1268fa8a7094STejun Heo 	}
126966c3a757STejun Heo 
127066c3a757STejun Heo 	/* return the leftover and copy */
127166c3a757STejun Heo 	for_each_possible_cpu(cpu) {
127266c3a757STejun Heo 		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
127366c3a757STejun Heo 
127466c3a757STejun Heo 		free_bootmem(__pa(ptr + pcpue_size),
127566c3a757STejun Heo 			     pcpue_unit_size - pcpue_size);
127666c3a757STejun Heo 		memcpy(ptr, __per_cpu_load, static_size);
127766c3a757STejun Heo 	}
127866c3a757STejun Heo 
127966c3a757STejun Heo 	/* we're ready, commit */
128066c3a757STejun Heo 	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
128166c3a757STejun Heo 		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
128266c3a757STejun Heo 
128366c3a757STejun Heo 	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
128466c3a757STejun Heo 				      reserved_size, dyn_size,
128566c3a757STejun Heo 				      pcpue_unit_size, pcpue_ptr, NULL);
128666c3a757STejun Heo }
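
/*
 * Sizing sketch (hypothetical numbers, 4k pages): static_size = 32k,
 * reserved_size = 8k and dyn_size = -1 give pcpue_size = 40k.
 * Assuming, for the sake of the example, a PCPU_MIN_UNIT_SIZE of 64k,
 * pcpue_unit_size becomes 64k, a 4-cpu machine allocates one 256k
 * bootmem block, and each cpu's 24k tail beyond pcpue_size is freed
 * back to bootmem in the loop above.
 */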
1287e74e3962STejun Heo 
1288e74e3962STejun Heo /*
1289d4b95f80STejun Heo  * 4k page first chunk setup helper.
1290d4b95f80STejun Heo  */
1291d4b95f80STejun Heo static struct page **pcpu4k_pages __initdata;
12928f05a6a6STejun Heo static int pcpu4k_unit_pages __initdata;
1293d4b95f80STejun Heo 
1294d4b95f80STejun Heo static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
1295d4b95f80STejun Heo {
12968f05a6a6STejun Heo 	if (pageno < pcpu4k_unit_pages)
12978f05a6a6STejun Heo 		return pcpu4k_pages[cpu * pcpu4k_unit_pages + pageno];
1298d4b95f80STejun Heo 	return NULL;
1299d4b95f80STejun Heo }
1300d4b95f80STejun Heo 
1301d4b95f80STejun Heo /**
1302d4b95f80STejun Heo  * pcpu_4k_first_chunk - map the first chunk using PAGE_SIZE pages
1303d4b95f80STejun Heo  * @static_size: the size of static percpu area in bytes
1304d4b95f80STejun Heo  * @reserved_size: the size of reserved percpu area in bytes
1305d4b95f80STejun Heo  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1306d4b95f80STejun Heo  * @free_fn: function to free percpu page, always called with PAGE_SIZE
1307d4b95f80STejun Heo  * @populate_pte_fn: function to populate pte
1308d4b95f80STejun Heo  *
1309d4b95f80STejun Heo  * This is a helper to ease setting up a page-mapped first percpu chunk and
1310d4b95f80STejun Heo  * can be called where pcpu_setup_first_chunk() is expected.
1311d4b95f80STejun Heo  *
1312d4b95f80STejun Heo  * This is the basic allocator.  The static percpu area is allocated
1313d4b95f80STejun Heo  * page-by-page into the vmalloc area.
1314d4b95f80STejun Heo  *
1315d4b95f80STejun Heo  * RETURNS:
1316d4b95f80STejun Heo  * The determined pcpu_unit_size which can be used to initialize
1317d4b95f80STejun Heo  * percpu access on success, -errno on failure.
1318d4b95f80STejun Heo  */
1319d4b95f80STejun Heo ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size,
1320d4b95f80STejun Heo 				   pcpu_fc_alloc_fn_t alloc_fn,
1321d4b95f80STejun Heo 				   pcpu_fc_free_fn_t free_fn,
1322d4b95f80STejun Heo 				   pcpu_fc_populate_pte_fn_t populate_pte_fn)
1323d4b95f80STejun Heo {
13248f05a6a6STejun Heo 	static struct vm_struct vm;
1325d4b95f80STejun Heo 	size_t pages_size;
1326d4b95f80STejun Heo 	unsigned int cpu;
1327d4b95f80STejun Heo 	int i, j;
1328d4b95f80STejun Heo 	ssize_t ret;
1329d4b95f80STejun Heo 
13308f05a6a6STejun Heo 	pcpu4k_unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size,
13318f05a6a6STejun Heo 					 PCPU_MIN_UNIT_SIZE));
1332d4b95f80STejun Heo 
1333d4b95f80STejun Heo 	/* unaligned allocations can't be freed, round up to page size */
13348f05a6a6STejun Heo 	pages_size = PFN_ALIGN(pcpu4k_unit_pages * num_possible_cpus() *
1335d4b95f80STejun Heo 			       sizeof(pcpu4k_pages[0]));
1336d4b95f80STejun Heo 	pcpu4k_pages = alloc_bootmem(pages_size);
1337d4b95f80STejun Heo 
13388f05a6a6STejun Heo 	/* allocate pages */
1339d4b95f80STejun Heo 	j = 0;
1340d4b95f80STejun Heo 	for_each_possible_cpu(cpu)
13418f05a6a6STejun Heo 		for (i = 0; i < pcpu4k_unit_pages; i++) {
1342d4b95f80STejun Heo 			void *ptr;
1343d4b95f80STejun Heo 
1344d4b95f80STejun Heo 			ptr = alloc_fn(cpu, PAGE_SIZE);
1345d4b95f80STejun Heo 			if (!ptr) {
1346d4b95f80STejun Heo 				pr_warning("PERCPU: failed to allocate "
1347d4b95f80STejun Heo 					   "4k page for cpu%u\n", cpu);
1348d4b95f80STejun Heo 				goto enomem;
1349d4b95f80STejun Heo 			}
1350d4b95f80STejun Heo 			pcpu4k_pages[j++] = virt_to_page(ptr);
1351d4b95f80STejun Heo 		}
1352d4b95f80STejun Heo 
13538f05a6a6STejun Heo 	/* allocate vm area, map the pages and copy static data */
13548f05a6a6STejun Heo 	vm.flags = VM_ALLOC;
13558f05a6a6STejun Heo 	vm.size = num_possible_cpus() * pcpu4k_unit_pages << PAGE_SHIFT;
13568f05a6a6STejun Heo 	vm_area_register_early(&vm, PAGE_SIZE);
13578f05a6a6STejun Heo 
13588f05a6a6STejun Heo 	for_each_possible_cpu(cpu) {
13598f05a6a6STejun Heo 		unsigned long unit_addr = (unsigned long)vm.addr +
13608f05a6a6STejun Heo 			(cpu * pcpu4k_unit_pages << PAGE_SHIFT);
13618f05a6a6STejun Heo 
13628f05a6a6STejun Heo 		for (i = 0; i < pcpu4k_unit_pages; i++)
13638f05a6a6STejun Heo 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
13648f05a6a6STejun Heo 
13658f05a6a6STejun Heo 		/* pte already populated, the following shouldn't fail */
13668f05a6a6STejun Heo 		ret = __pcpu_map_pages(unit_addr,
13678f05a6a6STejun Heo 				       &pcpu4k_pages[cpu * pcpu4k_unit_pages],
13688f05a6a6STejun Heo 				       pcpu4k_unit_pages);
13698f05a6a6STejun Heo 		if (ret < 0)
13708f05a6a6STejun Heo 			panic("failed to map percpu area, err=%zd\n", ret);
13718f05a6a6STejun Heo 
13728f05a6a6STejun Heo 		/*
13738f05a6a6STejun Heo 		 * FIXME: Archs with virtual cache should flush local
13748f05a6a6STejun Heo 		 * cache for the linear mapping here - something
13758f05a6a6STejun Heo 		 * equivalent to flush_cache_vmap() on the local cpu.
13768f05a6a6STejun Heo 		 * flush_cache_vmap() can't be used as most supporting
13778f05a6a6STejun Heo 		 * data structures are not set up yet.
13788f05a6a6STejun Heo 		 */
13798f05a6a6STejun Heo 
13808f05a6a6STejun Heo 		/* copy static data */
13818f05a6a6STejun Heo 		memcpy((void *)unit_addr, __per_cpu_load, static_size);
13828f05a6a6STejun Heo 	}
13838f05a6a6STejun Heo 
1384d4b95f80STejun Heo 	/* we're ready, commit */
13858f05a6a6STejun Heo 	pr_info("PERCPU: %d 4k pages per cpu, static data %zu bytes\n",
13868f05a6a6STejun Heo 		pcpu4k_unit_pages, static_size);
1387d4b95f80STejun Heo 
1388d4b95f80STejun Heo 	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
1389d4b95f80STejun Heo 				     reserved_size, -1,
13908f05a6a6STejun Heo 				     pcpu4k_unit_pages << PAGE_SHIFT, vm.addr,
13918f05a6a6STejun Heo 				     NULL);
1392d4b95f80STejun Heo 	goto out_free_ar;
1393d4b95f80STejun Heo 
1394d4b95f80STejun Heo enomem:
1395d4b95f80STejun Heo 	while (--j >= 0)
1396d4b95f80STejun Heo 		free_fn(page_address(pcpu4k_pages[j]), PAGE_SIZE);
1397d4b95f80STejun Heo 	ret = -ENOMEM;
1398d4b95f80STejun Heo out_free_ar:
1399d4b95f80STejun Heo 	free_bootmem(__pa(pcpu4k_pages), pages_size);
1400d4b95f80STejun Heo 	return ret;
1401d4b95f80STejun Heo }
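
/*
 * A minimal sketch of callbacks an arch might pass in.  The names and
 * the node-aware bootmem allocation are illustrative assumptions, not
 * part of this file; populate_pte_fn must additionally guarantee that
 * page tables cover the passed-in address (compare the arch-specific
 * pte populators used by callers of this helper).
 */
#if 0	/* example only */
static void * __init example_pcpu_alloc(unsigned int cpu, size_t size)
{
	/* allocate size bytes (always PAGE_SIZE here) near cpu's node */
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size,
				    PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

static void __init example_pcpu_free(void *ptr, size_t size)
{
	/* hand the page back to the bootmem allocator */
	free_bootmem(__pa(ptr), size);
}
#endif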
1402d4b95f80STejun Heo 
1403d4b95f80STejun Heo /*
1404*8c4bfc6eSTejun Heo  * Large page remapping first chunk setup helper.
1405*8c4bfc6eSTejun Heo  */
1406*8c4bfc6eSTejun Heo #ifdef CONFIG_NEED_MULTIPLE_NODES
1407*8c4bfc6eSTejun Heo struct pcpul_ent {
1408*8c4bfc6eSTejun Heo 	unsigned int	cpu;
1409*8c4bfc6eSTejun Heo 	void		*ptr;
1410*8c4bfc6eSTejun Heo };
1411*8c4bfc6eSTejun Heo 
1412*8c4bfc6eSTejun Heo static size_t pcpul_size;
1413*8c4bfc6eSTejun Heo static size_t pcpul_unit_size;
1414*8c4bfc6eSTejun Heo static struct pcpul_ent *pcpul_map;
1415*8c4bfc6eSTejun Heo static struct vm_struct pcpul_vm;
1416*8c4bfc6eSTejun Heo 
1417*8c4bfc6eSTejun Heo static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
1418*8c4bfc6eSTejun Heo {
1419*8c4bfc6eSTejun Heo 	size_t off = (size_t)pageno << PAGE_SHIFT;
1420*8c4bfc6eSTejun Heo 
1421*8c4bfc6eSTejun Heo 	if (off >= pcpul_size)
1422*8c4bfc6eSTejun Heo 		return NULL;
1423*8c4bfc6eSTejun Heo 
1424*8c4bfc6eSTejun Heo 	return virt_to_page(pcpul_map[cpu].ptr + off);
1425*8c4bfc6eSTejun Heo }
1426*8c4bfc6eSTejun Heo 
1427*8c4bfc6eSTejun Heo /**
1428*8c4bfc6eSTejun Heo  * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
1429*8c4bfc6eSTejun Heo  * @static_size: the size of static percpu area in bytes
1430*8c4bfc6eSTejun Heo  * @reserved_size: the size of reserved percpu area in bytes
1431*8c4bfc6eSTejun Heo  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1432*8c4bfc6eSTejun Heo  * @lpage_size: the size of a large page
1433*8c4bfc6eSTejun Heo  * @alloc_fn: function to allocate percpu lpage, always called with lpage_size
1434*8c4bfc6eSTejun Heo  * @free_fn: function to free percpu memory, @size <= lpage_size
1435*8c4bfc6eSTejun Heo  * @map_fn: function to map percpu lpage, always called with lpage_size
1436*8c4bfc6eSTejun Heo  *
1437*8c4bfc6eSTejun Heo  * This allocator uses a large page as the unit.  One large page is
1438*8c4bfc6eSTejun Heo  * allocated for each cpu and remapped into the vmalloc area using a
1439*8c4bfc6eSTejun Heo  * large page mapping.  As a large page can be quite large, only part
1440*8c4bfc6eSTejun Heo  * of it is used for the first chunk.  The unused part is returned to
1441*8c4bfc6eSTejun Heo  * the bootmem allocator.
1442*8c4bfc6eSTejun Heo  *
1443*8c4bfc6eSTejun Heo  * The large pages are thus mapped twice - once in the physical
1444*8c4bfc6eSTejun Heo  * mapping and once in the vmalloc area for the first percpu chunk.
1445*8c4bfc6eSTejun Heo  * The double mapping adds the pressure of one more large TLB entry
1446*8c4bfc6eSTejun Heo  * but is still much better than using only 4k mappings, while
1447*8c4bfc6eSTejun Heo  * remaining NUMA friendly.
1448*8c4bfc6eSTejun Heo  *
1449*8c4bfc6eSTejun Heo  * RETURNS:
1450*8c4bfc6eSTejun Heo  * The determined pcpu_unit_size which can be used to initialize
1451*8c4bfc6eSTejun Heo  * percpu access on success, -errno on failure.
1452*8c4bfc6eSTejun Heo  */
1453*8c4bfc6eSTejun Heo ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
1454*8c4bfc6eSTejun Heo 				      ssize_t dyn_size, size_t lpage_size,
1455*8c4bfc6eSTejun Heo 				      pcpu_fc_alloc_fn_t alloc_fn,
1456*8c4bfc6eSTejun Heo 				      pcpu_fc_free_fn_t free_fn,
1457*8c4bfc6eSTejun Heo 				      pcpu_fc_map_fn_t map_fn)
1458*8c4bfc6eSTejun Heo {
1459*8c4bfc6eSTejun Heo 	size_t size_sum;
1460*8c4bfc6eSTejun Heo 	size_t map_size;
1461*8c4bfc6eSTejun Heo 	unsigned int cpu;
1462*8c4bfc6eSTejun Heo 	int i, j;
1463*8c4bfc6eSTejun Heo 	ssize_t ret;
1464*8c4bfc6eSTejun Heo 
1465*8c4bfc6eSTejun Heo 	/*
1466*8c4bfc6eSTejun Heo 	 * Currently supports only a single large page per unit.  Supporting
1467*8c4bfc6eSTejun Heo 	 * multiple pages won't be too difficult if it ever becomes necessary.
1468*8c4bfc6eSTejun Heo 	 */
1469*8c4bfc6eSTejun Heo 	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
1470*8c4bfc6eSTejun Heo 
1471*8c4bfc6eSTejun Heo 	pcpul_unit_size = lpage_size;
1472*8c4bfc6eSTejun Heo 	pcpul_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1473*8c4bfc6eSTejun Heo 	if (pcpul_size > pcpul_unit_size) {
1474*8c4bfc6eSTejun Heo 		pr_warning("PERCPU: first chunk is larger than a large "
1475*8c4bfc6eSTejun Heo 			   "page, can't use large page\n");
1476*8c4bfc6eSTejun Heo 		return -EINVAL;
1477*8c4bfc6eSTejun Heo 	}
1478*8c4bfc6eSTejun Heo 
1479*8c4bfc6eSTejun Heo 	/* allocate pointer array and alloc large pages */
1480*8c4bfc6eSTejun Heo 	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
1481*8c4bfc6eSTejun Heo 	pcpul_map = alloc_bootmem(map_size);
1482*8c4bfc6eSTejun Heo 
1483*8c4bfc6eSTejun Heo 	for_each_possible_cpu(cpu) {
1484*8c4bfc6eSTejun Heo 		void *ptr;
1485*8c4bfc6eSTejun Heo 
1486*8c4bfc6eSTejun Heo 		ptr = alloc_fn(cpu, lpage_size);
1487*8c4bfc6eSTejun Heo 		if (!ptr) {
1488*8c4bfc6eSTejun Heo 			pr_warning("PERCPU: failed to allocate large page "
1489*8c4bfc6eSTejun Heo 				   "for cpu%u\n", cpu);
1490*8c4bfc6eSTejun Heo 			goto enomem;
1491*8c4bfc6eSTejun Heo 		}
1492*8c4bfc6eSTejun Heo 
1493*8c4bfc6eSTejun Heo 		/*
1494*8c4bfc6eSTejun Heo 		 * Only use pcpul_size bytes and give back the rest.
1495*8c4bfc6eSTejun Heo 		 *
1496*8c4bfc6eSTejun Heo 		 * Ingo: The lpage_size up-rounding bootmem is needed
1497*8c4bfc6eSTejun Heo 		 * to make sure the partial lpage is still fully RAM -
1498*8c4bfc6eSTejun Heo 		 * it's not well-specified to have an incompatible area
1499*8c4bfc6eSTejun Heo 		 * (unmapped RAM, device memory, etc.) in that hole.
1500*8c4bfc6eSTejun Heo 		 */
1501*8c4bfc6eSTejun Heo 		free_fn(ptr + pcpul_size, lpage_size - pcpul_size);
1502*8c4bfc6eSTejun Heo 
1503*8c4bfc6eSTejun Heo 		pcpul_map[cpu].cpu = cpu;
1504*8c4bfc6eSTejun Heo 		pcpul_map[cpu].ptr = ptr;
1505*8c4bfc6eSTejun Heo 
1506*8c4bfc6eSTejun Heo 		memcpy(ptr, __per_cpu_load, static_size);
1507*8c4bfc6eSTejun Heo 	}
1508*8c4bfc6eSTejun Heo 
1509*8c4bfc6eSTejun Heo 	/* allocate address and map */
1510*8c4bfc6eSTejun Heo 	pcpul_vm.flags = VM_ALLOC;
1511*8c4bfc6eSTejun Heo 	pcpul_vm.size = num_possible_cpus() * pcpul_unit_size;
1512*8c4bfc6eSTejun Heo 	vm_area_register_early(&pcpul_vm, pcpul_unit_size);
1513*8c4bfc6eSTejun Heo 
1514*8c4bfc6eSTejun Heo 	for_each_possible_cpu(cpu)
1515*8c4bfc6eSTejun Heo 		map_fn(pcpul_map[cpu].ptr, pcpul_unit_size,
1516*8c4bfc6eSTejun Heo 		       pcpul_vm.addr + cpu * pcpul_unit_size);
1517*8c4bfc6eSTejun Heo 
1518*8c4bfc6eSTejun Heo 	/* we're ready, commit */
1519*8c4bfc6eSTejun Heo 	pr_info("PERCPU: Remapped at %p with large pages, static data "
1520*8c4bfc6eSTejun Heo 		"%zu bytes\n", pcpul_vm.addr, static_size);
1521*8c4bfc6eSTejun Heo 
1522*8c4bfc6eSTejun Heo 	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
1523*8c4bfc6eSTejun Heo 				     reserved_size, dyn_size, pcpul_unit_size,
1524*8c4bfc6eSTejun Heo 				     pcpul_vm.addr, NULL);
1525*8c4bfc6eSTejun Heo 
1526*8c4bfc6eSTejun Heo 	/* sort pcpul_map array for pcpu_lpage_remapped() */
1527*8c4bfc6eSTejun Heo 	for (i = 0; i < num_possible_cpus() - 1; i++)
1528*8c4bfc6eSTejun Heo 		for (j = i + 1; j < num_possible_cpus(); j++)
1529*8c4bfc6eSTejun Heo 			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
1530*8c4bfc6eSTejun Heo 				struct pcpul_ent tmp = pcpul_map[i];
1531*8c4bfc6eSTejun Heo 				pcpul_map[i] = pcpul_map[j];
1532*8c4bfc6eSTejun Heo 				pcpul_map[j] = tmp;
1533*8c4bfc6eSTejun Heo 			}
1534*8c4bfc6eSTejun Heo 
1535*8c4bfc6eSTejun Heo 	return ret;
1536*8c4bfc6eSTejun Heo 
1537*8c4bfc6eSTejun Heo enomem:
1538*8c4bfc6eSTejun Heo 	for_each_possible_cpu(cpu)
1539*8c4bfc6eSTejun Heo 		if (pcpul_map[cpu].ptr)
1540*8c4bfc6eSTejun Heo 			free_fn(pcpul_map[cpu].ptr, pcpul_size);
1541*8c4bfc6eSTejun Heo 	free_bootmem(__pa(pcpul_map), map_size);
1542*8c4bfc6eSTejun Heo 	return -ENOMEM;
1543*8c4bfc6eSTejun Heo }
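
/*
 * Worked example (hypothetical numbers): with 2MB large pages and
 * size_sum = 300k, pcpul_size = max(size_sum, PCPU_MIN_UNIT_SIZE) =
 * 300k (assuming the minimum is smaller) and pcpul_unit_size = 2M.
 * Each cpu gets one 2MB page; the 2M - 300k tail is handed back to
 * bootmem (kept RAM-backed, see the comment in the allocation loop)
 * while the whole page is remapped into the vmalloc area behind a
 * single large TLB entry per cpu.  The first chunk uses only the 300k
 * head; the recycled tail stays aliased, which is what
 * pcpu_lpage_remapped() below exists to detect.
 */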
1544*8c4bfc6eSTejun Heo 
1545*8c4bfc6eSTejun Heo /**
1546*8c4bfc6eSTejun Heo  * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
1547*8c4bfc6eSTejun Heo  * @kaddr: the kernel address in question
1548*8c4bfc6eSTejun Heo  *
1549*8c4bfc6eSTejun Heo  * Determine whether @kaddr falls in the pcpul recycled area.  This is
1550*8c4bfc6eSTejun Heo  * used by pageattr to detect VM aliases and break up the pcpu large
1551*8c4bfc6eSTejun Heo  * page mapping such that the same physical page is not mapped under
1552*8c4bfc6eSTejun Heo  * different attributes.
1553*8c4bfc6eSTejun Heo  *
1554*8c4bfc6eSTejun Heo  * The recycled area is always at the tail of a partially used large
1555*8c4bfc6eSTejun Heo  * page.
1556*8c4bfc6eSTejun Heo  *
1557*8c4bfc6eSTejun Heo  * RETURNS:
1558*8c4bfc6eSTejun Heo  * Address of corresponding remapped pcpu address if match is found;
1559*8c4bfc6eSTejun Heo  * otherwise, NULL.
1560*8c4bfc6eSTejun Heo  */
1561*8c4bfc6eSTejun Heo void *pcpu_lpage_remapped(void *kaddr)
1562*8c4bfc6eSTejun Heo {
1563*8c4bfc6eSTejun Heo 	unsigned long unit_mask = pcpul_unit_size - 1;
1564*8c4bfc6eSTejun Heo 	void *lpage_addr = (void *)((unsigned long)kaddr & ~unit_mask);
1565*8c4bfc6eSTejun Heo 	unsigned long offset = (unsigned long)kaddr & unit_mask;
1566*8c4bfc6eSTejun Heo 	int left = 0, right = num_possible_cpus() - 1;
1567*8c4bfc6eSTejun Heo 	int pos;
1568*8c4bfc6eSTejun Heo 
1569*8c4bfc6eSTejun Heo 	/* pcpul in use at all? */
1570*8c4bfc6eSTejun Heo 	if (!pcpul_map)
1571*8c4bfc6eSTejun Heo 		return NULL;
1572*8c4bfc6eSTejun Heo 
1573*8c4bfc6eSTejun Heo 	/* okay, perform binary search */
1574*8c4bfc6eSTejun Heo 	while (left <= right) {
1575*8c4bfc6eSTejun Heo 		pos = (left + right) / 2;
1576*8c4bfc6eSTejun Heo 
1577*8c4bfc6eSTejun Heo 		if (pcpul_map[pos].ptr < lpage_addr)
1578*8c4bfc6eSTejun Heo 			left = pos + 1;
1579*8c4bfc6eSTejun Heo 		else if (pcpul_map[pos].ptr > lpage_addr)
1580*8c4bfc6eSTejun Heo 			right = pos - 1;
1581*8c4bfc6eSTejun Heo 		else {
1582*8c4bfc6eSTejun Heo 			/* it shouldn't be in the area for the first chunk */
1583*8c4bfc6eSTejun Heo 			WARN_ON(offset < pcpul_size);
1584*8c4bfc6eSTejun Heo 
1585*8c4bfc6eSTejun Heo 			return pcpul_vm.addr +
1586*8c4bfc6eSTejun Heo 				pcpul_map[pos].cpu * pcpul_unit_size + offset;
1587*8c4bfc6eSTejun Heo 		}
1588*8c4bfc6eSTejun Heo 	}
1589*8c4bfc6eSTejun Heo 
1590*8c4bfc6eSTejun Heo 	return NULL;
1591*8c4bfc6eSTejun Heo }
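
/*
 * Decomposition example (hypothetical addresses, 2MB units): kaddr =
 * 0xffff880002260000 splits into lpage_addr = 0xffff880002200000 and
 * offset = 0x60000.  If the binary search finds pcpul_map[pos].ptr ==
 * lpage_addr and offset >= pcpul_size, kaddr lies in a recycled tail
 * and the aliased vmalloc address is pcpul_vm.addr +
 * pcpul_map[pos].cpu * 2M + 0x60000.
 */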
1592*8c4bfc6eSTejun Heo #endif
1593*8c4bfc6eSTejun Heo 
1594*8c4bfc6eSTejun Heo /*
1595e74e3962STejun Heo  * Generic percpu area setup.
1596e74e3962STejun Heo  *
1597e74e3962STejun Heo  * The embedding helper is used because its behavior closely resembles
1598e74e3962STejun Heo  * the original non-dynamic generic percpu area setup.  This is
1599e74e3962STejun Heo  * important because many archs have addressing restrictions and might
1600e74e3962STejun Heo  * fail if the percpu area is located far away from the previous
1601e74e3962STejun Heo  * location.  As an added bonus, in non-NUMA cases, embedding is
1602e74e3962STejun Heo  * generally a good idea TLB-wise because percpu area can piggy back
1603e74e3962STejun Heo  * on the physical linear memory mapping which uses large page
1604e74e3962STejun Heo  * mappings on applicable archs.
1605e74e3962STejun Heo  */
1606e74e3962STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1607e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1608e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset);
1609e74e3962STejun Heo 
1610e74e3962STejun Heo void __init setup_per_cpu_areas(void)
1611e74e3962STejun Heo {
1612e74e3962STejun Heo 	size_t static_size = __per_cpu_end - __per_cpu_start;
1613e74e3962STejun Heo 	ssize_t unit_size;
1614e74e3962STejun Heo 	unsigned long delta;
1615e74e3962STejun Heo 	unsigned int cpu;
1616e74e3962STejun Heo 
1617e74e3962STejun Heo 	/*
1618e74e3962STejun Heo 	 * Always reserve area for module percpu variables.  That's
1619e74e3962STejun Heo 	 * what the legacy allocator did.
1620e74e3962STejun Heo 	 */
1621e74e3962STejun Heo 	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
1622788e5abcSTejun Heo 					   PERCPU_DYNAMIC_RESERVE);
1623e74e3962STejun Heo 	if (unit_size < 0)
1624e74e3962STejun Heo 		panic("Failed to initialize percpu areas.");
1625e74e3962STejun Heo 
1626e74e3962STejun Heo 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1627e74e3962STejun Heo 	for_each_possible_cpu(cpu)
1628e74e3962STejun Heo 		__per_cpu_offset[cpu] = delta + cpu * unit_size;
1629e74e3962STejun Heo }
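
/*
 * Offset arithmetic sketch (hypothetical addresses): per_cpu(var, cpu)
 * resolves to &var + __per_cpu_offset[cpu].  If __per_cpu_start were
 * 0xffffffff81700000 and pcpu_base_addr came out at
 * 0xffff880001e00000, delta would be their difference and cpu 2's
 * copy of var would live at &var + delta + 2 * unit_size.
 */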
1630e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1631