/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 * consists of a boot-time determined number of units and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
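/*
 * Illustration (not part of the kernel sources): a minimal,
 * self-contained sketch of the signed-map encoding described above.
 * Positive entries are free regions, negative entries allocated
 * ones, and a first-fit scan looks roughly like this (names are
 * hypothetical):
 *
 *	static int toy_first_fit(int *map, int nr, int size)
 *	{
 *		int i, off = 0;
 *
 *		for (i = 0; i < nr; off += abs(map[i++]))
 *			if (map[i] >= size)
 *				return off;
 *		return -1;
 *	}
 *
 * e.g. map = { -128, 64, -32, 256 } describes free regions of 64
 * bytes at offset 128 and 256 bytes at offset 224.
 */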
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
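/*
 * Illustration (not part of the kernel sources): with the default
 * macros above, a percpu pointer is just the chunk-relative address
 * rebased onto the static percpu section, so the two translations
 * are exact inverses.  A hypothetical sanity check:
 *
 *	void *addr = pcpu_base_addr;
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *
 *	BUG_ON(__pcpu_ptr_to_addr(ptr) != addr);
 *
 * Architectures whose percpu base registers don't follow this layout
 * override both macros in asm/percpu.h.
 */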
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irq off but
 * irqsave/restore are still used in alloc path so that it can be used
 * from early init path - sched_init() specifically.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as allocation path might be referencing the chunk with
 * only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
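/*
 * Illustration (not part of the kernel sources): the locking rules
 * above boil down to the following pattern for a writer that must
 * take a chunk out of circulation; a minimal sketch under the stated
 * rules:
 *
 *	mutex_lock(&pcpu_alloc_mutex);		-- sleeps, excludes alloc
 *	spin_lock_irq(&pcpu_lock);		-- excludes index users
 *	... unlink chunk from its pcpu_slot list ...
 *	spin_unlock_irq(&pcpu_lock);
 *	... GFP_KERNEL work, vfree() etc. allowed here ...
 *	mutex_unlock(&pcpu_alloc_mutex);
 *
 * pcpu_reclaim() below follows exactly this shape.
 */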
static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
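/*
 * Illustration (not part of the kernel sources): worked examples of
 * the slot computation above, with PCPU_SLOT_BASE_SHIFT == 5:
 *
 *	size =   32 -> fls = 6  -> max(6  - 5 + 2, 1) = 3
 *	size =  512 -> fls = 10 -> max(10 - 5 + 2, 1) = 7
 *	size = 4096 -> fls = 13 -> max(13 - 5 + 2, 1) = 10
 *
 * so each slot covers one power-of-two size class, and a chunk whose
 * free_size equals pcpu_unit_size always lands in the last slot.
 */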
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
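/*
 * Illustration (not part of the kernel sources): a minimal sketch of
 * how the iterators above are meant to be used, e.g. walking every
 * unpopulated page region of a chunk (rs/re are plain ints):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
 *		... pages [rs, re) of each unit lack backing pages ...
 *	}
 *
 * percpu-vm.c uses this pattern to populate and map only the regions
 * that actually need work.
 */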
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}
/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_alloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, old_size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		old = chunk->map;

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
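/*
 * Illustration (not part of the kernel sources): a worked example of
 * the split above.  Splitting a free 512-byte block at index 1 with
 * head = 8 and tail = 248 (e.g. for a 256-byte allocation):
 *
 *	before:	map = { -128,  512, ... }
 *	after:	map = { -128,    8,  256,  248, ... }
 *
 * The 8-byte head and 248-byte tail stay free; pcpu_alloc_area()
 * later negates the middle entry when it marks the area allocated.
 */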
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme in @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
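/*
 * Illustration (not part of the kernel sources): a worked example of
 * the merging above.  Freeing the -256 entry when both neighbours
 * are free collapses three map entries into one:
 *
 *	before:	map = { ...,    8, -256,  248, ... }
 *	after:	map = { ...,  512, ... }
 *
 * which is exactly the inverse of the split example shown after
 * pcpu_split_block().
 */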
static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the containing page struct
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}
/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}
	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = pcpu_create_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
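/*
 * Illustration (not part of the kernel sources): typical use of the
 * dynamic allocator from a hypothetical caller; a minimal sketch:
 *
 *	unsigned long __percpu *hits;
 *	unsigned int cpu;
 *	unsigned long sum = 0;
 *
 *	hits = __alloc_percpu(sizeof(*hits), __alignof__(*hits));
 *	if (!hits)
 *		return -ENOMEM;
 *
 *	... hot path: this_cpu_inc(*hits); ...
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(hits, cpu);
 *	free_percpu(hits);
 */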
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}
/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
	return false;
}
/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_start, first_end;
	unsigned int cpu;

	/*
	 * The following test on first_start/end isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
	first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
				    pcpu_unit_pages);
	if ((unsigned long)addr >= first_start &&
	    (unsigned long)addr < first_end) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if ((unsigned long)addr < VMALLOC_START ||
		    (unsigned long)addr >= VMALLOC_END)
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr));
	} else
		return page_to_phys(pcpu_addr_to_page(addr));
}
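/*
 * Illustration (not part of the kernel sources): per_cpu_ptr_to_phys()
 * is for the rare cases where hardware or a dump mechanism needs the
 * physical address behind a percpu variable.  A hypothetical sketch:
 *
 *	u32 __percpu *buf = alloc_percpu(u32);
 *	phys_addr_t phys;
 *
 *	phys = per_cpu_ptr_to_phys(per_cpu_ptr(buf, cpu));
 *
 * Note that the argument is the translated, dereferenceable address
 * (from per_cpu_ptr()), not the raw __percpu cookie itself.
 */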
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}
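/*
 * Illustration (not part of the kernel sources): the single bootmem
 * block allocated above is laid out as
 *
 *	| struct pcpu_alloc_info | groups[nr_groups] | pad | cpu_map[] |
 *	|<------------------ base_size ------------------>|
 *	|<------------------------- ai_size ------------------------->|
 *
 * i.e. the header, the flexible groups[] array and one shared cpu_map
 * with @nr_units slots all live in one page-aligned allocation, which
 * is why freeing it is a single free_bootmem() call.
 */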
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}
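	/*
	 * Illustration (not part of the kernel sources): a worked
	 * example of the max_upa computation above.  With
	 * min_unit_size = 80k and atom_size = 2M:
	 *
	 *	alloc_size = roundup(80k, 2M) = 2M
	 *	upa starts at 2M / 80k = 25
	 *	25, 24, ... are rejected (2M % upa != 0 or the
	 *	resulting unit isn't page aligned) until upa = 16,
	 *	giving page aligned 128k units
	 *
	 * so up to 16 units (cpus) can share one 2M atom.
	 */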
1150033e48fbSTejun Heo */ 1151033e48fbSTejun Heo last_allocs = INT_MAX; 1152033e48fbSTejun Heo for (upa = max_upa; upa; upa--) { 1153033e48fbSTejun Heo int allocs = 0, wasted = 0; 1154033e48fbSTejun Heo 1155033e48fbSTejun Heo if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1156033e48fbSTejun Heo continue; 1157033e48fbSTejun Heo 1158fd1e8a1fSTejun Heo for (group = 0; group < nr_groups; group++) { 1159033e48fbSTejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1160033e48fbSTejun Heo allocs += this_allocs; 1161033e48fbSTejun Heo wasted += this_allocs * upa - group_cnt[group]; 1162033e48fbSTejun Heo } 1163033e48fbSTejun Heo 1164033e48fbSTejun Heo /* 1165033e48fbSTejun Heo * Don't accept if wastage is over 25%. The 1166033e48fbSTejun Heo * greater-than comparison ensures upa==1 always 1167033e48fbSTejun Heo * passes the following check. 1168033e48fbSTejun Heo */ 1169033e48fbSTejun Heo if (wasted > num_possible_cpus() / 3) 1170033e48fbSTejun Heo continue; 1171033e48fbSTejun Heo 1172033e48fbSTejun Heo /* and then don't consume more memory */ 1173033e48fbSTejun Heo if (allocs > last_allocs) 1174033e48fbSTejun Heo break; 1175033e48fbSTejun Heo last_allocs = allocs; 1176033e48fbSTejun Heo best_upa = upa; 1177033e48fbSTejun Heo } 1178fd1e8a1fSTejun Heo upa = best_upa; 1179033e48fbSTejun Heo 1180fd1e8a1fSTejun Heo /* allocate and fill alloc_info */ 1181fd1e8a1fSTejun Heo for (group = 0; group < nr_groups; group++) 1182fd1e8a1fSTejun Heo nr_units += roundup(group_cnt[group], upa); 1183fd1e8a1fSTejun Heo 1184fd1e8a1fSTejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 1185fd1e8a1fSTejun Heo if (!ai) 1186fd1e8a1fSTejun Heo return ERR_PTR(-ENOMEM); 1187fd1e8a1fSTejun Heo cpu_map = ai->groups[0].cpu_map; 1188fd1e8a1fSTejun Heo 1189fd1e8a1fSTejun Heo for (group = 0; group < nr_groups; group++) { 1190fd1e8a1fSTejun Heo ai->groups[group].cpu_map = cpu_map; 1191fd1e8a1fSTejun Heo cpu_map += roundup(group_cnt[group], upa); 1192fd1e8a1fSTejun Heo } 1193fd1e8a1fSTejun Heo 1194fd1e8a1fSTejun Heo ai->static_size = static_size; 1195fd1e8a1fSTejun Heo ai->reserved_size = reserved_size; 1196fd1e8a1fSTejun Heo ai->dyn_size = dyn_size; 1197fd1e8a1fSTejun Heo ai->unit_size = alloc_size / upa; 1198fd1e8a1fSTejun Heo ai->atom_size = atom_size; 1199fd1e8a1fSTejun Heo ai->alloc_size = alloc_size; 1200fd1e8a1fSTejun Heo 1201fd1e8a1fSTejun Heo for (group = 0, unit = 0; group_cnt[group]; group++) { 1202fd1e8a1fSTejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1203fd1e8a1fSTejun Heo 1204fd1e8a1fSTejun Heo /* 1205fd1e8a1fSTejun Heo * Initialize base_offset as if all groups are located 1206fd1e8a1fSTejun Heo * back-to-back. The caller should update this to 1207fd1e8a1fSTejun Heo * reflect actual allocation. 
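		 *
		 * e.g. pcpu_embed_first_chunk() below replaces these
		 * offsets with areas[group] - base once the actual
		 * group allocations are known.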
1208fd1e8a1fSTejun Heo */ 1209fd1e8a1fSTejun Heo gi->base_offset = unit * ai->unit_size; 1210fd1e8a1fSTejun Heo 1211033e48fbSTejun Heo for_each_possible_cpu(cpu) 1212033e48fbSTejun Heo if (group_map[cpu] == group) 1213fd1e8a1fSTejun Heo gi->cpu_map[gi->nr_units++] = cpu; 1214fd1e8a1fSTejun Heo gi->nr_units = roundup(gi->nr_units, upa); 1215fd1e8a1fSTejun Heo unit += gi->nr_units; 1216fd1e8a1fSTejun Heo } 1217fd1e8a1fSTejun Heo BUG_ON(unit != nr_units); 1218fd1e8a1fSTejun Heo 1219fd1e8a1fSTejun Heo return ai; 1220033e48fbSTejun Heo } 1221033e48fbSTejun Heo 1222fd1e8a1fSTejun Heo /** 1223fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1224fd1e8a1fSTejun Heo * @lvl: loglevel 1225fd1e8a1fSTejun Heo * @ai: allocation info to dump 1226fd1e8a1fSTejun Heo * 1227fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 1228fd1e8a1fSTejun Heo */ 1229fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl, 1230fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai) 1231033e48fbSTejun Heo { 1232fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width; 1233033e48fbSTejun Heo char empty_str[] = "--------"; 1234fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0; 1235fd1e8a1fSTejun Heo int group, v; 1236fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */ 1237033e48fbSTejun Heo 1238fd1e8a1fSTejun Heo v = ai->nr_groups; 1239033e48fbSTejun Heo while (v /= 10) 1240fd1e8a1fSTejun Heo group_width++; 1241033e48fbSTejun Heo 1242fd1e8a1fSTejun Heo v = num_possible_cpus(); 1243fd1e8a1fSTejun Heo while (v /= 10) 1244fd1e8a1fSTejun Heo cpu_width++; 1245fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 1246033e48fbSTejun Heo 1247fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size; 1248fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3; 1249fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1)); 1250033e48fbSTejun Heo 1251fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 1252fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 1253fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 1254fd1e8a1fSTejun Heo 1255fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) { 1256fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 1257fd1e8a1fSTejun Heo int unit = 0, unit_end = 0; 1258fd1e8a1fSTejun Heo 1259fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa); 1260fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa; 1261fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) { 1262fd1e8a1fSTejun Heo if (!(alloc % apl)) { 1263033e48fbSTejun Heo printk("\n"); 1264fd1e8a1fSTejun Heo printk("%spcpu-alloc: ", lvl); 1265033e48fbSTejun Heo } 1266fd1e8a1fSTejun Heo printk("[%0*d] ", group_width, group); 1267fd1e8a1fSTejun Heo 1268fd1e8a1fSTejun Heo for (unit_end += upa; unit < unit_end; unit++) 1269fd1e8a1fSTejun Heo if (gi->cpu_map[unit] != NR_CPUS) 1270fd1e8a1fSTejun Heo printk("%0*d ", cpu_width, 1271fd1e8a1fSTejun Heo gi->cpu_map[unit]); 1272033e48fbSTejun Heo else 1273033e48fbSTejun Heo printk("%s ", empty_str); 1274033e48fbSTejun Heo } 1275fd1e8a1fSTejun Heo } 1276033e48fbSTejun Heo printk("\n"); 1277033e48fbSTejun Heo } 1278033e48fbSTejun Heo 1279fbf59bc9STejun Heo /** 1280fbf59bc9STejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 1281fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how the percpu area is shaped 128238a6be52STejun Heo * @base_addr: mapped address
1283fbf59bc9STejun Heo * 12848d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 12858d408b4bSTejun Heo * percpu area. This function is to be called from arch percpu area 128638a6be52STejun Heo * setup path. 12878d408b4bSTejun Heo * 1288fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first 1289fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator. 12908d408b4bSTejun Heo * 1291fd1e8a1fSTejun Heo * @ai->static_size is the size of static percpu area. 1292fd1e8a1fSTejun Heo * 1293fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the amount of bytes to 1294edcb4639STejun Heo * reserve after the static area in the first chunk. This reserves 1295edcb4639STejun Heo * the first chunk such that it's available only through reserved 1296edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 1297edcb4639STejun Heo * static areas on architectures where the addressing model has 1298edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 1299edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 1300edcb4639STejun Heo * 1301fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 1302fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 1303fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 13046074d5b0STejun Heo * 1305fd1e8a1fSTejun Heo * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 1306fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 1307fd1e8a1fSTejun Heo * @ai->dyn_size. 13088d408b4bSTejun Heo * 1309fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and used as alignment 1310fd1e8a1fSTejun Heo * for vm areas. 13118d408b4bSTejun Heo * 1312fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and always a multiple of 1313fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 1314fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 1315fd1e8a1fSTejun Heo * 1316fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe virtual memory layout of 1317fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the 1318fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 1319fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 1320fd1e8a1fSTejun Heo * all units is assumed. 13218d408b4bSTejun Heo * 132238a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 132338a6be52STejun Heo * copied static data to each unit. 1324fbf59bc9STejun Heo * 1325edcb4639STejun Heo * If the first chunk ends up with both reserved and dynamic areas, it 1326edcb4639STejun Heo * is served by two chunks - one to serve the core static and reserved 1327edcb4639STejun Heo * areas and the other for the dynamic area. They share the same vm 1328edcb4639STejun Heo * and page map but use different area allocation maps to stay away 1329edcb4639STejun Heo * from each other. The latter chunk is circulated in the chunk slots 1330edcb4639STejun Heo * and available for dynamic allocation like any other chunk. 1331edcb4639STejun Heo * 1332fbf59bc9STejun Heo * RETURNS: 1333fb435d52STejun Heo * 0 on success, -errno on failure.
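 *
 * Within each unit, the first chunk is thus laid out like the
 * following (sizes taken from @ai, everything past @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size left unused):
 *
 *	-------------------------------------------------
 *	| static | reserved | dynamic |     unused      |
 *	-------------------------------------------------
 *	0                                       unit_size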
1334fbf59bc9STejun Heo */ 1335fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1336fd1e8a1fSTejun Heo void *base_addr) 1337fbf59bc9STejun Heo { 1338635b75fcSTejun Heo static char cpus_buf[4096] __initdata; 1339*099a19d9STejun Heo static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1340*099a19d9STejun Heo static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1341fd1e8a1fSTejun Heo size_t dyn_size = ai->dyn_size; 1342fd1e8a1fSTejun Heo size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; 1343edcb4639STejun Heo struct pcpu_chunk *schunk, *dchunk = NULL; 13446563297cSTejun Heo unsigned long *group_offsets; 13456563297cSTejun Heo size_t *group_sizes; 1346fb435d52STejun Heo unsigned long *unit_off; 1347fbf59bc9STejun Heo unsigned int cpu; 1348fd1e8a1fSTejun Heo int *unit_map; 1349fd1e8a1fSTejun Heo int group, unit, i; 1350fbf59bc9STejun Heo 1351635b75fcSTejun Heo cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask); 1352635b75fcSTejun Heo 1353635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 1354635b75fcSTejun Heo if (unlikely(cond)) { \ 1355635b75fcSTejun Heo pr_emerg("PERCPU: failed to initialize, %s\n", #cond); \ 1356635b75fcSTejun Heo pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \ 1357635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 1358635b75fcSTejun Heo BUG(); \ 1359635b75fcSTejun Heo } \ 1360635b75fcSTejun Heo } while (0) 1361635b75fcSTejun Heo 13622f39e637STejun Heo /* sanity checks */ 1363635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1364635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 1365635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 1366635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1367635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); 1368635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 1369*099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 13709f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 13718d408b4bSTejun Heo 13726563297cSTejun Heo /* process group information and build config tables accordingly */ 13736563297cSTejun Heo group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0])); 13746563297cSTejun Heo group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0])); 1375fd1e8a1fSTejun Heo unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0])); 1376fb435d52STejun Heo unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); 13772f39e637STejun Heo 1378fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1379ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 1380fd1e8a1fSTejun Heo pcpu_first_unit_cpu = NR_CPUS; 13812f39e637STejun Heo 1382fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1383fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 13842f39e637STejun Heo 13856563297cSTejun Heo group_offsets[group] = gi->base_offset; 13866563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 13876563297cSTejun Heo 1388fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 1389fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 1390fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 1391fd1e8a1fSTejun Heo continue; 1392fd1e8a1fSTejun Heo 1393635b75fcSTejun Heo PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 1394635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1395635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1396fd1e8a1fSTejun Heo 1397fd1e8a1fSTejun Heo unit_map[cpu] = unit + 
i; 1398fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1399fb435d52STejun Heo 1400fd1e8a1fSTejun Heo if (pcpu_first_unit_cpu == NR_CPUS) 14012f39e637STejun Heo pcpu_first_unit_cpu = cpu; 14022f39e637STejun Heo } 1403fd1e8a1fSTejun Heo } 14042f39e637STejun Heo pcpu_last_unit_cpu = cpu; 1405fd1e8a1fSTejun Heo pcpu_nr_units = unit; 14062f39e637STejun Heo 14072f39e637STejun Heo for_each_possible_cpu(cpu) 1408635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 1409635b75fcSTejun Heo 1410635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 1411635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 1412635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_INFO, ai); 14132f39e637STejun Heo 14146563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 14156563297cSTejun Heo pcpu_group_offsets = group_offsets; 14166563297cSTejun Heo pcpu_group_sizes = group_sizes; 1417fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 1418fb435d52STejun Heo pcpu_unit_offsets = unit_off; 14192f39e637STejun Heo 14202f39e637STejun Heo /* determine basic parameters */ 1421fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 1422d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 14236563297cSTejun Heo pcpu_atom_size = ai->atom_size; 1424ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1425ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1426cafe8816STejun Heo 1427d9b55eebSTejun Heo /* 1428d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 1429d9b55eebSTejun Heo * empty chunks. 1430d9b55eebSTejun Heo */ 1431d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1432fbf59bc9STejun Heo pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); 1433fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 1434fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 1435fbf59bc9STejun Heo 1436edcb4639STejun Heo /* 1437edcb4639STejun Heo * Initialize static chunk. If reserved_size is zero, the 1438edcb4639STejun Heo * static chunk covers static area + dynamic allocation area 1439edcb4639STejun Heo * in the first chunk. If reserved_size is not zero, it 1440edcb4639STejun Heo * covers static area + reserved area (mostly used for module 1441edcb4639STejun Heo * static percpu allocation). 
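	 *
	 * The static chunk's initial allocation map below thus becomes
	 * either { -static_size, dyn_size } (no reserve) or
	 * { -static_size, reserved_size } (with reserve), and in the
	 * latter case a second chunk is set up for the dynamic area.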
1442edcb4639STejun Heo */ 14432441d15cSTejun Heo schunk = alloc_bootmem(pcpu_chunk_struct_size); 14442441d15cSTejun Heo INIT_LIST_HEAD(&schunk->list); 1445bba174f5STejun Heo schunk->base_addr = base_addr; 144661ace7faSTejun Heo schunk->map = smap; 144761ace7faSTejun Heo schunk->map_alloc = ARRAY_SIZE(smap); 144838a6be52STejun Heo schunk->immutable = true; 1449ce3141a2STejun Heo bitmap_fill(schunk->populated, pcpu_unit_pages); 1450edcb4639STejun Heo 1451fd1e8a1fSTejun Heo if (ai->reserved_size) { 1452fd1e8a1fSTejun Heo schunk->free_size = ai->reserved_size; 1453ae9e6bc9STejun Heo pcpu_reserved_chunk = schunk; 1454fd1e8a1fSTejun Heo pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; 1455edcb4639STejun Heo } else { 14562441d15cSTejun Heo schunk->free_size = dyn_size; 1457edcb4639STejun Heo dyn_size = 0; /* dynamic area covered */ 1458edcb4639STejun Heo } 14592441d15cSTejun Heo schunk->contig_hint = schunk->free_size; 1460fbf59bc9STejun Heo 1461fd1e8a1fSTejun Heo schunk->map[schunk->map_used++] = -ai->static_size; 146261ace7faSTejun Heo if (schunk->free_size) 146361ace7faSTejun Heo schunk->map[schunk->map_used++] = schunk->free_size; 146461ace7faSTejun Heo 1465edcb4639STejun Heo /* init dynamic chunk if necessary */ 1466edcb4639STejun Heo if (dyn_size) { 1467ce3141a2STejun Heo dchunk = alloc_bootmem(pcpu_chunk_struct_size); 1468edcb4639STejun Heo INIT_LIST_HEAD(&dchunk->list); 1469bba174f5STejun Heo dchunk->base_addr = base_addr; 1470edcb4639STejun Heo dchunk->map = dmap; 1471edcb4639STejun Heo dchunk->map_alloc = ARRAY_SIZE(dmap); 147238a6be52STejun Heo dchunk->immutable = true; 1473ce3141a2STejun Heo bitmap_fill(dchunk->populated, pcpu_unit_pages); 1474edcb4639STejun Heo 1475edcb4639STejun Heo dchunk->contig_hint = dchunk->free_size = dyn_size; 1476edcb4639STejun Heo dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; 1477edcb4639STejun Heo dchunk->map[dchunk->map_used++] = dchunk->free_size; 1478edcb4639STejun Heo } 1479edcb4639STejun Heo 14802441d15cSTejun Heo /* link the first chunk in */ 1481ae9e6bc9STejun Heo pcpu_first_chunk = dchunk ?: schunk; 1482ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 1483fbf59bc9STejun Heo 1484fbf59bc9STejun Heo /* we're done */ 1485bba174f5STejun Heo pcpu_base_addr = base_addr; 1486fb435d52STejun Heo return 0; 1487fbf59bc9STejun Heo } 148866c3a757STejun Heo 1489f58dc01bSTejun Heo const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { 1490f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 1491f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 1492f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 1493f58dc01bSTejun Heo }; 149466c3a757STejun Heo 1495f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 1496f58dc01bSTejun Heo 1497f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 149866c3a757STejun Heo { 1499f58dc01bSTejun Heo if (0) 1500f58dc01bSTejun Heo /* nada */; 1501f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 1502f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 1503f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 1504f58dc01bSTejun Heo #endif 1505f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1506f58dc01bSTejun Heo else if (!strcmp(str, "page")) 1507f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 1508f58dc01bSTejun Heo #endif 1509f58dc01bSTejun Heo else 1510f58dc01bSTejun Heo pr_warning("PERCPU: unknown allocator %s specified\n", str); 151166c3a757STejun Heo 1512f58dc01bSTejun Heo return 0; 151366c3a757STejun Heo } 1514f58dc01bSTejun Heo 
early_param("percpu_alloc", percpu_alloc_setup); 151566c3a757STejun Heo 151608fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 151708fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 151866c3a757STejun Heo /** 151966c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 152066c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 15214ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 1522c8826dd5STejun Heo * @atom_size: allocation atom size 1523c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1524c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 1525c8826dd5STejun Heo * @free_fn: function to free percpu page 152666c3a757STejun Heo * 152766c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 152866c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 152966c3a757STejun Heo * 153066c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 1531c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 1532c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 1533c8826dd5STejun Heo * aligned to @atom_size. 1534c8826dd5STejun Heo * 1535c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 1536c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 1537c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 1538c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 1539c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 1540c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 154166c3a757STejun Heo * 15424ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 154366c3a757STejun Heo * 154466c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 1545c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 154666c3a757STejun Heo * 154766c3a757STejun Heo * RETURNS: 1548fb435d52STejun Heo * 0 on success, -errno on failure.
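 *
 * Example (sketch): an arch relying on bootmem, like the generic
 * setup_per_cpu_areas() at the end of this file, would call
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    NULL, boot_alloc_fn, boot_free_fn);
 *
 * where boot_alloc_fn/boot_free_fn are placeholder names for the
 * arch's bootmem based @alloc_fn/@free_fn.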
154966c3a757STejun Heo */ 15504ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 1551c8826dd5STejun Heo size_t atom_size, 1552c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 1553c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1554c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 155566c3a757STejun Heo { 1556c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 1557c8826dd5STejun Heo void **areas = NULL; 1558fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 15596ea529a2STejun Heo size_t size_sum, areas_size, max_distance; 1560c8826dd5STejun Heo int group, i, rc; 156166c3a757STejun Heo 1562c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 1563c8826dd5STejun Heo cpu_distance_fn); 1564fd1e8a1fSTejun Heo if (IS_ERR(ai)) 1565fd1e8a1fSTejun Heo return PTR_ERR(ai); 156666c3a757STejun Heo 1567fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 1568c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 156966c3a757STejun Heo 1570c8826dd5STejun Heo areas = alloc_bootmem_nopanic(areas_size); 1571c8826dd5STejun Heo if (!areas) { 1572fb435d52STejun Heo rc = -ENOMEM; 1573c8826dd5STejun Heo goto out_free; 1574fa8a7094STejun Heo } 157566c3a757STejun Heo 1576c8826dd5STejun Heo /* allocate, copy and determine base address */ 1577c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 1578c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1579c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 1580c8826dd5STejun Heo void *ptr; 158166c3a757STejun Heo 1582c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 1583c8826dd5STejun Heo cpu = gi->cpu_map[i]; 1584c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 1585c8826dd5STejun Heo 1586c8826dd5STejun Heo /* allocate space for the whole group */ 1587c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 1588c8826dd5STejun Heo if (!ptr) { 1589c8826dd5STejun Heo rc = -ENOMEM; 1590c8826dd5STejun Heo goto out_free_areas; 1591c8826dd5STejun Heo } 1592c8826dd5STejun Heo areas[group] = ptr; 1593c8826dd5STejun Heo 1594c8826dd5STejun Heo base = min(ptr, base); 1595c8826dd5STejun Heo 1596c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 1597c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 1598c8826dd5STejun Heo /* unused unit, free whole */ 1599c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 1600c8826dd5STejun Heo continue; 1601c8826dd5STejun Heo } 1602c8826dd5STejun Heo /* copy and return the unused part */ 1603fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 1604c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 1605c8826dd5STejun Heo } 160666c3a757STejun Heo } 160766c3a757STejun Heo 1608c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 16096ea529a2STejun Heo max_distance = 0; 16106ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 1611c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 16121a0c3298STejun Heo max_distance = max_t(size_t, max_distance, 16131a0c3298STejun Heo ai->groups[group].base_offset); 16146ea529a2STejun Heo } 16156ea529a2STejun Heo max_distance += ai->unit_size; 16166ea529a2STejun Heo 16176ea529a2STejun Heo /* warn if maximum distance is further than 75% of vmalloc space */ 16186ea529a2STejun Heo if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) { 16191a0c3298STejun Heo pr_warning("PERCPU: max_distance=0x%zx too large for 
vmalloc " 16206ea529a2STejun Heo "space 0x%lx\n", 16216ea529a2STejun Heo max_distance, VMALLOC_END - VMALLOC_START); 16226ea529a2STejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 16236ea529a2STejun Heo /* and fail if we have fallback */ 16246ea529a2STejun Heo rc = -EINVAL; 16256ea529a2STejun Heo goto out_free; 16266ea529a2STejun Heo #endif 16276ea529a2STejun Heo } 1628c8826dd5STejun Heo 1629004018e2STejun Heo pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 1630fd1e8a1fSTejun Heo PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 1631fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 163266c3a757STejun Heo 1633fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, base); 1634c8826dd5STejun Heo goto out_free; 1635c8826dd5STejun Heo 1636c8826dd5STejun Heo out_free_areas: 1637c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 1638c8826dd5STejun Heo free_fn(areas[group], 1639c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 1640c8826dd5STejun Heo out_free: 1641fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 1642c8826dd5STejun Heo if (areas) 1643c8826dd5STejun Heo free_bootmem(__pa(areas), areas_size); 1644fb435d52STejun Heo return rc; 1645d4b95f80STejun Heo } 164608fc4580STejun Heo #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK || 164708fc4580STejun Heo !CONFIG_HAVE_SETUP_PER_CPU_AREA */ 1648d4b95f80STejun Heo 164908fc4580STejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1650d4b95f80STejun Heo /** 165100ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 1652d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1653d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 1654d4b95f80STejun Heo * @free_fn: function to free percpu page, always called with PAGE_SIZE 1655d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 1656d4b95f80STejun Heo * 165700ae4064STejun Heo * This is a helper to ease setting up page-remapped first percpu 165800ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 1659d4b95f80STejun Heo * 1660d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 1661d4b95f80STejun Heo * page-by-page into vmalloc area. 1662d4b95f80STejun Heo * 1663d4b95f80STejun Heo * RETURNS: 1664fb435d52STejun Heo * 0 on success, -errno on failure.
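 *
 * Example (sketch, helper names hypothetical):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   page_alloc_fn, page_free_fn,
 *				   pte_populate_fn);
 *
 * with @alloc_fn/@free_fn working on single pages and
 * @populate_pte_fn ensuring the page tables for a given address are
 * instantiated.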
1665d4b95f80STejun Heo */ 1666fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 1667d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1668d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 1669d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 1670d4b95f80STejun Heo { 16718f05a6a6STejun Heo static struct vm_struct vm; 1672fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 167300ae4064STejun Heo char psize_str[16]; 1674ce3141a2STejun Heo int unit_pages; 1675d4b95f80STejun Heo size_t pages_size; 1676ce3141a2STejun Heo struct page **pages; 1677fb435d52STejun Heo int unit, i, j, rc; 1678d4b95f80STejun Heo 167900ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 168000ae4064STejun Heo 16814ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 1682fd1e8a1fSTejun Heo if (IS_ERR(ai)) 1683fd1e8a1fSTejun Heo return PTR_ERR(ai); 1684fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 1685fd1e8a1fSTejun Heo BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); 1686fd1e8a1fSTejun Heo 1687fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 1688d4b95f80STejun Heo 1689d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 1690fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 1691fd1e8a1fSTejun Heo sizeof(pages[0])); 1692ce3141a2STejun Heo pages = alloc_bootmem(pages_size); 1693d4b95f80STejun Heo 16948f05a6a6STejun Heo /* allocate pages */ 1695d4b95f80STejun Heo j = 0; 1696fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) 1697ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) { 1698fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 1699d4b95f80STejun Heo void *ptr; 1700d4b95f80STejun Heo 17013cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 1702d4b95f80STejun Heo if (!ptr) { 170300ae4064STejun Heo pr_warning("PERCPU: failed to allocate %s page " 170400ae4064STejun Heo "for cpu%u\n", psize_str, cpu); 1705d4b95f80STejun Heo goto enomem; 1706d4b95f80STejun Heo } 1707ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 1708d4b95f80STejun Heo } 1709d4b95f80STejun Heo 17108f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 17118f05a6a6STejun Heo vm.flags = VM_ALLOC; 1712fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 17138f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 17148f05a6a6STejun Heo 1715fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 17161d9d3257STejun Heo unsigned long unit_addr = 1717fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 17188f05a6a6STejun Heo 1719ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 17208f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 17218f05a6a6STejun Heo 17228f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 1723fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 1724ce3141a2STejun Heo unit_pages); 1725fb435d52STejun Heo if (rc < 0) 1726fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 17278f05a6a6STejun Heo 17288f05a6a6STejun Heo /* 17298f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 17308f05a6a6STejun Heo * cache for the linear mapping here - something 17318f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 17328f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 17338f05a6a6STejun Heo * data structures are not set up yet. 
17348f05a6a6STejun Heo */ 17358f05a6a6STejun Heo 17368f05a6a6STejun Heo /* copy static data */ 1737fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 173866c3a757STejun Heo } 173966c3a757STejun Heo 174066c3a757STejun Heo /* we're ready, commit */ 17411d9d3257STejun Heo pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", 1742fd1e8a1fSTejun Heo unit_pages, psize_str, vm.addr, ai->static_size, 1743fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 174466c3a757STejun Heo 1745fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, vm.addr); 1746d4b95f80STejun Heo goto out_free_ar; 1747d4b95f80STejun Heo 1748d4b95f80STejun Heo enomem: 1749d4b95f80STejun Heo while (--j >= 0) 1750ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 1751fb435d52STejun Heo rc = -ENOMEM; 1752d4b95f80STejun Heo out_free_ar: 1753ce3141a2STejun Heo free_bootmem(__pa(pages), pages_size); 1754fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 1755fb435d52STejun Heo return rc; 175666c3a757STejun Heo } 175708fc4580STejun Heo #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ 1758d4b95f80STejun Heo 17598c4bfc6eSTejun Heo /* 1760e74e3962STejun Heo * Generic percpu area setup. 1761e74e3962STejun Heo * 1762e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 1763e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 1764e74e3962STejun Heo * important because many archs have addressing restrictions and might 1765e74e3962STejun Heo * fail if the percpu area is located far away from the previous 1766e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 1767e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 1768e74e3962STejun Heo * on the physical linear memory mapping which uses large page 1769e74e3962STejun Heo * mappings on applicable archs. 1770e74e3962STejun Heo */ 1771e74e3962STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 1772e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 1773e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 1774e74e3962STejun Heo 1775c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 1776c8826dd5STejun Heo size_t align) 1777c8826dd5STejun Heo { 1778c8826dd5STejun Heo return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS)); 1779c8826dd5STejun Heo } 1780c8826dd5STejun Heo 1781c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 1782c8826dd5STejun Heo { 1783c8826dd5STejun Heo free_bootmem(__pa(ptr), size); 1784c8826dd5STejun Heo } 1785c8826dd5STejun Heo 1786e74e3962STejun Heo void __init setup_per_cpu_areas(void) 1787e74e3962STejun Heo { 1788e74e3962STejun Heo unsigned long delta; 1789e74e3962STejun Heo unsigned int cpu; 1790fb435d52STejun Heo int rc; 1791e74e3962STejun Heo 1792e74e3962STejun Heo /* 1793e74e3962STejun Heo * Always reserve area for module percpu variables. That's 1794e74e3962STejun Heo * what the legacy allocator did. 
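	 *
	 * Once the first chunk is embedded, the loop below derives
	 * __per_cpu_offset[] so that an address in the kernel image's
	 * static percpu section plus a cpu's offset lands in that
	 * cpu's unit within the first chunk.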
1795e74e3962STejun Heo */ 1796fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 1797c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 1798c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 1799fb435d52STejun Heo if (rc < 0) 1800e74e3962STejun Heo panic("Failed to initialize percpu areas."); 1801e74e3962STejun Heo 1802e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 1803e74e3962STejun Heo for_each_possible_cpu(cpu) 1804fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 1805e74e3962STejun Heo } 1806e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 1807*099a19d9STejun Heo 1808*099a19d9STejun Heo /* 1809*099a19d9STejun Heo * First and reserved chunks are initialized with temporary allocation 1810*099a19d9STejun Heo * map in initdata so that they can be used before slab is online. 1811*099a19d9STejun Heo * This function is called after slab is brought up and replaces those 1812*099a19d9STejun Heo * with properly allocated maps. 1813*099a19d9STejun Heo */ 1814*099a19d9STejun Heo void __init percpu_init_late(void) 1815*099a19d9STejun Heo { 1816*099a19d9STejun Heo struct pcpu_chunk *target_chunks[] = 1817*099a19d9STejun Heo { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; 1818*099a19d9STejun Heo struct pcpu_chunk *chunk; 1819*099a19d9STejun Heo unsigned long flags; 1820*099a19d9STejun Heo int i; 1821*099a19d9STejun Heo 1822*099a19d9STejun Heo for (i = 0; (chunk = target_chunks[i]); i++) { 1823*099a19d9STejun Heo int *map; 1824*099a19d9STejun Heo const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); 1825*099a19d9STejun Heo 1826*099a19d9STejun Heo BUILD_BUG_ON(size > PAGE_SIZE); 1827*099a19d9STejun Heo 1828*099a19d9STejun Heo map = pcpu_mem_alloc(size); 1829*099a19d9STejun Heo BUG_ON(!map); 1830*099a19d9STejun Heo 1831*099a19d9STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1832*099a19d9STejun Heo memcpy(map, chunk->map, size); 1833*099a19d9STejun Heo chunk->map = map; 1834*099a19d9STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1835*099a19d9STejun Heo } 1836*099a19d9STejun Heo } 1837
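
/*
 * Usage sketch (editorial illustration, not part of the allocator):
 * once the first chunk is up, dynamic percpu memory is served through
 * the regular percpu API backed by this allocator.  The function
 * below is hypothetical and only demonstrates the call pattern.
 */
static int __maybe_unused pcpu_usage_example(void)
{
	/* one zeroed int per possible cpu */
	int __percpu *cnt = alloc_percpu(int);
	unsigned int cpu;
	int total = 0;

	if (!cnt)
		return -ENOMEM;

	/* each cpu's instance is reached through per_cpu_ptr() */
	for_each_possible_cpu(cpu)
		total += *per_cpu_ptr(cnt, cpu);

	free_percpu(cnt);
	return total;
}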