1fbf59bc9STejun Heo /* 2fbf59bc9STejun Heo * linux/mm/percpu.c - percpu memory allocator 3fbf59bc9STejun Heo * 4fbf59bc9STejun Heo * Copyright (C) 2009 SUSE Linux Products GmbH 5fbf59bc9STejun Heo * Copyright (C) 2009 Tejun Heo <tj@kernel.org> 6fbf59bc9STejun Heo * 7fbf59bc9STejun Heo * This file is released under the GPLv2. 8fbf59bc9STejun Heo * 9fbf59bc9STejun Heo * This is percpu allocator which can handle both static and dynamic 10fbf59bc9STejun Heo * areas. Percpu areas are allocated in chunks in vmalloc area. Each 11fbf59bc9STejun Heo * chunk is consisted of num_possible_cpus() units and the first chunk 12fbf59bc9STejun Heo * is used for static percpu variables in the kernel image (special 13fbf59bc9STejun Heo * boot time alloc/init handling necessary as these areas need to be 14fbf59bc9STejun Heo * brought up before allocation services are running). Unit grows as 15fbf59bc9STejun Heo * necessary and all units grow or shrink in unison. When a chunk is 16fbf59bc9STejun Heo * filled up, another chunk is allocated. ie. in vmalloc area 17fbf59bc9STejun Heo * 18fbf59bc9STejun Heo * c0 c1 c2 19fbf59bc9STejun Heo * ------------------- ------------------- ------------ 20fbf59bc9STejun Heo * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u 21fbf59bc9STejun Heo * ------------------- ...... ------------------- .... ------------ 22fbf59bc9STejun Heo * 23fbf59bc9STejun Heo * Allocation is done in offset-size areas of single unit space. Ie, 24fbf59bc9STejun Heo * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, 25fbf59bc9STejun Heo * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring 26fbf59bc9STejun Heo * percpu base registers UNIT_SIZE apart. 27fbf59bc9STejun Heo * 28fbf59bc9STejun Heo * There are usually many small percpu allocations many of them as 29fbf59bc9STejun Heo * small as 4 bytes. The allocator organizes chunks into lists 30fbf59bc9STejun Heo * according to free size and tries to allocate from the fullest one. 31fbf59bc9STejun Heo * Each chunk keeps the maximum contiguous area size hint which is 32fbf59bc9STejun Heo * guaranteed to be eqaul to or larger than the maximum contiguous 33fbf59bc9STejun Heo * area in the chunk. This helps the allocator not to iterate the 34fbf59bc9STejun Heo * chunk maps unnecessarily. 35fbf59bc9STejun Heo * 36fbf59bc9STejun Heo * Allocation state in each chunk is kept using an array of integers 37fbf59bc9STejun Heo * on chunk->map. A positive value in the map represents a free 38fbf59bc9STejun Heo * region and negative allocated. Allocation inside a chunk is done 39fbf59bc9STejun Heo * by scanning this map sequentially and serving the first matching 40fbf59bc9STejun Heo * entry. This is mostly copied from the percpu_modalloc() allocator. 41fbf59bc9STejun Heo * Chunks are also linked into a rb tree to ease address to chunk 42fbf59bc9STejun Heo * mapping during free. 43fbf59bc9STejun Heo * 44fbf59bc9STejun Heo * To use this allocator, arch code should do the followings. 45fbf59bc9STejun Heo * 46fbf59bc9STejun Heo * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA 47fbf59bc9STejun Heo * 48fbf59bc9STejun Heo * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 49e0100983STejun Heo * regular address to percpu pointer and back if they need to be 50e0100983STejun Heo * different from the default 51fbf59bc9STejun Heo * 528d408b4bSTejun Heo * - use pcpu_setup_first_chunk() during percpu area initialization to 538d408b4bSTejun Heo * setup the first chunk containing the kernel static percpu area 54fbf59bc9STejun Heo */ 55fbf59bc9STejun Heo 56fbf59bc9STejun Heo #include <linux/bitmap.h> 57fbf59bc9STejun Heo #include <linux/bootmem.h> 58fbf59bc9STejun Heo #include <linux/list.h> 59fbf59bc9STejun Heo #include <linux/mm.h> 60fbf59bc9STejun Heo #include <linux/module.h> 61fbf59bc9STejun Heo #include <linux/mutex.h> 62fbf59bc9STejun Heo #include <linux/percpu.h> 63fbf59bc9STejun Heo #include <linux/pfn.h> 64fbf59bc9STejun Heo #include <linux/rbtree.h> 65fbf59bc9STejun Heo #include <linux/slab.h> 66ccea34b5STejun Heo #include <linux/spinlock.h> 67fbf59bc9STejun Heo #include <linux/vmalloc.h> 68a56dbddfSTejun Heo #include <linux/workqueue.h> 69fbf59bc9STejun Heo 70fbf59bc9STejun Heo #include <asm/cacheflush.h> 71e0100983STejun Heo #include <asm/sections.h> 72fbf59bc9STejun Heo #include <asm/tlbflush.h> 73fbf59bc9STejun Heo 74fbf59bc9STejun Heo #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ 75fbf59bc9STejun Heo #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ 76fbf59bc9STejun Heo 77e0100983STejun Heo /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 78e0100983STejun Heo #ifndef __addr_to_pcpu_ptr 79e0100983STejun Heo #define __addr_to_pcpu_ptr(addr) \ 80e0100983STejun Heo (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \ 81e0100983STejun Heo + (unsigned long)__per_cpu_start) 82e0100983STejun Heo #endif 83e0100983STejun Heo #ifndef __pcpu_ptr_to_addr 84e0100983STejun Heo #define __pcpu_ptr_to_addr(ptr) \ 85e0100983STejun Heo (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \ 86e0100983STejun Heo - (unsigned long)__per_cpu_start) 87e0100983STejun Heo #endif 88e0100983STejun Heo 89fbf59bc9STejun Heo struct pcpu_chunk { 90fbf59bc9STejun Heo struct list_head list; /* linked to pcpu_slot lists */ 91fbf59bc9STejun Heo struct rb_node rb_node; /* key is chunk->vm->addr */ 92fbf59bc9STejun Heo int free_size; /* free bytes in the chunk */ 93fbf59bc9STejun Heo int contig_hint; /* max contiguous size hint */ 94fbf59bc9STejun Heo struct vm_struct *vm; /* mapped vmalloc region */ 95fbf59bc9STejun Heo int map_used; /* # of map entries used */ 96fbf59bc9STejun Heo int map_alloc; /* # of map entries allocated */ 97fbf59bc9STejun Heo int *map; /* allocation map */ 988d408b4bSTejun Heo bool immutable; /* no [de]population allowed */ 993e24aa58STejun Heo struct page **page; /* points to page array */ 1003e24aa58STejun Heo struct page *page_ar[]; /* #cpus * UNIT_PAGES */ 101fbf59bc9STejun Heo }; 102fbf59bc9STejun Heo 10340150d37STejun Heo static int pcpu_unit_pages __read_mostly; 10440150d37STejun Heo static int pcpu_unit_size __read_mostly; 10540150d37STejun Heo static int pcpu_chunk_size __read_mostly; 10640150d37STejun Heo static int pcpu_nr_slots __read_mostly; 10740150d37STejun Heo static size_t pcpu_chunk_struct_size __read_mostly; 108fbf59bc9STejun Heo 109fbf59bc9STejun Heo /* the address of the first chunk which starts with the kernel static area */ 11040150d37STejun Heo void *pcpu_base_addr __read_mostly; 111fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(pcpu_base_addr); 112fbf59bc9STejun Heo 113*ae9e6bc9STejun Heo /* 114*ae9e6bc9STejun Heo * The first chunk which always exists. Note that unlike other 115*ae9e6bc9STejun Heo * chunks, this one can be allocated and mapped in several different 116*ae9e6bc9STejun Heo * ways and thus often doesn't live in the vmalloc area. 117*ae9e6bc9STejun Heo */ 118*ae9e6bc9STejun Heo static struct pcpu_chunk *pcpu_first_chunk; 119*ae9e6bc9STejun Heo 120*ae9e6bc9STejun Heo /* 121*ae9e6bc9STejun Heo * Optional reserved chunk. This chunk reserves part of the first 122*ae9e6bc9STejun Heo * chunk and serves it for reserved allocations. The amount of 123*ae9e6bc9STejun Heo * reserved offset is in pcpu_reserved_chunk_limit. When reserved 124*ae9e6bc9STejun Heo * area doesn't exist, the following variables contain NULL and 0 125*ae9e6bc9STejun Heo * respectively. 126*ae9e6bc9STejun Heo */ 127edcb4639STejun Heo static struct pcpu_chunk *pcpu_reserved_chunk; 128edcb4639STejun Heo static int pcpu_reserved_chunk_limit; 129edcb4639STejun Heo 130fbf59bc9STejun Heo /* 131ccea34b5STejun Heo * Synchronization rules. 132fbf59bc9STejun Heo * 133ccea34b5STejun Heo * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former 134ccea34b5STejun Heo * protects allocation/reclaim paths, chunks and chunk->page arrays. 135ccea34b5STejun Heo * The latter is a spinlock and protects the index data structures - 136ccea34b5STejun Heo * chunk slots, rbtree, chunks and area maps in chunks. 137fbf59bc9STejun Heo * 138ccea34b5STejun Heo * During allocation, pcpu_alloc_mutex is kept locked all the time and 139ccea34b5STejun Heo * pcpu_lock is grabbed and released as necessary. All actual memory 140ccea34b5STejun Heo * allocations are done using GFP_KERNEL with pcpu_lock released. 141ccea34b5STejun Heo * 142ccea34b5STejun Heo * Free path accesses and alters only the index data structures, so it 143ccea34b5STejun Heo * can be safely called from atomic context. When memory needs to be 144ccea34b5STejun Heo * returned to the system, free path schedules reclaim_work which 145ccea34b5STejun Heo * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be 146ccea34b5STejun Heo * reclaimed, release both locks and frees the chunks. Note that it's 147ccea34b5STejun Heo * necessary to grab both locks to remove a chunk from circulation as 148ccea34b5STejun Heo * allocation path might be referencing the chunk with only 149ccea34b5STejun Heo * pcpu_alloc_mutex locked. 150fbf59bc9STejun Heo */ 151ccea34b5STejun Heo static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ 152ccea34b5STejun Heo static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */ 153fbf59bc9STejun Heo 15440150d37STejun Heo static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ 155fbf59bc9STejun Heo static struct rb_root pcpu_addr_root = RB_ROOT; /* chunks by address */ 156fbf59bc9STejun Heo 157a56dbddfSTejun Heo /* reclaim work to release fully free chunks, scheduled from free path */ 158a56dbddfSTejun Heo static void pcpu_reclaim(struct work_struct *work); 159a56dbddfSTejun Heo static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); 160a56dbddfSTejun Heo 161d9b55eebSTejun Heo static int __pcpu_size_to_slot(int size) 162fbf59bc9STejun Heo { 163cae3aeb8STejun Heo int highbit = fls(size); /* size is in bytes */ 164fbf59bc9STejun Heo return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 165fbf59bc9STejun Heo } 166fbf59bc9STejun Heo 167d9b55eebSTejun Heo static int pcpu_size_to_slot(int size) 168d9b55eebSTejun Heo { 169d9b55eebSTejun Heo if (size == pcpu_unit_size) 170d9b55eebSTejun Heo return pcpu_nr_slots - 1; 171d9b55eebSTejun Heo return __pcpu_size_to_slot(size); 172d9b55eebSTejun Heo } 173d9b55eebSTejun Heo 174fbf59bc9STejun Heo static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 175fbf59bc9STejun Heo { 176fbf59bc9STejun Heo if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) 177fbf59bc9STejun Heo return 0; 178fbf59bc9STejun Heo 179fbf59bc9STejun Heo return pcpu_size_to_slot(chunk->free_size); 180fbf59bc9STejun Heo } 181fbf59bc9STejun Heo 182fbf59bc9STejun Heo static int pcpu_page_idx(unsigned int cpu, int page_idx) 183fbf59bc9STejun Heo { 184d9b55eebSTejun Heo return cpu * pcpu_unit_pages + page_idx; 185fbf59bc9STejun Heo } 186fbf59bc9STejun Heo 187fbf59bc9STejun Heo static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk, 188fbf59bc9STejun Heo unsigned int cpu, int page_idx) 189fbf59bc9STejun Heo { 190fbf59bc9STejun Heo return &chunk->page[pcpu_page_idx(cpu, page_idx)]; 191fbf59bc9STejun Heo } 192fbf59bc9STejun Heo 193fbf59bc9STejun Heo static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, 194fbf59bc9STejun Heo unsigned int cpu, int page_idx) 195fbf59bc9STejun Heo { 196fbf59bc9STejun Heo return (unsigned long)chunk->vm->addr + 197fbf59bc9STejun Heo (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT); 198fbf59bc9STejun Heo } 199fbf59bc9STejun Heo 200fbf59bc9STejun Heo static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk, 201fbf59bc9STejun Heo int page_idx) 202fbf59bc9STejun Heo { 203fbf59bc9STejun Heo return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL; 204fbf59bc9STejun Heo } 205fbf59bc9STejun Heo 206fbf59bc9STejun Heo /** 2071880d93bSTejun Heo * pcpu_mem_alloc - allocate memory 2081880d93bSTejun Heo * @size: bytes to allocate 209fbf59bc9STejun Heo * 2101880d93bSTejun Heo * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 2111880d93bSTejun Heo * kzalloc() is used; otherwise, vmalloc() is used. The returned 2121880d93bSTejun Heo * memory is always zeroed. 213fbf59bc9STejun Heo * 214ccea34b5STejun Heo * CONTEXT: 215ccea34b5STejun Heo * Does GFP_KERNEL allocation. 216ccea34b5STejun Heo * 217fbf59bc9STejun Heo * RETURNS: 2181880d93bSTejun Heo * Pointer to the allocated area on success, NULL on failure. 219fbf59bc9STejun Heo */ 2201880d93bSTejun Heo static void *pcpu_mem_alloc(size_t size) 221fbf59bc9STejun Heo { 222fbf59bc9STejun Heo if (size <= PAGE_SIZE) 2231880d93bSTejun Heo return kzalloc(size, GFP_KERNEL); 2241880d93bSTejun Heo else { 2251880d93bSTejun Heo void *ptr = vmalloc(size); 2261880d93bSTejun Heo if (ptr) 2271880d93bSTejun Heo memset(ptr, 0, size); 2281880d93bSTejun Heo return ptr; 2291880d93bSTejun Heo } 2301880d93bSTejun Heo } 231fbf59bc9STejun Heo 2321880d93bSTejun Heo /** 2331880d93bSTejun Heo * pcpu_mem_free - free memory 2341880d93bSTejun Heo * @ptr: memory to free 2351880d93bSTejun Heo * @size: size of the area 2361880d93bSTejun Heo * 2371880d93bSTejun Heo * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc(). 2381880d93bSTejun Heo */ 2391880d93bSTejun Heo static void pcpu_mem_free(void *ptr, size_t size) 2401880d93bSTejun Heo { 2411880d93bSTejun Heo if (size <= PAGE_SIZE) 2421880d93bSTejun Heo kfree(ptr); 2431880d93bSTejun Heo else 2441880d93bSTejun Heo vfree(ptr); 245fbf59bc9STejun Heo } 246fbf59bc9STejun Heo 247fbf59bc9STejun Heo /** 248fbf59bc9STejun Heo * pcpu_chunk_relocate - put chunk in the appropriate chunk slot 249fbf59bc9STejun Heo * @chunk: chunk of interest 250fbf59bc9STejun Heo * @oslot: the previous slot it was on 251fbf59bc9STejun Heo * 252fbf59bc9STejun Heo * This function is called after an allocation or free changed @chunk. 253fbf59bc9STejun Heo * New slot according to the changed state is determined and @chunk is 254edcb4639STejun Heo * moved to the slot. Note that the reserved chunk is never put on 255edcb4639STejun Heo * chunk slots. 256ccea34b5STejun Heo * 257ccea34b5STejun Heo * CONTEXT: 258ccea34b5STejun Heo * pcpu_lock. 259fbf59bc9STejun Heo */ 260fbf59bc9STejun Heo static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) 261fbf59bc9STejun Heo { 262fbf59bc9STejun Heo int nslot = pcpu_chunk_slot(chunk); 263fbf59bc9STejun Heo 264edcb4639STejun Heo if (chunk != pcpu_reserved_chunk && oslot != nslot) { 265fbf59bc9STejun Heo if (oslot < nslot) 266fbf59bc9STejun Heo list_move(&chunk->list, &pcpu_slot[nslot]); 267fbf59bc9STejun Heo else 268fbf59bc9STejun Heo list_move_tail(&chunk->list, &pcpu_slot[nslot]); 269fbf59bc9STejun Heo } 270fbf59bc9STejun Heo } 271fbf59bc9STejun Heo 272fbf59bc9STejun Heo static struct rb_node **pcpu_chunk_rb_search(void *addr, 273fbf59bc9STejun Heo struct rb_node **parentp) 274fbf59bc9STejun Heo { 275fbf59bc9STejun Heo struct rb_node **p = &pcpu_addr_root.rb_node; 276fbf59bc9STejun Heo struct rb_node *parent = NULL; 277fbf59bc9STejun Heo struct pcpu_chunk *chunk; 278fbf59bc9STejun Heo 279fbf59bc9STejun Heo while (*p) { 280fbf59bc9STejun Heo parent = *p; 281fbf59bc9STejun Heo chunk = rb_entry(parent, struct pcpu_chunk, rb_node); 282fbf59bc9STejun Heo 283fbf59bc9STejun Heo if (addr < chunk->vm->addr) 284fbf59bc9STejun Heo p = &(*p)->rb_left; 285fbf59bc9STejun Heo else if (addr > chunk->vm->addr) 286fbf59bc9STejun Heo p = &(*p)->rb_right; 287fbf59bc9STejun Heo else 288fbf59bc9STejun Heo break; 289fbf59bc9STejun Heo } 290fbf59bc9STejun Heo 291fbf59bc9STejun Heo if (parentp) 292fbf59bc9STejun Heo *parentp = parent; 293fbf59bc9STejun Heo return p; 294fbf59bc9STejun Heo } 295fbf59bc9STejun Heo 296fbf59bc9STejun Heo /** 297fbf59bc9STejun Heo * pcpu_chunk_addr_search - search for chunk containing specified address 298fbf59bc9STejun Heo * @addr: address to search for 299fbf59bc9STejun Heo * 300fbf59bc9STejun Heo * Look for chunk which might contain @addr. More specifically, it 301fbf59bc9STejun Heo * searchs for the chunk with the highest start address which isn't 302fbf59bc9STejun Heo * beyond @addr. 303fbf59bc9STejun Heo * 304ccea34b5STejun Heo * CONTEXT: 305ccea34b5STejun Heo * pcpu_lock. 306ccea34b5STejun Heo * 307fbf59bc9STejun Heo * RETURNS: 308fbf59bc9STejun Heo * The address of the found chunk. 309fbf59bc9STejun Heo */ 310fbf59bc9STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 311fbf59bc9STejun Heo { 312*ae9e6bc9STejun Heo void *first_start = pcpu_first_chunk->vm->addr; 313fbf59bc9STejun Heo struct rb_node *n, *parent; 314fbf59bc9STejun Heo struct pcpu_chunk *chunk; 315fbf59bc9STejun Heo 316*ae9e6bc9STejun Heo /* is it in the first chunk? */ 317*ae9e6bc9STejun Heo if (addr >= first_start && addr < first_start + pcpu_chunk_size) { 318*ae9e6bc9STejun Heo /* is it in the reserved area? */ 319*ae9e6bc9STejun Heo if (addr < first_start + pcpu_reserved_chunk_limit) 320edcb4639STejun Heo return pcpu_reserved_chunk; 321*ae9e6bc9STejun Heo return pcpu_first_chunk; 322edcb4639STejun Heo } 323edcb4639STejun Heo 324edcb4639STejun Heo /* nah... search the regular ones */ 325fbf59bc9STejun Heo n = *pcpu_chunk_rb_search(addr, &parent); 326fbf59bc9STejun Heo if (!n) { 327fbf59bc9STejun Heo /* no exactly matching chunk, the parent is the closest */ 328fbf59bc9STejun Heo n = parent; 329fbf59bc9STejun Heo BUG_ON(!n); 330fbf59bc9STejun Heo } 331fbf59bc9STejun Heo chunk = rb_entry(n, struct pcpu_chunk, rb_node); 332fbf59bc9STejun Heo 333fbf59bc9STejun Heo if (addr < chunk->vm->addr) { 334fbf59bc9STejun Heo /* the parent was the next one, look for the previous one */ 335fbf59bc9STejun Heo n = rb_prev(n); 336fbf59bc9STejun Heo BUG_ON(!n); 337fbf59bc9STejun Heo chunk = rb_entry(n, struct pcpu_chunk, rb_node); 338fbf59bc9STejun Heo } 339fbf59bc9STejun Heo 340fbf59bc9STejun Heo return chunk; 341fbf59bc9STejun Heo } 342fbf59bc9STejun Heo 343fbf59bc9STejun Heo /** 344fbf59bc9STejun Heo * pcpu_chunk_addr_insert - insert chunk into address rb tree 345fbf59bc9STejun Heo * @new: chunk to insert 346fbf59bc9STejun Heo * 347fbf59bc9STejun Heo * Insert @new into address rb tree. 348ccea34b5STejun Heo * 349ccea34b5STejun Heo * CONTEXT: 350ccea34b5STejun Heo * pcpu_lock. 351fbf59bc9STejun Heo */ 352fbf59bc9STejun Heo static void pcpu_chunk_addr_insert(struct pcpu_chunk *new) 353fbf59bc9STejun Heo { 354fbf59bc9STejun Heo struct rb_node **p, *parent; 355fbf59bc9STejun Heo 356fbf59bc9STejun Heo p = pcpu_chunk_rb_search(new->vm->addr, &parent); 357fbf59bc9STejun Heo BUG_ON(*p); 358fbf59bc9STejun Heo rb_link_node(&new->rb_node, parent, p); 359fbf59bc9STejun Heo rb_insert_color(&new->rb_node, &pcpu_addr_root); 360fbf59bc9STejun Heo } 361fbf59bc9STejun Heo 362fbf59bc9STejun Heo /** 3639f7dcf22STejun Heo * pcpu_extend_area_map - extend area map for allocation 3649f7dcf22STejun Heo * @chunk: target chunk 3659f7dcf22STejun Heo * 3669f7dcf22STejun Heo * Extend area map of @chunk so that it can accomodate an allocation. 3679f7dcf22STejun Heo * A single allocation can split an area into three areas, so this 3689f7dcf22STejun Heo * function makes sure that @chunk->map has at least two extra slots. 3699f7dcf22STejun Heo * 370ccea34b5STejun Heo * CONTEXT: 371ccea34b5STejun Heo * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired 372ccea34b5STejun Heo * if area map is extended. 373ccea34b5STejun Heo * 3749f7dcf22STejun Heo * RETURNS: 3759f7dcf22STejun Heo * 0 if noop, 1 if successfully extended, -errno on failure. 3769f7dcf22STejun Heo */ 3779f7dcf22STejun Heo static int pcpu_extend_area_map(struct pcpu_chunk *chunk) 3789f7dcf22STejun Heo { 3799f7dcf22STejun Heo int new_alloc; 3809f7dcf22STejun Heo int *new; 3819f7dcf22STejun Heo size_t size; 3829f7dcf22STejun Heo 3839f7dcf22STejun Heo /* has enough? */ 3849f7dcf22STejun Heo if (chunk->map_alloc >= chunk->map_used + 2) 3859f7dcf22STejun Heo return 0; 3869f7dcf22STejun Heo 387ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 388ccea34b5STejun Heo 3899f7dcf22STejun Heo new_alloc = PCPU_DFL_MAP_ALLOC; 3909f7dcf22STejun Heo while (new_alloc < chunk->map_used + 2) 3919f7dcf22STejun Heo new_alloc *= 2; 3929f7dcf22STejun Heo 3939f7dcf22STejun Heo new = pcpu_mem_alloc(new_alloc * sizeof(new[0])); 394ccea34b5STejun Heo if (!new) { 395ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 3969f7dcf22STejun Heo return -ENOMEM; 397ccea34b5STejun Heo } 398ccea34b5STejun Heo 399ccea34b5STejun Heo /* 400ccea34b5STejun Heo * Acquire pcpu_lock and switch to new area map. Only free 401ccea34b5STejun Heo * could have happened inbetween, so map_used couldn't have 402ccea34b5STejun Heo * grown. 403ccea34b5STejun Heo */ 404ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 405ccea34b5STejun Heo BUG_ON(new_alloc < chunk->map_used + 2); 4069f7dcf22STejun Heo 4079f7dcf22STejun Heo size = chunk->map_alloc * sizeof(chunk->map[0]); 4089f7dcf22STejun Heo memcpy(new, chunk->map, size); 4099f7dcf22STejun Heo 4109f7dcf22STejun Heo /* 4119f7dcf22STejun Heo * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is 4129f7dcf22STejun Heo * one of the first chunks and still using static map. 4139f7dcf22STejun Heo */ 4149f7dcf22STejun Heo if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC) 4159f7dcf22STejun Heo pcpu_mem_free(chunk->map, size); 4169f7dcf22STejun Heo 4179f7dcf22STejun Heo chunk->map_alloc = new_alloc; 4189f7dcf22STejun Heo chunk->map = new; 4199f7dcf22STejun Heo return 0; 4209f7dcf22STejun Heo } 4219f7dcf22STejun Heo 4229f7dcf22STejun Heo /** 423fbf59bc9STejun Heo * pcpu_split_block - split a map block 424fbf59bc9STejun Heo * @chunk: chunk of interest 425fbf59bc9STejun Heo * @i: index of map block to split 426cae3aeb8STejun Heo * @head: head size in bytes (can be 0) 427cae3aeb8STejun Heo * @tail: tail size in bytes (can be 0) 428fbf59bc9STejun Heo * 429fbf59bc9STejun Heo * Split the @i'th map block into two or three blocks. If @head is 430fbf59bc9STejun Heo * non-zero, @head bytes block is inserted before block @i moving it 431fbf59bc9STejun Heo * to @i+1 and reducing its size by @head bytes. 432fbf59bc9STejun Heo * 433fbf59bc9STejun Heo * If @tail is non-zero, the target block, which can be @i or @i+1 434fbf59bc9STejun Heo * depending on @head, is reduced by @tail bytes and @tail byte block 435fbf59bc9STejun Heo * is inserted after the target block. 436fbf59bc9STejun Heo * 4379f7dcf22STejun Heo * @chunk->map must have enough free slots to accomodate the split. 438ccea34b5STejun Heo * 439ccea34b5STejun Heo * CONTEXT: 440ccea34b5STejun Heo * pcpu_lock. 441fbf59bc9STejun Heo */ 4429f7dcf22STejun Heo static void pcpu_split_block(struct pcpu_chunk *chunk, int i, 4439f7dcf22STejun Heo int head, int tail) 444fbf59bc9STejun Heo { 445fbf59bc9STejun Heo int nr_extra = !!head + !!tail; 446fbf59bc9STejun Heo 4479f7dcf22STejun Heo BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra); 448fbf59bc9STejun Heo 4499f7dcf22STejun Heo /* insert new subblocks */ 450fbf59bc9STejun Heo memmove(&chunk->map[i + nr_extra], &chunk->map[i], 451fbf59bc9STejun Heo sizeof(chunk->map[0]) * (chunk->map_used - i)); 452fbf59bc9STejun Heo chunk->map_used += nr_extra; 453fbf59bc9STejun Heo 454fbf59bc9STejun Heo if (head) { 455fbf59bc9STejun Heo chunk->map[i + 1] = chunk->map[i] - head; 456fbf59bc9STejun Heo chunk->map[i++] = head; 457fbf59bc9STejun Heo } 458fbf59bc9STejun Heo if (tail) { 459fbf59bc9STejun Heo chunk->map[i++] -= tail; 460fbf59bc9STejun Heo chunk->map[i] = tail; 461fbf59bc9STejun Heo } 462fbf59bc9STejun Heo } 463fbf59bc9STejun Heo 464fbf59bc9STejun Heo /** 465fbf59bc9STejun Heo * pcpu_alloc_area - allocate area from a pcpu_chunk 466fbf59bc9STejun Heo * @chunk: chunk of interest 467cae3aeb8STejun Heo * @size: wanted size in bytes 468fbf59bc9STejun Heo * @align: wanted align 469fbf59bc9STejun Heo * 470fbf59bc9STejun Heo * Try to allocate @size bytes area aligned at @align from @chunk. 471fbf59bc9STejun Heo * Note that this function only allocates the offset. It doesn't 472fbf59bc9STejun Heo * populate or map the area. 473fbf59bc9STejun Heo * 4749f7dcf22STejun Heo * @chunk->map must have at least two free slots. 4759f7dcf22STejun Heo * 476ccea34b5STejun Heo * CONTEXT: 477ccea34b5STejun Heo * pcpu_lock. 478ccea34b5STejun Heo * 479fbf59bc9STejun Heo * RETURNS: 4809f7dcf22STejun Heo * Allocated offset in @chunk on success, -1 if no matching area is 4819f7dcf22STejun Heo * found. 482fbf59bc9STejun Heo */ 483fbf59bc9STejun Heo static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) 484fbf59bc9STejun Heo { 485fbf59bc9STejun Heo int oslot = pcpu_chunk_slot(chunk); 486fbf59bc9STejun Heo int max_contig = 0; 487fbf59bc9STejun Heo int i, off; 488fbf59bc9STejun Heo 489fbf59bc9STejun Heo for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) { 490fbf59bc9STejun Heo bool is_last = i + 1 == chunk->map_used; 491fbf59bc9STejun Heo int head, tail; 492fbf59bc9STejun Heo 493fbf59bc9STejun Heo /* extra for alignment requirement */ 494fbf59bc9STejun Heo head = ALIGN(off, align) - off; 495fbf59bc9STejun Heo BUG_ON(i == 0 && head != 0); 496fbf59bc9STejun Heo 497fbf59bc9STejun Heo if (chunk->map[i] < 0) 498fbf59bc9STejun Heo continue; 499fbf59bc9STejun Heo if (chunk->map[i] < head + size) { 500fbf59bc9STejun Heo max_contig = max(chunk->map[i], max_contig); 501fbf59bc9STejun Heo continue; 502fbf59bc9STejun Heo } 503fbf59bc9STejun Heo 504fbf59bc9STejun Heo /* 505fbf59bc9STejun Heo * If head is small or the previous block is free, 506fbf59bc9STejun Heo * merge'em. Note that 'small' is defined as smaller 507fbf59bc9STejun Heo * than sizeof(int), which is very small but isn't too 508fbf59bc9STejun Heo * uncommon for percpu allocations. 509fbf59bc9STejun Heo */ 510fbf59bc9STejun Heo if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) { 511fbf59bc9STejun Heo if (chunk->map[i - 1] > 0) 512fbf59bc9STejun Heo chunk->map[i - 1] += head; 513fbf59bc9STejun Heo else { 514fbf59bc9STejun Heo chunk->map[i - 1] -= head; 515fbf59bc9STejun Heo chunk->free_size -= head; 516fbf59bc9STejun Heo } 517fbf59bc9STejun Heo chunk->map[i] -= head; 518fbf59bc9STejun Heo off += head; 519fbf59bc9STejun Heo head = 0; 520fbf59bc9STejun Heo } 521fbf59bc9STejun Heo 522fbf59bc9STejun Heo /* if tail is small, just keep it around */ 523fbf59bc9STejun Heo tail = chunk->map[i] - head - size; 524fbf59bc9STejun Heo if (tail < sizeof(int)) 525fbf59bc9STejun Heo tail = 0; 526fbf59bc9STejun Heo 527fbf59bc9STejun Heo /* split if warranted */ 528fbf59bc9STejun Heo if (head || tail) { 5299f7dcf22STejun Heo pcpu_split_block(chunk, i, head, tail); 530fbf59bc9STejun Heo if (head) { 531fbf59bc9STejun Heo i++; 532fbf59bc9STejun Heo off += head; 533fbf59bc9STejun Heo max_contig = max(chunk->map[i - 1], max_contig); 534fbf59bc9STejun Heo } 535fbf59bc9STejun Heo if (tail) 536fbf59bc9STejun Heo max_contig = max(chunk->map[i + 1], max_contig); 537fbf59bc9STejun Heo } 538fbf59bc9STejun Heo 539fbf59bc9STejun Heo /* update hint and mark allocated */ 540fbf59bc9STejun Heo if (is_last) 541fbf59bc9STejun Heo chunk->contig_hint = max_contig; /* fully scanned */ 542fbf59bc9STejun Heo else 543fbf59bc9STejun Heo chunk->contig_hint = max(chunk->contig_hint, 544fbf59bc9STejun Heo max_contig); 545fbf59bc9STejun Heo 546fbf59bc9STejun Heo chunk->free_size -= chunk->map[i]; 547fbf59bc9STejun Heo chunk->map[i] = -chunk->map[i]; 548fbf59bc9STejun Heo 549fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 550fbf59bc9STejun Heo return off; 551fbf59bc9STejun Heo } 552fbf59bc9STejun Heo 553fbf59bc9STejun Heo chunk->contig_hint = max_contig; /* fully scanned */ 554fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 555fbf59bc9STejun Heo 5569f7dcf22STejun Heo /* tell the upper layer that this chunk has no matching area */ 5579f7dcf22STejun Heo return -1; 558fbf59bc9STejun Heo } 559fbf59bc9STejun Heo 560fbf59bc9STejun Heo /** 561fbf59bc9STejun Heo * pcpu_free_area - free area to a pcpu_chunk 562fbf59bc9STejun Heo * @chunk: chunk of interest 563fbf59bc9STejun Heo * @freeme: offset of area to free 564fbf59bc9STejun Heo * 565fbf59bc9STejun Heo * Free area starting from @freeme to @chunk. Note that this function 566fbf59bc9STejun Heo * only modifies the allocation map. It doesn't depopulate or unmap 567fbf59bc9STejun Heo * the area. 568ccea34b5STejun Heo * 569ccea34b5STejun Heo * CONTEXT: 570ccea34b5STejun Heo * pcpu_lock. 571fbf59bc9STejun Heo */ 572fbf59bc9STejun Heo static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) 573fbf59bc9STejun Heo { 574fbf59bc9STejun Heo int oslot = pcpu_chunk_slot(chunk); 575fbf59bc9STejun Heo int i, off; 576fbf59bc9STejun Heo 577fbf59bc9STejun Heo for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) 578fbf59bc9STejun Heo if (off == freeme) 579fbf59bc9STejun Heo break; 580fbf59bc9STejun Heo BUG_ON(off != freeme); 581fbf59bc9STejun Heo BUG_ON(chunk->map[i] > 0); 582fbf59bc9STejun Heo 583fbf59bc9STejun Heo chunk->map[i] = -chunk->map[i]; 584fbf59bc9STejun Heo chunk->free_size += chunk->map[i]; 585fbf59bc9STejun Heo 586fbf59bc9STejun Heo /* merge with previous? */ 587fbf59bc9STejun Heo if (i > 0 && chunk->map[i - 1] >= 0) { 588fbf59bc9STejun Heo chunk->map[i - 1] += chunk->map[i]; 589fbf59bc9STejun Heo chunk->map_used--; 590fbf59bc9STejun Heo memmove(&chunk->map[i], &chunk->map[i + 1], 591fbf59bc9STejun Heo (chunk->map_used - i) * sizeof(chunk->map[0])); 592fbf59bc9STejun Heo i--; 593fbf59bc9STejun Heo } 594fbf59bc9STejun Heo /* merge with next? */ 595fbf59bc9STejun Heo if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { 596fbf59bc9STejun Heo chunk->map[i] += chunk->map[i + 1]; 597fbf59bc9STejun Heo chunk->map_used--; 598fbf59bc9STejun Heo memmove(&chunk->map[i + 1], &chunk->map[i + 2], 599fbf59bc9STejun Heo (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); 600fbf59bc9STejun Heo } 601fbf59bc9STejun Heo 602fbf59bc9STejun Heo chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); 603fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 604fbf59bc9STejun Heo } 605fbf59bc9STejun Heo 606fbf59bc9STejun Heo /** 607fbf59bc9STejun Heo * pcpu_unmap - unmap pages out of a pcpu_chunk 608fbf59bc9STejun Heo * @chunk: chunk of interest 609fbf59bc9STejun Heo * @page_start: page index of the first page to unmap 610fbf59bc9STejun Heo * @page_end: page index of the last page to unmap + 1 611fbf59bc9STejun Heo * @flush: whether to flush cache and tlb or not 612fbf59bc9STejun Heo * 613fbf59bc9STejun Heo * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. 614fbf59bc9STejun Heo * If @flush is true, vcache is flushed before unmapping and tlb 615fbf59bc9STejun Heo * after. 616fbf59bc9STejun Heo */ 617fbf59bc9STejun Heo static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, 618fbf59bc9STejun Heo bool flush) 619fbf59bc9STejun Heo { 620fbf59bc9STejun Heo unsigned int last = num_possible_cpus() - 1; 621fbf59bc9STejun Heo unsigned int cpu; 622fbf59bc9STejun Heo 6238d408b4bSTejun Heo /* unmap must not be done on immutable chunk */ 6248d408b4bSTejun Heo WARN_ON(chunk->immutable); 6258d408b4bSTejun Heo 626fbf59bc9STejun Heo /* 627fbf59bc9STejun Heo * Each flushing trial can be very expensive, issue flush on 628fbf59bc9STejun Heo * the whole region at once rather than doing it for each cpu. 629fbf59bc9STejun Heo * This could be an overkill but is more scalable. 630fbf59bc9STejun Heo */ 631fbf59bc9STejun Heo if (flush) 632fbf59bc9STejun Heo flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start), 633fbf59bc9STejun Heo pcpu_chunk_addr(chunk, last, page_end)); 634fbf59bc9STejun Heo 635fbf59bc9STejun Heo for_each_possible_cpu(cpu) 636fbf59bc9STejun Heo unmap_kernel_range_noflush( 637fbf59bc9STejun Heo pcpu_chunk_addr(chunk, cpu, page_start), 638fbf59bc9STejun Heo (page_end - page_start) << PAGE_SHIFT); 639fbf59bc9STejun Heo 640fbf59bc9STejun Heo /* ditto as flush_cache_vunmap() */ 641fbf59bc9STejun Heo if (flush) 642fbf59bc9STejun Heo flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start), 643fbf59bc9STejun Heo pcpu_chunk_addr(chunk, last, page_end)); 644fbf59bc9STejun Heo } 645fbf59bc9STejun Heo 646fbf59bc9STejun Heo /** 647fbf59bc9STejun Heo * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk 648fbf59bc9STejun Heo * @chunk: chunk to depopulate 649fbf59bc9STejun Heo * @off: offset to the area to depopulate 650cae3aeb8STejun Heo * @size: size of the area to depopulate in bytes 651fbf59bc9STejun Heo * @flush: whether to flush cache and tlb or not 652fbf59bc9STejun Heo * 653fbf59bc9STejun Heo * For each cpu, depopulate and unmap pages [@page_start,@page_end) 654fbf59bc9STejun Heo * from @chunk. If @flush is true, vcache is flushed before unmapping 655fbf59bc9STejun Heo * and tlb after. 656ccea34b5STejun Heo * 657ccea34b5STejun Heo * CONTEXT: 658ccea34b5STejun Heo * pcpu_alloc_mutex. 659fbf59bc9STejun Heo */ 660cae3aeb8STejun Heo static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size, 661cae3aeb8STejun Heo bool flush) 662fbf59bc9STejun Heo { 663fbf59bc9STejun Heo int page_start = PFN_DOWN(off); 664fbf59bc9STejun Heo int page_end = PFN_UP(off + size); 665fbf59bc9STejun Heo int unmap_start = -1; 666fbf59bc9STejun Heo int uninitialized_var(unmap_end); 667fbf59bc9STejun Heo unsigned int cpu; 668fbf59bc9STejun Heo int i; 669fbf59bc9STejun Heo 670fbf59bc9STejun Heo for (i = page_start; i < page_end; i++) { 671fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 672fbf59bc9STejun Heo struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i); 673fbf59bc9STejun Heo 674fbf59bc9STejun Heo if (!*pagep) 675fbf59bc9STejun Heo continue; 676fbf59bc9STejun Heo 677fbf59bc9STejun Heo __free_page(*pagep); 678fbf59bc9STejun Heo 679fbf59bc9STejun Heo /* 680fbf59bc9STejun Heo * If it's partial depopulation, it might get 681fbf59bc9STejun Heo * populated or depopulated again. Mark the 682fbf59bc9STejun Heo * page gone. 683fbf59bc9STejun Heo */ 684fbf59bc9STejun Heo *pagep = NULL; 685fbf59bc9STejun Heo 686fbf59bc9STejun Heo unmap_start = unmap_start < 0 ? i : unmap_start; 687fbf59bc9STejun Heo unmap_end = i + 1; 688fbf59bc9STejun Heo } 689fbf59bc9STejun Heo } 690fbf59bc9STejun Heo 691fbf59bc9STejun Heo if (unmap_start >= 0) 692fbf59bc9STejun Heo pcpu_unmap(chunk, unmap_start, unmap_end, flush); 693fbf59bc9STejun Heo } 694fbf59bc9STejun Heo 695fbf59bc9STejun Heo /** 696fbf59bc9STejun Heo * pcpu_map - map pages into a pcpu_chunk 697fbf59bc9STejun Heo * @chunk: chunk of interest 698fbf59bc9STejun Heo * @page_start: page index of the first page to map 699fbf59bc9STejun Heo * @page_end: page index of the last page to map + 1 700fbf59bc9STejun Heo * 701fbf59bc9STejun Heo * For each cpu, map pages [@page_start,@page_end) into @chunk. 702fbf59bc9STejun Heo * vcache is flushed afterwards. 703fbf59bc9STejun Heo */ 704fbf59bc9STejun Heo static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end) 705fbf59bc9STejun Heo { 706fbf59bc9STejun Heo unsigned int last = num_possible_cpus() - 1; 707fbf59bc9STejun Heo unsigned int cpu; 708fbf59bc9STejun Heo int err; 709fbf59bc9STejun Heo 7108d408b4bSTejun Heo /* map must not be done on immutable chunk */ 7118d408b4bSTejun Heo WARN_ON(chunk->immutable); 7128d408b4bSTejun Heo 713fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 714fbf59bc9STejun Heo err = map_kernel_range_noflush( 715fbf59bc9STejun Heo pcpu_chunk_addr(chunk, cpu, page_start), 716fbf59bc9STejun Heo (page_end - page_start) << PAGE_SHIFT, 717fbf59bc9STejun Heo PAGE_KERNEL, 718fbf59bc9STejun Heo pcpu_chunk_pagep(chunk, cpu, page_start)); 719fbf59bc9STejun Heo if (err < 0) 720fbf59bc9STejun Heo return err; 721fbf59bc9STejun Heo } 722fbf59bc9STejun Heo 723fbf59bc9STejun Heo /* flush at once, please read comments in pcpu_unmap() */ 724fbf59bc9STejun Heo flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start), 725fbf59bc9STejun Heo pcpu_chunk_addr(chunk, last, page_end)); 726fbf59bc9STejun Heo return 0; 727fbf59bc9STejun Heo } 728fbf59bc9STejun Heo 729fbf59bc9STejun Heo /** 730fbf59bc9STejun Heo * pcpu_populate_chunk - populate and map an area of a pcpu_chunk 731fbf59bc9STejun Heo * @chunk: chunk of interest 732fbf59bc9STejun Heo * @off: offset to the area to populate 733cae3aeb8STejun Heo * @size: size of the area to populate in bytes 734fbf59bc9STejun Heo * 735fbf59bc9STejun Heo * For each cpu, populate and map pages [@page_start,@page_end) into 736fbf59bc9STejun Heo * @chunk. The area is cleared on return. 737ccea34b5STejun Heo * 738ccea34b5STejun Heo * CONTEXT: 739ccea34b5STejun Heo * pcpu_alloc_mutex, does GFP_KERNEL allocation. 740fbf59bc9STejun Heo */ 741fbf59bc9STejun Heo static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) 742fbf59bc9STejun Heo { 743fbf59bc9STejun Heo const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; 744fbf59bc9STejun Heo int page_start = PFN_DOWN(off); 745fbf59bc9STejun Heo int page_end = PFN_UP(off + size); 746fbf59bc9STejun Heo int map_start = -1; 74702d51fdfSTejun Heo int uninitialized_var(map_end); 748fbf59bc9STejun Heo unsigned int cpu; 749fbf59bc9STejun Heo int i; 750fbf59bc9STejun Heo 751fbf59bc9STejun Heo for (i = page_start; i < page_end; i++) { 752fbf59bc9STejun Heo if (pcpu_chunk_page_occupied(chunk, i)) { 753fbf59bc9STejun Heo if (map_start >= 0) { 754fbf59bc9STejun Heo if (pcpu_map(chunk, map_start, map_end)) 755fbf59bc9STejun Heo goto err; 756fbf59bc9STejun Heo map_start = -1; 757fbf59bc9STejun Heo } 758fbf59bc9STejun Heo continue; 759fbf59bc9STejun Heo } 760fbf59bc9STejun Heo 761fbf59bc9STejun Heo map_start = map_start < 0 ? i : map_start; 762fbf59bc9STejun Heo map_end = i + 1; 763fbf59bc9STejun Heo 764fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 765fbf59bc9STejun Heo struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i); 766fbf59bc9STejun Heo 767fbf59bc9STejun Heo *pagep = alloc_pages_node(cpu_to_node(cpu), 768fbf59bc9STejun Heo alloc_mask, 0); 769fbf59bc9STejun Heo if (!*pagep) 770fbf59bc9STejun Heo goto err; 771fbf59bc9STejun Heo } 772fbf59bc9STejun Heo } 773fbf59bc9STejun Heo 774fbf59bc9STejun Heo if (map_start >= 0 && pcpu_map(chunk, map_start, map_end)) 775fbf59bc9STejun Heo goto err; 776fbf59bc9STejun Heo 777fbf59bc9STejun Heo for_each_possible_cpu(cpu) 778d9b55eebSTejun Heo memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0, 779fbf59bc9STejun Heo size); 780fbf59bc9STejun Heo 781fbf59bc9STejun Heo return 0; 782fbf59bc9STejun Heo err: 783fbf59bc9STejun Heo /* likely under heavy memory pressure, give memory back */ 784fbf59bc9STejun Heo pcpu_depopulate_chunk(chunk, off, size, true); 785fbf59bc9STejun Heo return -ENOMEM; 786fbf59bc9STejun Heo } 787fbf59bc9STejun Heo 788fbf59bc9STejun Heo static void free_pcpu_chunk(struct pcpu_chunk *chunk) 789fbf59bc9STejun Heo { 790fbf59bc9STejun Heo if (!chunk) 791fbf59bc9STejun Heo return; 792fbf59bc9STejun Heo if (chunk->vm) 793fbf59bc9STejun Heo free_vm_area(chunk->vm); 7941880d93bSTejun Heo pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); 795fbf59bc9STejun Heo kfree(chunk); 796fbf59bc9STejun Heo } 797fbf59bc9STejun Heo 798fbf59bc9STejun Heo static struct pcpu_chunk *alloc_pcpu_chunk(void) 799fbf59bc9STejun Heo { 800fbf59bc9STejun Heo struct pcpu_chunk *chunk; 801fbf59bc9STejun Heo 802fbf59bc9STejun Heo chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL); 803fbf59bc9STejun Heo if (!chunk) 804fbf59bc9STejun Heo return NULL; 805fbf59bc9STejun Heo 8061880d93bSTejun Heo chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); 807fbf59bc9STejun Heo chunk->map_alloc = PCPU_DFL_MAP_ALLOC; 808fbf59bc9STejun Heo chunk->map[chunk->map_used++] = pcpu_unit_size; 8093e24aa58STejun Heo chunk->page = chunk->page_ar; 810fbf59bc9STejun Heo 811fbf59bc9STejun Heo chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL); 812fbf59bc9STejun Heo if (!chunk->vm) { 813fbf59bc9STejun Heo free_pcpu_chunk(chunk); 814fbf59bc9STejun Heo return NULL; 815fbf59bc9STejun Heo } 816fbf59bc9STejun Heo 817fbf59bc9STejun Heo INIT_LIST_HEAD(&chunk->list); 818fbf59bc9STejun Heo chunk->free_size = pcpu_unit_size; 819fbf59bc9STejun Heo chunk->contig_hint = pcpu_unit_size; 820fbf59bc9STejun Heo 821fbf59bc9STejun Heo return chunk; 822fbf59bc9STejun Heo } 823fbf59bc9STejun Heo 824fbf59bc9STejun Heo /** 825edcb4639STejun Heo * pcpu_alloc - the percpu allocator 826cae3aeb8STejun Heo * @size: size of area to allocate in bytes 827fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE) 828edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available 829fbf59bc9STejun Heo * 830ccea34b5STejun Heo * Allocate percpu area of @size bytes aligned at @align. 831ccea34b5STejun Heo * 832ccea34b5STejun Heo * CONTEXT: 833ccea34b5STejun Heo * Does GFP_KERNEL allocation. 834fbf59bc9STejun Heo * 835fbf59bc9STejun Heo * RETURNS: 836fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 837fbf59bc9STejun Heo */ 838edcb4639STejun Heo static void *pcpu_alloc(size_t size, size_t align, bool reserved) 839fbf59bc9STejun Heo { 840fbf59bc9STejun Heo struct pcpu_chunk *chunk; 841fbf59bc9STejun Heo int slot, off; 842fbf59bc9STejun Heo 8438d408b4bSTejun Heo if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { 844fbf59bc9STejun Heo WARN(true, "illegal size (%zu) or align (%zu) for " 845fbf59bc9STejun Heo "percpu allocation\n", size, align); 846fbf59bc9STejun Heo return NULL; 847fbf59bc9STejun Heo } 848fbf59bc9STejun Heo 849ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 850ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 851fbf59bc9STejun Heo 852edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */ 853edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) { 854edcb4639STejun Heo chunk = pcpu_reserved_chunk; 8559f7dcf22STejun Heo if (size > chunk->contig_hint || 8569f7dcf22STejun Heo pcpu_extend_area_map(chunk) < 0) 857ccea34b5STejun Heo goto fail_unlock; 858edcb4639STejun Heo off = pcpu_alloc_area(chunk, size, align); 859edcb4639STejun Heo if (off >= 0) 860edcb4639STejun Heo goto area_found; 861ccea34b5STejun Heo goto fail_unlock; 862edcb4639STejun Heo } 863edcb4639STejun Heo 864ccea34b5STejun Heo restart: 865edcb4639STejun Heo /* search through normal chunks */ 866fbf59bc9STejun Heo for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 867fbf59bc9STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 868fbf59bc9STejun Heo if (size > chunk->contig_hint) 869fbf59bc9STejun Heo continue; 870ccea34b5STejun Heo 871ccea34b5STejun Heo switch (pcpu_extend_area_map(chunk)) { 872ccea34b5STejun Heo case 0: 873ccea34b5STejun Heo break; 874ccea34b5STejun Heo case 1: 875ccea34b5STejun Heo goto restart; /* pcpu_lock dropped, restart */ 876ccea34b5STejun Heo default: 877ccea34b5STejun Heo goto fail_unlock; 878ccea34b5STejun Heo } 879ccea34b5STejun Heo 880fbf59bc9STejun Heo off = pcpu_alloc_area(chunk, size, align); 881fbf59bc9STejun Heo if (off >= 0) 882fbf59bc9STejun Heo goto area_found; 883fbf59bc9STejun Heo } 884fbf59bc9STejun Heo } 885fbf59bc9STejun Heo 886fbf59bc9STejun Heo /* hmmm... no space left, create a new chunk */ 887ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 888ccea34b5STejun Heo 889fbf59bc9STejun Heo chunk = alloc_pcpu_chunk(); 890fbf59bc9STejun Heo if (!chunk) 891ccea34b5STejun Heo goto fail_unlock_mutex; 892ccea34b5STejun Heo 893ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 894fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 895fbf59bc9STejun Heo pcpu_chunk_addr_insert(chunk); 896ccea34b5STejun Heo goto restart; 897fbf59bc9STejun Heo 898fbf59bc9STejun Heo area_found: 899ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 900ccea34b5STejun Heo 901fbf59bc9STejun Heo /* populate, map and clear the area */ 902fbf59bc9STejun Heo if (pcpu_populate_chunk(chunk, off, size)) { 903ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 904fbf59bc9STejun Heo pcpu_free_area(chunk, off); 905ccea34b5STejun Heo goto fail_unlock; 906fbf59bc9STejun Heo } 907fbf59bc9STejun Heo 908ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 909ccea34b5STejun Heo 910ccea34b5STejun Heo return __addr_to_pcpu_ptr(chunk->vm->addr + off); 911ccea34b5STejun Heo 912ccea34b5STejun Heo fail_unlock: 913ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 914ccea34b5STejun Heo fail_unlock_mutex: 915ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 916ccea34b5STejun Heo return NULL; 917fbf59bc9STejun Heo } 918edcb4639STejun Heo 919edcb4639STejun Heo /** 920edcb4639STejun Heo * __alloc_percpu - allocate dynamic percpu area 921edcb4639STejun Heo * @size: size of area to allocate in bytes 922edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 923edcb4639STejun Heo * 924edcb4639STejun Heo * Allocate percpu area of @size bytes aligned at @align. Might 925edcb4639STejun Heo * sleep. Might trigger writeouts. 926edcb4639STejun Heo * 927ccea34b5STejun Heo * CONTEXT: 928ccea34b5STejun Heo * Does GFP_KERNEL allocation. 929ccea34b5STejun Heo * 930edcb4639STejun Heo * RETURNS: 931edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 932edcb4639STejun Heo */ 933edcb4639STejun Heo void *__alloc_percpu(size_t size, size_t align) 934edcb4639STejun Heo { 935edcb4639STejun Heo return pcpu_alloc(size, align, false); 936edcb4639STejun Heo } 937fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 938fbf59bc9STejun Heo 939edcb4639STejun Heo /** 940edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 941edcb4639STejun Heo * @size: size of area to allocate in bytes 942edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 943edcb4639STejun Heo * 944edcb4639STejun Heo * Allocate percpu area of @size bytes aligned at @align from reserved 945edcb4639STejun Heo * percpu area if arch has set it up; otherwise, allocation is served 946edcb4639STejun Heo * from the same dynamic area. Might sleep. Might trigger writeouts. 947edcb4639STejun Heo * 948ccea34b5STejun Heo * CONTEXT: 949ccea34b5STejun Heo * Does GFP_KERNEL allocation. 950ccea34b5STejun Heo * 951edcb4639STejun Heo * RETURNS: 952edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 953edcb4639STejun Heo */ 954edcb4639STejun Heo void *__alloc_reserved_percpu(size_t size, size_t align) 955edcb4639STejun Heo { 956edcb4639STejun Heo return pcpu_alloc(size, align, true); 957edcb4639STejun Heo } 958edcb4639STejun Heo 959a56dbddfSTejun Heo /** 960a56dbddfSTejun Heo * pcpu_reclaim - reclaim fully free chunks, workqueue function 961a56dbddfSTejun Heo * @work: unused 962a56dbddfSTejun Heo * 963a56dbddfSTejun Heo * Reclaim all fully free chunks except for the first one. 964ccea34b5STejun Heo * 965ccea34b5STejun Heo * CONTEXT: 966ccea34b5STejun Heo * workqueue context. 967a56dbddfSTejun Heo */ 968a56dbddfSTejun Heo static void pcpu_reclaim(struct work_struct *work) 969fbf59bc9STejun Heo { 970a56dbddfSTejun Heo LIST_HEAD(todo); 971a56dbddfSTejun Heo struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; 972a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 973a56dbddfSTejun Heo 974ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 975ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 976a56dbddfSTejun Heo 977a56dbddfSTejun Heo list_for_each_entry_safe(chunk, next, head, list) { 9788d408b4bSTejun Heo WARN_ON(chunk->immutable); 979a56dbddfSTejun Heo 980a56dbddfSTejun Heo /* spare the first one */ 981a56dbddfSTejun Heo if (chunk == list_first_entry(head, struct pcpu_chunk, list)) 982a56dbddfSTejun Heo continue; 983a56dbddfSTejun Heo 984fbf59bc9STejun Heo rb_erase(&chunk->rb_node, &pcpu_addr_root); 985a56dbddfSTejun Heo list_move(&chunk->list, &todo); 986a56dbddfSTejun Heo } 987a56dbddfSTejun Heo 988ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 989ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 990a56dbddfSTejun Heo 991a56dbddfSTejun Heo list_for_each_entry_safe(chunk, next, &todo, list) { 992a56dbddfSTejun Heo pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false); 993fbf59bc9STejun Heo free_pcpu_chunk(chunk); 994fbf59bc9STejun Heo } 995a56dbddfSTejun Heo } 996fbf59bc9STejun Heo 997fbf59bc9STejun Heo /** 998fbf59bc9STejun Heo * free_percpu - free percpu area 999fbf59bc9STejun Heo * @ptr: pointer to area to free 1000fbf59bc9STejun Heo * 1001ccea34b5STejun Heo * Free percpu area @ptr. 1002ccea34b5STejun Heo * 1003ccea34b5STejun Heo * CONTEXT: 1004ccea34b5STejun Heo * Can be called from atomic context. 1005fbf59bc9STejun Heo */ 1006fbf59bc9STejun Heo void free_percpu(void *ptr) 1007fbf59bc9STejun Heo { 1008fbf59bc9STejun Heo void *addr = __pcpu_ptr_to_addr(ptr); 1009fbf59bc9STejun Heo struct pcpu_chunk *chunk; 1010ccea34b5STejun Heo unsigned long flags; 1011fbf59bc9STejun Heo int off; 1012fbf59bc9STejun Heo 1013fbf59bc9STejun Heo if (!ptr) 1014fbf59bc9STejun Heo return; 1015fbf59bc9STejun Heo 1016ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1017fbf59bc9STejun Heo 1018fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr); 1019fbf59bc9STejun Heo off = addr - chunk->vm->addr; 1020fbf59bc9STejun Heo 1021fbf59bc9STejun Heo pcpu_free_area(chunk, off); 1022fbf59bc9STejun Heo 1023a56dbddfSTejun Heo /* if there are more than one fully free chunks, wake up grim reaper */ 1024fbf59bc9STejun Heo if (chunk->free_size == pcpu_unit_size) { 1025fbf59bc9STejun Heo struct pcpu_chunk *pos; 1026fbf59bc9STejun Heo 1027a56dbddfSTejun Heo list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1028fbf59bc9STejun Heo if (pos != chunk) { 1029a56dbddfSTejun Heo schedule_work(&pcpu_reclaim_work); 1030fbf59bc9STejun Heo break; 1031fbf59bc9STejun Heo } 1032fbf59bc9STejun Heo } 1033fbf59bc9STejun Heo 1034ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1035fbf59bc9STejun Heo } 1036fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 1037fbf59bc9STejun Heo 1038fbf59bc9STejun Heo /** 10398d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 10408d408b4bSTejun Heo * @get_page_fn: callback to fetch page pointer 10418d408b4bSTejun Heo * @static_size: the size of static percpu area in bytes 1042edcb4639STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1043cafe8816STejun Heo * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 10446074d5b0STejun Heo * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto 10458d408b4bSTejun Heo * @base_addr: mapped address, NULL for auto 10468d408b4bSTejun Heo * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary 1047fbf59bc9STejun Heo * 10488d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 10498d408b4bSTejun Heo * perpcu area. This function is to be called from arch percpu area 10508d408b4bSTejun Heo * setup path. The first two parameters are mandatory. The rest are 10518d408b4bSTejun Heo * optional. 10528d408b4bSTejun Heo * 10538d408b4bSTejun Heo * @get_page_fn() should return pointer to percpu page given cpu 10548d408b4bSTejun Heo * number and page number. It should at least return enough pages to 10558d408b4bSTejun Heo * cover the static area. The returned pages for static area should 10568d408b4bSTejun Heo * have been initialized with valid data. If @unit_size is specified, 10578d408b4bSTejun Heo * it can also return pages after the static area. NULL return 10588d408b4bSTejun Heo * indicates end of pages for the cpu. Note that @get_page_fn() must 10598d408b4bSTejun Heo * return the same number of pages for all cpus. 10608d408b4bSTejun Heo * 1061edcb4639STejun Heo * @reserved_size, if non-zero, specifies the amount of bytes to 1062edcb4639STejun Heo * reserve after the static area in the first chunk. This reserves 1063edcb4639STejun Heo * the first chunk such that it's available only through reserved 1064edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 1065edcb4639STejun Heo * static areas on architectures where the addressing model has 1066edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 1067edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 1068edcb4639STejun Heo * 10696074d5b0STejun Heo * @dyn_size, if non-negative, determines the number of bytes 10706074d5b0STejun Heo * available for dynamic allocation in the first chunk. Specifying 10716074d5b0STejun Heo * non-negative value makes percpu leave alone the area beyond 10726074d5b0STejun Heo * @static_size + @reserved_size + @dyn_size. 10736074d5b0STejun Heo * 1074cafe8816STejun Heo * @unit_size, if non-negative, specifies unit size and must be 1075cafe8816STejun Heo * aligned to PAGE_SIZE and equal to or larger than @static_size + 10766074d5b0STejun Heo * @reserved_size + if non-negative, @dyn_size. 10778d408b4bSTejun Heo * 10788d408b4bSTejun Heo * Non-null @base_addr means that the caller already allocated virtual 10798d408b4bSTejun Heo * region for the first chunk and mapped it. percpu must not mess 10808d408b4bSTejun Heo * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL 10818d408b4bSTejun Heo * @populate_pte_fn doesn't make any sense. 10828d408b4bSTejun Heo * 10838d408b4bSTejun Heo * @populate_pte_fn is used to populate the pagetable. NULL means the 10848d408b4bSTejun Heo * caller already populated the pagetable. 1085fbf59bc9STejun Heo * 1086edcb4639STejun Heo * If the first chunk ends up with both reserved and dynamic areas, it 1087edcb4639STejun Heo * is served by two chunks - one to serve the core static and reserved 1088edcb4639STejun Heo * areas and the other for the dynamic area. They share the same vm 1089edcb4639STejun Heo * and page map but uses different area allocation map to stay away 1090edcb4639STejun Heo * from each other. The latter chunk is circulated in the chunk slots 1091edcb4639STejun Heo * and available for dynamic allocation like any other chunks. 1092edcb4639STejun Heo * 1093fbf59bc9STejun Heo * RETURNS: 1094fbf59bc9STejun Heo * The determined pcpu_unit_size which can be used to initialize 1095fbf59bc9STejun Heo * percpu access. 1096fbf59bc9STejun Heo */ 10978d408b4bSTejun Heo size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, 1098edcb4639STejun Heo size_t static_size, size_t reserved_size, 10996074d5b0STejun Heo ssize_t dyn_size, ssize_t unit_size, 1100cafe8816STejun Heo void *base_addr, 11018d408b4bSTejun Heo pcpu_populate_pte_fn_t populate_pte_fn) 1102fbf59bc9STejun Heo { 11032441d15cSTejun Heo static struct vm_struct first_vm; 1104edcb4639STejun Heo static int smap[2], dmap[2]; 11056074d5b0STejun Heo size_t size_sum = static_size + reserved_size + 11066074d5b0STejun Heo (dyn_size >= 0 ? dyn_size : 0); 1107edcb4639STejun Heo struct pcpu_chunk *schunk, *dchunk = NULL; 1108fbf59bc9STejun Heo unsigned int cpu; 11098d408b4bSTejun Heo int nr_pages; 1110fbf59bc9STejun Heo int err, i; 1111fbf59bc9STejun Heo 11128d408b4bSTejun Heo /* santiy checks */ 1113edcb4639STejun Heo BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || 1114edcb4639STejun Heo ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); 11158d408b4bSTejun Heo BUG_ON(!static_size); 1116cafe8816STejun Heo if (unit_size >= 0) { 11176074d5b0STejun Heo BUG_ON(unit_size < size_sum); 11188d408b4bSTejun Heo BUG_ON(unit_size & ~PAGE_MASK); 11196074d5b0STejun Heo BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE); 11206074d5b0STejun Heo } else 1121cafe8816STejun Heo BUG_ON(base_addr); 11228d408b4bSTejun Heo BUG_ON(base_addr && populate_pte_fn); 1123fbf59bc9STejun Heo 1124cafe8816STejun Heo if (unit_size >= 0) 11258d408b4bSTejun Heo pcpu_unit_pages = unit_size >> PAGE_SHIFT; 11268d408b4bSTejun Heo else 11278d408b4bSTejun Heo pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT, 11286074d5b0STejun Heo PFN_UP(size_sum)); 11298d408b4bSTejun Heo 1130d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 1131fbf59bc9STejun Heo pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size; 1132fbf59bc9STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) 1133cb83b42eSTejun Heo + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *); 1134fbf59bc9STejun Heo 1135cafe8816STejun Heo if (dyn_size < 0) 1136edcb4639STejun Heo dyn_size = pcpu_unit_size - static_size - reserved_size; 1137cafe8816STejun Heo 1138d9b55eebSTejun Heo /* 1139d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 1140d9b55eebSTejun Heo * empty chunks. 1141d9b55eebSTejun Heo */ 1142d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1143fbf59bc9STejun Heo pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); 1144fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 1145fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 1146fbf59bc9STejun Heo 1147edcb4639STejun Heo /* 1148edcb4639STejun Heo * Initialize static chunk. If reserved_size is zero, the 1149edcb4639STejun Heo * static chunk covers static area + dynamic allocation area 1150edcb4639STejun Heo * in the first chunk. If reserved_size is not zero, it 1151edcb4639STejun Heo * covers static area + reserved area (mostly used for module 1152edcb4639STejun Heo * static percpu allocation). 1153edcb4639STejun Heo */ 11542441d15cSTejun Heo schunk = alloc_bootmem(pcpu_chunk_struct_size); 11552441d15cSTejun Heo INIT_LIST_HEAD(&schunk->list); 11562441d15cSTejun Heo schunk->vm = &first_vm; 115761ace7faSTejun Heo schunk->map = smap; 115861ace7faSTejun Heo schunk->map_alloc = ARRAY_SIZE(smap); 11593e24aa58STejun Heo schunk->page = schunk->page_ar; 1160edcb4639STejun Heo 1161edcb4639STejun Heo if (reserved_size) { 1162edcb4639STejun Heo schunk->free_size = reserved_size; 1163*ae9e6bc9STejun Heo pcpu_reserved_chunk = schunk; 1164*ae9e6bc9STejun Heo pcpu_reserved_chunk_limit = static_size + reserved_size; 1165edcb4639STejun Heo } else { 11662441d15cSTejun Heo schunk->free_size = dyn_size; 1167edcb4639STejun Heo dyn_size = 0; /* dynamic area covered */ 1168edcb4639STejun Heo } 11692441d15cSTejun Heo schunk->contig_hint = schunk->free_size; 1170fbf59bc9STejun Heo 117161ace7faSTejun Heo schunk->map[schunk->map_used++] = -static_size; 117261ace7faSTejun Heo if (schunk->free_size) 117361ace7faSTejun Heo schunk->map[schunk->map_used++] = schunk->free_size; 117461ace7faSTejun Heo 1175edcb4639STejun Heo /* init dynamic chunk if necessary */ 1176edcb4639STejun Heo if (dyn_size) { 1177edcb4639STejun Heo dchunk = alloc_bootmem(sizeof(struct pcpu_chunk)); 1178edcb4639STejun Heo INIT_LIST_HEAD(&dchunk->list); 1179edcb4639STejun Heo dchunk->vm = &first_vm; 1180edcb4639STejun Heo dchunk->map = dmap; 1181edcb4639STejun Heo dchunk->map_alloc = ARRAY_SIZE(dmap); 1182edcb4639STejun Heo dchunk->page = schunk->page_ar; /* share page map with schunk */ 1183edcb4639STejun Heo 1184edcb4639STejun Heo dchunk->contig_hint = dchunk->free_size = dyn_size; 1185edcb4639STejun Heo dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; 1186edcb4639STejun Heo dchunk->map[dchunk->map_used++] = dchunk->free_size; 1187edcb4639STejun Heo } 1188edcb4639STejun Heo 11898d408b4bSTejun Heo /* allocate vm address */ 11902441d15cSTejun Heo first_vm.flags = VM_ALLOC; 11912441d15cSTejun Heo first_vm.size = pcpu_chunk_size; 11928d408b4bSTejun Heo 11938d408b4bSTejun Heo if (!base_addr) 11942441d15cSTejun Heo vm_area_register_early(&first_vm, PAGE_SIZE); 11958d408b4bSTejun Heo else { 11968d408b4bSTejun Heo /* 11978d408b4bSTejun Heo * Pages already mapped. No need to remap into 1198edcb4639STejun Heo * vmalloc area. In this case the first chunks can't 1199edcb4639STejun Heo * be mapped or unmapped by percpu and are marked 12008d408b4bSTejun Heo * immutable. 12018d408b4bSTejun Heo */ 12022441d15cSTejun Heo first_vm.addr = base_addr; 12032441d15cSTejun Heo schunk->immutable = true; 1204edcb4639STejun Heo if (dchunk) 1205edcb4639STejun Heo dchunk->immutable = true; 1206fbf59bc9STejun Heo } 1207fbf59bc9STejun Heo 12088d408b4bSTejun Heo /* assign pages */ 12098d408b4bSTejun Heo nr_pages = -1; 12108d408b4bSTejun Heo for_each_possible_cpu(cpu) { 12118d408b4bSTejun Heo for (i = 0; i < pcpu_unit_pages; i++) { 12128d408b4bSTejun Heo struct page *page = get_page_fn(cpu, i); 12138d408b4bSTejun Heo 12148d408b4bSTejun Heo if (!page) 12158d408b4bSTejun Heo break; 12162441d15cSTejun Heo *pcpu_chunk_pagep(schunk, cpu, i) = page; 12178d408b4bSTejun Heo } 12188d408b4bSTejun Heo 121961ace7faSTejun Heo BUG_ON(i < PFN_UP(static_size)); 12208d408b4bSTejun Heo 12218d408b4bSTejun Heo if (nr_pages < 0) 12228d408b4bSTejun Heo nr_pages = i; 12238d408b4bSTejun Heo else 12248d408b4bSTejun Heo BUG_ON(nr_pages != i); 12258d408b4bSTejun Heo } 12268d408b4bSTejun Heo 12278d408b4bSTejun Heo /* map them */ 12288d408b4bSTejun Heo if (populate_pte_fn) { 12298d408b4bSTejun Heo for_each_possible_cpu(cpu) 12308d408b4bSTejun Heo for (i = 0; i < nr_pages; i++) 12312441d15cSTejun Heo populate_pte_fn(pcpu_chunk_addr(schunk, 12328d408b4bSTejun Heo cpu, i)); 12338d408b4bSTejun Heo 12342441d15cSTejun Heo err = pcpu_map(schunk, 0, nr_pages); 1235fbf59bc9STejun Heo if (err) 12368d408b4bSTejun Heo panic("failed to setup static percpu area, err=%d\n", 12378d408b4bSTejun Heo err); 12388d408b4bSTejun Heo } 1239fbf59bc9STejun Heo 12402441d15cSTejun Heo /* link the first chunk in */ 1241*ae9e6bc9STejun Heo pcpu_first_chunk = dchunk ?: schunk; 1242*ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 1243fbf59bc9STejun Heo 1244fbf59bc9STejun Heo /* we're done */ 12452441d15cSTejun Heo pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0); 1246fbf59bc9STejun Heo return pcpu_unit_size; 1247fbf59bc9STejun Heo } 124866c3a757STejun Heo 124966c3a757STejun Heo /* 125066c3a757STejun Heo * Embedding first chunk setup helper. 125166c3a757STejun Heo */ 125266c3a757STejun Heo static void *pcpue_ptr __initdata; 125366c3a757STejun Heo static size_t pcpue_size __initdata; 125466c3a757STejun Heo static size_t pcpue_unit_size __initdata; 125566c3a757STejun Heo 125666c3a757STejun Heo static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) 125766c3a757STejun Heo { 125866c3a757STejun Heo size_t off = (size_t)pageno << PAGE_SHIFT; 125966c3a757STejun Heo 126066c3a757STejun Heo if (off >= pcpue_size) 126166c3a757STejun Heo return NULL; 126266c3a757STejun Heo 126366c3a757STejun Heo return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off); 126466c3a757STejun Heo } 126566c3a757STejun Heo 126666c3a757STejun Heo /** 126766c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 126866c3a757STejun Heo * @static_size: the size of static percpu area in bytes 126966c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 127066c3a757STejun Heo * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 127166c3a757STejun Heo * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto 127266c3a757STejun Heo * 127366c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 127466c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 127566c3a757STejun Heo * 127666c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 127766c3a757STejun Heo * as a contiguous area using bootmem allocator and used as-is without 127866c3a757STejun Heo * being mapped into vmalloc area. This enables the first chunk to 127966c3a757STejun Heo * piggy back on the linear physical mapping which often uses larger 128066c3a757STejun Heo * page size. 128166c3a757STejun Heo * 128266c3a757STejun Heo * When @dyn_size is positive, dynamic area might be larger than 128366c3a757STejun Heo * specified to fill page alignment. Also, when @dyn_size is auto, 128466c3a757STejun Heo * @dyn_size does not fill the whole first chunk but only what's 128566c3a757STejun Heo * necessary for page alignment after static and reserved areas. 128666c3a757STejun Heo * 128766c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 128866c3a757STejun Heo * size, the leftover is returned to the bootmem allocator. 128966c3a757STejun Heo * 129066c3a757STejun Heo * RETURNS: 129166c3a757STejun Heo * The determined pcpu_unit_size which can be used to initialize 129266c3a757STejun Heo * percpu access on success, -errno on failure. 129366c3a757STejun Heo */ 129466c3a757STejun Heo ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, 129566c3a757STejun Heo ssize_t dyn_size, ssize_t unit_size) 129666c3a757STejun Heo { 129766c3a757STejun Heo unsigned int cpu; 129866c3a757STejun Heo 129966c3a757STejun Heo /* determine parameters and allocate */ 130066c3a757STejun Heo pcpue_size = PFN_ALIGN(static_size + reserved_size + 130166c3a757STejun Heo (dyn_size >= 0 ? dyn_size : 0)); 130266c3a757STejun Heo if (dyn_size != 0) 130366c3a757STejun Heo dyn_size = pcpue_size - static_size - reserved_size; 130466c3a757STejun Heo 130566c3a757STejun Heo if (unit_size >= 0) { 130666c3a757STejun Heo BUG_ON(unit_size < pcpue_size); 130766c3a757STejun Heo pcpue_unit_size = unit_size; 130866c3a757STejun Heo } else 130966c3a757STejun Heo pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); 131066c3a757STejun Heo 131166c3a757STejun Heo pcpue_ptr = __alloc_bootmem_nopanic( 131266c3a757STejun Heo num_possible_cpus() * pcpue_unit_size, 131366c3a757STejun Heo PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 131466c3a757STejun Heo if (!pcpue_ptr) 131566c3a757STejun Heo return -ENOMEM; 131666c3a757STejun Heo 131766c3a757STejun Heo /* return the leftover and copy */ 131866c3a757STejun Heo for_each_possible_cpu(cpu) { 131966c3a757STejun Heo void *ptr = pcpue_ptr + cpu * pcpue_unit_size; 132066c3a757STejun Heo 132166c3a757STejun Heo free_bootmem(__pa(ptr + pcpue_size), 132266c3a757STejun Heo pcpue_unit_size - pcpue_size); 132366c3a757STejun Heo memcpy(ptr, __per_cpu_load, static_size); 132466c3a757STejun Heo } 132566c3a757STejun Heo 132666c3a757STejun Heo /* we're ready, commit */ 132766c3a757STejun Heo pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", 132866c3a757STejun Heo pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size); 132966c3a757STejun Heo 133066c3a757STejun Heo return pcpu_setup_first_chunk(pcpue_get_page, static_size, 133166c3a757STejun Heo reserved_size, dyn_size, 133266c3a757STejun Heo pcpue_unit_size, pcpue_ptr, NULL); 133366c3a757STejun Heo } 1334