/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in the vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).  A
 * unit grows as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated, again in the
 * vmalloc area:
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers UNIT_SIZE apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks are also linked into an rb
 * tree to ease address to chunk mapping during free.
 *
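 * As a hypothetical example (illustrative numbers, not from a real
 * boot), a chunk whose map is { 512, -256, 1024 } has a free 512 byte
 * area at offset 0, an allocated 256 byte area at offset 512 and a
 * free 1024 byte area at offset 768.  Serving a 256 byte allocation
 * from it splits the first entry and turns the map into
 * { -256, 256, -256, 1024 }.
 *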
 * To use this allocator, arch code should do the following.
 *
 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 bytes share the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;
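/*
 * Illustration of the default translation macros above (hypothetical
 * addresses): with pcpu_base_addr == 0xf8000000 and __per_cpu_start ==
 * 0xc1500000, __addr_to_pcpu_ptr(0xf8000010) yields the percpu pointer
 * 0xc1500010, and __pcpu_ptr_to_addr() undoes the conversion.
 */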
/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/* optional reserved chunk, only accessible for reserved allocations */
static struct pcpu_chunk *pcpu_reserved_chunk;
/* offset limit of the reserved chunk */
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks and chunk->page arrays.
 * The latter is a spinlock and protects the index data structures -
 * chunk slots, rbtree, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
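/*
 * A sketch of the allocation path lock sequence described above (the
 * actual dance lives in pcpu_alloc() and pcpu_extend_area_map()):
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	spin_lock_irq(&pcpu_lock);
 *	... manipulate index data structures ...
 *	spin_unlock_irq(&pcpu_lock);	<- dropped around GFP_KERNEL allocs
 *	spin_lock_irq(&pcpu_lock);
 *	...
 *	spin_unlock_irq(&pcpu_lock);
 *	mutex_unlock(&pcpu_alloc_mutex);
 */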
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}
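/*
 * Worked example for the slot helpers above: with
 * PCPU_SLOT_BASE_SHIFT == 5, a chunk with free_size == 1024 has
 * fls(1024) == 11 and sits on slot max(11 - 5 + 2, 1) == 8, together
 * with every chunk whose free_size falls in [1024, 2047].  An
 * allocation request for 1024 bytes therefore starts scanning at
 * slot 8.
 */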
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &(*p)->rb_left;
		else if (addr > chunk->vm->addr)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}
/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address to search for
 *
 * Look for chunk which might contain @addr.  More specifically, it
 * searches for the chunk with the highest start address which isn't
 * beyond @addr.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	/* is it in the reserved chunk? */
	if (pcpu_reserved_chunk) {
		void *start = pcpu_reserved_chunk->vm->addr;

		if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
	}

	/* nah... search the regular ones */
	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exactly matching chunk, the parent is the closest */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the parent was the next one, look for the previous one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}

/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into address rb tree.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}
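/*
 * Example of the address search above (hypothetical addresses): with
 * chunks starting at 0xf9000000 and 0xf9400000, a lookup for
 * 0xf92a0000 finds no exact match and may stop at the 0xf9400000
 * node; since that start address is beyond the target, rb_prev()
 * backs up to the 0xf9000000 chunk - the highest start address which
 * isn't beyond 0xf92a0000.
 */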
/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend area map of @chunk so that it can accommodate an allocation.
 * A single allocation can split an area into three areas, so this
 * function makes sure that @chunk->map has at least two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened in between, so map_used couldn't have
	 * grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;

	/* pcpu_lock was dropped and reacquired, tell the caller */
	return 1;
}
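/*
 * Growth example for the function above: a chunk whose map has
 * map_used == 15 and map_alloc == 16 no longer has the two spare
 * slots an allocation may need, so new_alloc doubles from
 * PCPU_DFL_MAP_ALLOC (16) to 32 entries before the switch.
 */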
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
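/*
 * Worked example (hypothetical map): calling
 * pcpu_split_block(chunk, 1, 256, 256) on a map of { -512, 1024 }
 * first inserts the 256 byte head, giving { -512, 256, 768 }, then
 * carves the 256 byte tail off the target block, giving
 * { -512, 256, 512, 256 }.  The caller then marks the middle 512 byte
 * block allocated.
 */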
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
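/*
 * Merge example (hypothetical map): freeing the area at offset 512 of
 * { 512, -256, 1024 } first flips the entry to give { 512, 256, 1024 };
 * the two merge steps above then collapse the map into a single free
 * entry, { 1792 }.
 */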
/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}
/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}
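/*
 * Example with illustrative numbers: with 4k pages and
 * pcpu_unit_pages == 8, pcpu_map(chunk, 2, 5) maps three pages per
 * cpu at chunk->vm->addr + cpu * 32k + 8k and then issues a single
 * vcache flush covering the span from cpu 0's first page to the last
 * possible cpu's last page.
 */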
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}
/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	return __addr_to_pcpu_ptr(chunk->vm->addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}
/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}
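/*
 * Minimal usage sketch for the interface above (illustrative only,
 * not part of this file's code paths):
 *
 *	int *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (cnt)
 *		(*per_cpu_ptr(cnt, raw_smp_processor_id()))++;
 *	free_percpu(cnt);
 */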
/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		rb_erase(&chunk->rb_node, &pcpu_addr_root);
		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);
	mutex_unlock(&pcpu_alloc_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
	}
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  If @unit_size is specified,
 * it can also return pages after the static area.  NULL return
 * indicates end of pages for the cpu.  Note that @get_page_fn() must
 * return the same number of pages for all cpus.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @dyn_size, if non-negative, determines the number of bytes
 * available for dynamic allocation in the first chunk.  Specifying a
 * non-negative value makes percpu leave alone the area beyond
 * @static_size + @reserved_size + @dyn_size.
 *
 * @unit_size, if non-negative, specifies unit size and must be
 * aligned to PAGE_SIZE and equal to or larger than @static_size +
 * @reserved_size + @dyn_size (when @dyn_size is non-negative).
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it.  percpu must not mess
 * with the chunk.  Note that @base_addr with 0 @unit_size or non-NULL
 * @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t reserved_size,
				     ssize_t dyn_size, ssize_t unit_size,
				     void *base_addr,
				     pcpu_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	size_t size_sum = static_size + reserved_size +
			  (dyn_size >= 0 ? dyn_size : 0);
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned int cpu;
	int nr_pages;
	int err, i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	if (unit_size >= 0) {
		BUG_ON(unit_size < size_sum);
		BUG_ON(unit_size & ~PAGE_MASK);
		BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
	} else
		BUG_ON(base_addr);
	BUG_ON(base_addr && populate_pte_fn);

	if (unit_size >= 0)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(size_sum));

	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);
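	/*
	 * Example layout (hypothetical sizes): with static_size of
	 * 128k, reserved_size of 8k and dyn_size of 20k, schunk below
	 * serves the static area plus the 8k reserved area
	 * (pcpu_reserved_chunk_limit becomes 136k) while dchunk
	 * serves the remaining 20k of the unit for dynamic
	 * allocation.
	 */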
	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->page = schunk->page_ar;

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;	/* not for dynamic alloc */
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	pcpu_reserved_chunk_limit = static_size + schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->page = schunk->page_ar;	/* share page map with schunk */

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* allocate vm address */
	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&first_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the first chunks can't
		 * be mapped or unmapped by percpu and are marked
		 * immutable.
		 */
		first_vm.addr = base_addr;
		schunk->immutable = true;
		if (dchunk)
			dchunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(schunk,
								cpu, i));

		err = pcpu_map(schunk, 0, nr_pages);
		if (err)
			panic("failed to setup static percpu area, err=%d\n",
			      err);
	}

	/* link the first chunk in */
	if (!dchunk) {
		pcpu_chunk_relocate(schunk, -1);
		pcpu_chunk_addr_insert(schunk);
	} else {
		pcpu_chunk_relocate(dchunk, -1);
		pcpu_chunk_addr_insert(dchunk);
	}

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}