/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Unit grows as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.  ie. in
 * vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers UNIT_SIZE apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative one an allocated region.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks are also linked into a rb tree
 * to ease address to chunk mapping during free.
 *
 * To use this allocator, arch code should do the following.
 *
 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
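/*
 * Illustrative sketch (not part of this file): a minimal arch setup
 * path following the steps above.  pcpux_get_page() and
 * pcpux_populate_pte() are made-up placeholder names for the
 * arch-provided callbacks; only the pcpu_setup_first_chunk()
 * signature is taken from this file.
 *
 *	static struct page *pcpux_get_page(unsigned int cpu, int pageno)
 *	{
 *		// hypothetical: return the page backing @cpu's unit
 *		// page @pageno, NULL once past the last populated page
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		// all optional parameters left to their auto defaults
 *		pcpu_setup_first_chunk(pcpux_get_page,
 *				       __per_cpu_end - __per_cpu_start,
 *				       0, -1, -1, NULL,
 *				       pcpux_populate_pte);
 *	}
 */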

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/* optional reserved chunk, only accessible for reserved allocations */
static struct pcpu_chunk *pcpu_reserved_chunk;
/* offset limit of the reserved chunk */
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks and chunk->page arrays.
 * The latter is a spinlock and protects the index data structures -
 * chunk slots, rbtree, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, the free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that it's
 * necessary to grab both locks to remove a chunk from circulation as
 * the allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
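/*
 * Lock ordering sketch (illustrative, mirroring pcpu_alloc() and
 * pcpu_reclaim() below): whenever both locks are needed, the mutex is
 * taken first and the spinlock is nested inside it.
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	spin_lock_irq(&pcpu_lock);
 *	... update chunk slots, rbtree and area maps ...
 *	spin_unlock_irq(&pcpu_lock);
 *	... GFP_KERNEL allocations with only the mutex held ...
 *	mutex_unlock(&pcpu_alloc_mutex);
 */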

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
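/*
 * Worked example (illustrative): a chunk with free_size == 12 has
 * fls(12) == 4, so __pcpu_size_to_slot() returns max(4 - 5 + 2, 1)
 * == 1, while a fully free chunk (free_size == pcpu_unit_size)
 * always lands in the last slot, pcpu_nr_slots - 1.
 */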

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}
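/*
 * Worked example (illustrative): with pcpu_unit_pages == 8, cpu 2 and
 * page_idx 3 give page index 2 * 8 + 3 == 19, so pcpu_chunk_addr()
 * returns chunk->vm->addr + 19 * PAGE_SIZE - units are laid out back
 * to back inside the chunk's vmalloc area.
 */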

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}
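/*
 * For example (assuming 4k pages): a 64-entry area map needs 256
 * bytes and comes from kzalloc(), while a 2048-entry map needs 8k and
 * goes through vmalloc().  Callers must remember the size, as
 * pcpu_mem_free() picks kfree() vs vfree() by the same threshold.
 */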

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &(*p)->rb_left;
		else if (addr > chunk->vm->addr)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address to search for
 *
 * Look for chunk which might contain @addr.  More specifically, it
 * searches for the chunk with the highest start address which isn't
 * beyond @addr.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	/* is it in the reserved chunk? */
	if (pcpu_reserved_chunk) {
		void *start = pcpu_reserved_chunk->vm->addr;

		if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
	}

	/* nah... search the regular ones */
	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exactly matching chunk, the parent is the closest */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the parent was the next one, look for the previous one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}
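/*
 * Worked example (illustrative addresses): with chunks starting at
 * 0xa000 and 0xf000 in the tree, looking up 0xb123 finds no exact
 * match; the search either stops at the 0xa000 node directly or at
 * 0xf000, in which case rb_prev() steps back to 0xa000 - the chunk
 * with the highest start address not beyond the looked-up address.
 */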

/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into address rb tree.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}

/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend area map of @chunk so that it can accommodate an allocation.
 * A single allocation can split an area into three areas, so this
 * function makes sure that @chunk->map has at least two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened in between, so map_used couldn't have
	 * grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	return 0;
}
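/*
 * Worked example: a chunk with map_used == 31 and map_alloc == 32
 * can't take two extra entries, so the map is reallocated; new_alloc
 * doubles 16 -> 32 -> 64 until it covers map_used + 2 == 33 entries.
 */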

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
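/*
 * Worked example: allocating 100 bytes from a free 512-byte block
 * with no alignment padding gives head == 0 and tail == 412, so
 * pcpu_split_block() turns map[i] == 512 into map[i] == 100 followed
 * by map[i + 1] == 412; pcpu_alloc_area() below then flips the sign
 * of map[i] to mark it allocated.
 */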

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
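/*
 * Worked example: freeing a -100 entry sitting between a 24-byte and
 * a 412-byte free entry flips it to 100, then both merges fire and
 * the three map entries collapse into a single 536-byte free entry.
 */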

/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}
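/*
 * Note on pcpu_unmap()'s single flush: because units sit back to back
 * in the chunk's vmalloc area, the span from pcpu_chunk_addr(chunk,
 * 0, page_start) to pcpu_chunk_addr(chunk, last, page_end) covers the
 * affected page range of every cpu's unit in one go.
 */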

/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}
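/*
 * Worked example (assuming 4k pages): populating off == 100, size ==
 * 8192 gives page_start == PFN_DOWN(100) == 0 and page_end ==
 * PFN_UP(8292) == 3, so pages 0-2 of every cpu's unit are allocated
 * and mapped, skipping any already-occupied runs.
 */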

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	return __addr_to_pcpu_ptr(chunk->vm->addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}
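/*
 * Usage sketch (illustrative; struct my_stats is a made-up caller
 * type, not part of this file):
 *
 *	struct my_stats *stats;
 *
 *	stats = __alloc_percpu(sizeof(*stats), __alignof__(*stats));
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(stats);
 */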

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		rb_erase(&chunk->rb_node, &pcpu_addr_root);
		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);
	mutex_unlock(&pcpu_alloc_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
	}
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);
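/*
 * First chunk layout sketch (one unit, illustrative; the tail only
 * exists when unit_size leaves room beyond the three areas):
 *
 *	| static area | reserved area | dynamic area | left alone |
 *	  static_size   reserved_size   dyn_size       up to unit_size
 */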
This is primarily used to serve module percpu 1038edcb4639STejun Heo * static areas on architectures where the addressing model has 1039edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 1040edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 1041edcb4639STejun Heo * 1042cafe8816STejun Heo * @unit_size, if non-negative, specifies unit size and must be 1043cafe8816STejun Heo * aligned to PAGE_SIZE and equal to or larger than @static_size + 1044edcb4639STejun Heo * @reserved_size + @dyn_size. 10458d408b4bSTejun Heo * 1046cafe8816STejun Heo * @dyn_size, if non-negative, limits the number of bytes available 1047cafe8816STejun Heo * for dynamic allocation in the first chunk. Specifying non-negative 1048cafe8816STejun Heo * value make percpu leave alone the area beyond @static_size + 1049edcb4639STejun Heo * @reserved_size + @dyn_size. 10508d408b4bSTejun Heo * 10518d408b4bSTejun Heo * Non-null @base_addr means that the caller already allocated virtual 10528d408b4bSTejun Heo * region for the first chunk and mapped it. percpu must not mess 10538d408b4bSTejun Heo * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL 10548d408b4bSTejun Heo * @populate_pte_fn doesn't make any sense. 10558d408b4bSTejun Heo * 10568d408b4bSTejun Heo * @populate_pte_fn is used to populate the pagetable. NULL means the 10578d408b4bSTejun Heo * caller already populated the pagetable. 1058fbf59bc9STejun Heo * 1059edcb4639STejun Heo * If the first chunk ends up with both reserved and dynamic areas, it 1060edcb4639STejun Heo * is served by two chunks - one to serve the core static and reserved 1061edcb4639STejun Heo * areas and the other for the dynamic area. They share the same vm 1062edcb4639STejun Heo * and page map but uses different area allocation map to stay away 1063edcb4639STejun Heo * from each other. The latter chunk is circulated in the chunk slots 1064edcb4639STejun Heo * and available for dynamic allocation like any other chunks. 1065edcb4639STejun Heo * 1066fbf59bc9STejun Heo * RETURNS: 1067fbf59bc9STejun Heo * The determined pcpu_unit_size which can be used to initialize 1068fbf59bc9STejun Heo * percpu access. 1069fbf59bc9STejun Heo */ 10708d408b4bSTejun Heo size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, 1071edcb4639STejun Heo size_t static_size, size_t reserved_size, 1072cafe8816STejun Heo ssize_t unit_size, ssize_t dyn_size, 1073cafe8816STejun Heo void *base_addr, 10748d408b4bSTejun Heo pcpu_populate_pte_fn_t populate_pte_fn) 1075fbf59bc9STejun Heo { 10762441d15cSTejun Heo static struct vm_struct first_vm; 1077edcb4639STejun Heo static int smap[2], dmap[2]; 1078edcb4639STejun Heo struct pcpu_chunk *schunk, *dchunk = NULL; 1079fbf59bc9STejun Heo unsigned int cpu; 10808d408b4bSTejun Heo int nr_pages; 1081fbf59bc9STejun Heo int err, i; 1082fbf59bc9STejun Heo 10838d408b4bSTejun Heo /* santiy checks */ 1084edcb4639STejun Heo BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || 1085edcb4639STejun Heo ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); 10868d408b4bSTejun Heo BUG_ON(!static_size); 1087cafe8816STejun Heo if (unit_size >= 0) { 1088edcb4639STejun Heo BUG_ON(unit_size < static_size + reserved_size + 1089cafe8816STejun Heo (dyn_size >= 0 ? 

	if (unit_size >= 0)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(static_size + reserved_size));

	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->page = schunk->page_ar;

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;	/* not for dynamic alloc */
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	pcpu_reserved_chunk_limit = static_size + schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->page = schunk->page_ar;	/* share page map with schunk */

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* allocate vm address */
	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&first_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the first chunks can't
		 * be mapped or unmapped by percpu and are marked
		 * immutable.
		 */
		first_vm.addr = base_addr;
		schunk->immutable = true;
		if (dchunk)
			dchunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(schunk,
								cpu, i));

		err = pcpu_map(schunk, 0, nr_pages);
		if (err)
			panic("failed to setup static percpu area, err=%d\n",
			      err);
	}

	/* link the first chunk in */
	if (!dchunk) {
		pcpu_chunk_relocate(schunk, -1);
		pcpu_chunk_addr_insert(schunk);
	} else {
		pcpu_chunk_relocate(dchunk, -1);
		pcpu_chunk_addr_insert(dchunk);
	}

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}