1fbf59bc9STejun Heo /*
2fbf59bc9STejun Heo * linux/mm/percpu.c - percpu memory allocator
3fbf59bc9STejun Heo *
4fbf59bc9STejun Heo * Copyright (C) 2009 SUSE Linux Products GmbH
5fbf59bc9STejun Heo * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6fbf59bc9STejun Heo *
7fbf59bc9STejun Heo * This file is released under the GPLv2.
8fbf59bc9STejun Heo *
9fbf59bc9STejun Heo * This is the percpu allocator which can handle both static and dynamic
10fbf59bc9STejun Heo * areas. Percpu areas are allocated in chunks in the vmalloc area. Each
112f39e637STejun Heo * chunk consists of a boot-time determined number of units and the
122f39e637STejun Heo * first chunk is used for static percpu variables in the kernel image
132f39e637STejun Heo * (special boot time alloc/init handling is necessary as these areas
142f39e637STejun Heo * need to be brought up before allocation services are running).
152f39e637STejun Heo * The unit size grows as necessary and all units grow or shrink in unison.
162f39e637STejun Heo * When a chunk is filled up, another chunk is allocated, ie. in the
172f39e637STejun Heo * vmalloc area.
18fbf59bc9STejun Heo *
19fbf59bc9STejun Heo * c0 c1 c2
20fbf59bc9STejun Heo * ------------------- ------------------- ------------
21fbf59bc9STejun Heo * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
22fbf59bc9STejun Heo * ------------------- ...... ------------------- .... ------------
23fbf59bc9STejun Heo *
24fbf59bc9STejun Heo * Allocation is done in offset-size areas of single unit space. Ie,
25fbf59bc9STejun Heo * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
262f39e637STejun Heo * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
272f39e637STejun Heo * cpus. On NUMA, the mapping can be non-linear and even sparse.
282f39e637STejun Heo * Percpu access can be done by configuring percpu base registers
292f39e637STejun Heo * according to the cpu to unit mapping and pcpu_unit_size.
30fbf59bc9STejun Heo *
312f39e637STejun Heo * There are usually many small percpu allocations, many of them being
322f39e637STejun Heo * as small as 4 bytes. The allocator organizes chunks into lists
33fbf59bc9STejun Heo * according to free size and tries to allocate from the fullest one.
34fbf59bc9STejun Heo * Each chunk keeps the maximum contiguous area size hint which is
35fbf59bc9STejun Heo * guaranteed to be equal to or larger than the maximum contiguous
36fbf59bc9STejun Heo * area in the chunk. This helps the allocator not to iterate the
37fbf59bc9STejun Heo * chunk maps unnecessarily.
38fbf59bc9STejun Heo *
39fbf59bc9STejun Heo * Allocation state in each chunk is kept using an array of integers
40fbf59bc9STejun Heo * on chunk->map. A positive value in the map represents a free
41fbf59bc9STejun Heo * region and a negative one an allocated region. Allocation inside a chunk is
42fbf59bc9STejun Heo * done by scanning this map sequentially and serving the first matching
43fbf59bc9STejun Heo * entry. This is mostly copied from the percpu_modalloc() allocator.
44e1b9aa3fSChristoph Lameter * Chunks can be determined from the address using the index field
45e1b9aa3fSChristoph Lameter * in the page struct. The index field contains a pointer to the chunk.
46fbf59bc9STejun Heo *
47fbf59bc9STejun Heo * To use this allocator, arch code should do the following:
48fbf59bc9STejun Heo *
49e74e3962STejun Heo * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
50fbf59bc9STejun Heo *
51fbf59bc9STejun Heo * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
52e0100983STejun Heo * a regular address to a percpu pointer and back if they need to be
53e0100983STejun Heo * different from the default
54fbf59bc9STejun Heo *
558d408b4bSTejun Heo * - use pcpu_setup_first_chunk() during percpu area initialization to
568d408b4bSTejun Heo * set up the first chunk containing the kernel static percpu area
57fbf59bc9STejun Heo */
58fbf59bc9STejun Heo
59fbf59bc9STejun Heo #include <linux/bitmap.h>
60fbf59bc9STejun Heo #include <linux/bootmem.h>
61fbf59bc9STejun Heo #include <linux/list.h>
62a530b795STejun Heo #include <linux/log2.h>
63fbf59bc9STejun Heo #include <linux/mm.h>
64fbf59bc9STejun Heo #include <linux/module.h>
65fbf59bc9STejun Heo #include <linux/mutex.h>
66fbf59bc9STejun Heo #include <linux/percpu.h>
67fbf59bc9STejun Heo #include <linux/pfn.h>
68fbf59bc9STejun Heo #include <linux/slab.h>
69ccea34b5STejun Heo #include <linux/spinlock.h>
70fbf59bc9STejun Heo #include <linux/vmalloc.h>
71a56dbddfSTejun Heo #include <linux/workqueue.h>
72fbf59bc9STejun Heo
73fbf59bc9STejun Heo #include <asm/cacheflush.h>
74e0100983STejun Heo #include <asm/sections.h>
75fbf59bc9STejun Heo #include <asm/tlbflush.h>
76fbf59bc9STejun Heo
77fbf59bc9STejun Heo #define PCPU_SLOT_BASE_SHIFT 5 /* sizes 1-31 bytes share the same slot */
78fbf59bc9STejun Heo #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 entries */
79fbf59bc9STejun Heo
80e0100983STejun Heo /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
81e0100983STejun Heo #ifndef __addr_to_pcpu_ptr
82e0100983STejun Heo #define __addr_to_pcpu_ptr(addr) \
83e0100983STejun Heo (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
84e0100983STejun Heo + (unsigned long)__per_cpu_start)
85e0100983STejun Heo #endif
86e0100983STejun Heo #ifndef __pcpu_ptr_to_addr
87e0100983STejun Heo #define __pcpu_ptr_to_addr(ptr) \
88e0100983STejun Heo (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
89e0100983STejun Heo - (unsigned long)__per_cpu_start)
90e0100983STejun Heo #endif
91e0100983STejun Heo
92fbf59bc9STejun Heo struct pcpu_chunk {
93fbf59bc9STejun Heo struct list_head list; /* linked to pcpu_slot lists */
94fbf59bc9STejun Heo int free_size; /* free bytes in the chunk */
95fbf59bc9STejun Heo int contig_hint; /* max contiguous size hint */
96fbf59bc9STejun Heo struct vm_struct *vm; /* mapped vmalloc region */
97fbf59bc9STejun Heo int map_used; /* # of map entries used */
98fbf59bc9STejun Heo int map_alloc; /* # of map entries allocated */
99fbf59bc9STejun Heo int *map; /* allocation map */
1008d408b4bSTejun Heo bool immutable; /* no [de]population allowed */
101ce3141a2STejun Heo unsigned long populated[]; /* populated bitmap */
102fbf59bc9STejun Heo };
103fbf59bc9STejun Heo
10440150d37STejun Heo static int pcpu_unit_pages __read_mostly;
10540150d37STejun Heo static int pcpu_unit_size __read_mostly;
1062f39e637STejun Heo static int pcpu_nr_units __read_mostly;
10740150d37STejun Heo static int pcpu_chunk_size __read_mostly;
10840150d37STejun Heo static int pcpu_nr_slots __read_mostly;
10940150d37STejun Heo static size_t pcpu_chunk_struct_size __read_mostly;
110fbf59bc9STejun Heo
1112f39e637STejun Heo /* cpus with the lowest and highest unit numbers */
1122f39e637STejun Heo static unsigned int pcpu_first_unit_cpu __read_mostly;
1132f39e637STejun Heo static unsigned int pcpu_last_unit_cpu __read_mostly;
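
/*
 * Illustrative sketch, not part of the allocator proper: with the
 * default __addr_to_pcpu_ptr()/__pcpu_ptr_to_addr() pair above, a
 * percpu pointer is simply the unit0 vmalloc address rebased against
 * __per_cpu_start, so the translation round-trips.  (The helper name
 * is hypothetical; assumes the extern declaration of pcpu_base_addr
 * from <linux/percpu.h>.)
 */
static inline bool pcpu_addr_roundtrip_ok(void *addr)
{
	void *ptr = __addr_to_pcpu_ptr(addr);	/* addr -> percpu ptr */

	return __pcpu_ptr_to_addr(ptr) == addr;	/* and back */
}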
1142f39e637STejun Heo
115fbf59bc9STejun Heo /* the address of the first chunk which starts with the kernel static area */
11640150d37STejun Heo void *pcpu_base_addr __read_mostly;
117fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(pcpu_base_addr);
118fbf59bc9STejun Heo
1192f39e637STejun Heo /* cpu -> unit map */
1202f39e637STejun Heo const int *pcpu_unit_map __read_mostly;
1212f39e637STejun Heo
122ae9e6bc9STejun Heo /*
123ae9e6bc9STejun Heo * The first chunk which always exists. Note that unlike other
124ae9e6bc9STejun Heo * chunks, this one can be allocated and mapped in several different
125ae9e6bc9STejun Heo * ways and thus often doesn't live in the vmalloc area.
126ae9e6bc9STejun Heo */
127ae9e6bc9STejun Heo static struct pcpu_chunk *pcpu_first_chunk;
128ae9e6bc9STejun Heo
129ae9e6bc9STejun Heo /*
130ae9e6bc9STejun Heo * Optional reserved chunk. This chunk reserves part of the first
131ae9e6bc9STejun Heo * chunk and serves it for reserved allocations. The end offset of
132ae9e6bc9STejun Heo * the reserved area is in pcpu_reserved_chunk_limit. When the reserved
133ae9e6bc9STejun Heo * area doesn't exist, the following variables contain NULL and 0
134ae9e6bc9STejun Heo * respectively.
135ae9e6bc9STejun Heo */
136edcb4639STejun Heo static struct pcpu_chunk *pcpu_reserved_chunk;
137edcb4639STejun Heo static int pcpu_reserved_chunk_limit;
138edcb4639STejun Heo
139fbf59bc9STejun Heo /*
140ccea34b5STejun Heo * Synchronization rules.
141fbf59bc9STejun Heo *
142ccea34b5STejun Heo * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
143ce3141a2STejun Heo * protects allocation/reclaim paths, chunks, the populated bitmap and
144ce3141a2STejun Heo * vmalloc mapping. The latter is a spinlock and protects the index
145ce3141a2STejun Heo * data structures - chunk slots, chunks and area maps in chunks.
146fbf59bc9STejun Heo *
147ccea34b5STejun Heo * During allocation, pcpu_alloc_mutex is kept locked all the time and
148ccea34b5STejun Heo * pcpu_lock is grabbed and released as necessary. All actual memory
149ccea34b5STejun Heo * allocations are done using GFP_KERNEL with pcpu_lock released.
150ccea34b5STejun Heo *
151ccea34b5STejun Heo * The free path accesses and alters only the index data structures, so
152ccea34b5STejun Heo * it can be safely called from atomic context. When memory needs to be
153ccea34b5STejun Heo * returned to the system, the free path schedules pcpu_reclaim_work,
154ccea34b5STejun Heo * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
155ccea34b5STejun Heo * reclaimed, releases both locks and frees the chunks. Note that it's
156ccea34b5STejun Heo * necessary to grab both locks to remove a chunk from circulation as
157ccea34b5STejun Heo * the allocation path might be referencing the chunk with only
158ccea34b5STejun Heo * pcpu_alloc_mutex locked.
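 *
 * For example (illustrative sketch), a free from atomic context only
 * needs pcpu_lock:
 *
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	pcpu_free_area(chunk, off);
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 *
 * which is exactly the path free_percpu() below takes.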
159fbf59bc9STejun Heo */ 160ccea34b5STejun Heo static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ 161ccea34b5STejun Heo static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */ 162fbf59bc9STejun Heo 16340150d37STejun Heo static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ 164fbf59bc9STejun Heo 165a56dbddfSTejun Heo /* reclaim work to release fully free chunks, scheduled from free path */ 166a56dbddfSTejun Heo static void pcpu_reclaim(struct work_struct *work); 167a56dbddfSTejun Heo static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); 168a56dbddfSTejun Heo 169d9b55eebSTejun Heo static int __pcpu_size_to_slot(int size) 170fbf59bc9STejun Heo { 171cae3aeb8STejun Heo int highbit = fls(size); /* size is in bytes */ 172fbf59bc9STejun Heo return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 173fbf59bc9STejun Heo } 174fbf59bc9STejun Heo 175d9b55eebSTejun Heo static int pcpu_size_to_slot(int size) 176d9b55eebSTejun Heo { 177d9b55eebSTejun Heo if (size == pcpu_unit_size) 178d9b55eebSTejun Heo return pcpu_nr_slots - 1; 179d9b55eebSTejun Heo return __pcpu_size_to_slot(size); 180d9b55eebSTejun Heo } 181d9b55eebSTejun Heo 182fbf59bc9STejun Heo static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 183fbf59bc9STejun Heo { 184fbf59bc9STejun Heo if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) 185fbf59bc9STejun Heo return 0; 186fbf59bc9STejun Heo 187fbf59bc9STejun Heo return pcpu_size_to_slot(chunk->free_size); 188fbf59bc9STejun Heo } 189fbf59bc9STejun Heo 190fbf59bc9STejun Heo static int pcpu_page_idx(unsigned int cpu, int page_idx) 191fbf59bc9STejun Heo { 1922f39e637STejun Heo return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; 193fbf59bc9STejun Heo } 194fbf59bc9STejun Heo 195fbf59bc9STejun Heo static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, 196fbf59bc9STejun Heo unsigned int cpu, int page_idx) 197fbf59bc9STejun Heo { 198fbf59bc9STejun Heo return (unsigned long)chunk->vm->addr + 199fbf59bc9STejun Heo (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT); 200fbf59bc9STejun Heo } 201fbf59bc9STejun Heo 202ce3141a2STejun Heo static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, 203c8a51be4STejun Heo unsigned int cpu, int page_idx) 204c8a51be4STejun Heo { 205ce3141a2STejun Heo /* must not be used on pre-mapped chunk */ 206ce3141a2STejun Heo WARN_ON(chunk->immutable); 207c8a51be4STejun Heo 208ce3141a2STejun Heo return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); 209fbf59bc9STejun Heo } 210fbf59bc9STejun Heo 211e1b9aa3fSChristoph Lameter /* set the pointer to a chunk in a page struct */ 212e1b9aa3fSChristoph Lameter static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) 213e1b9aa3fSChristoph Lameter { 214e1b9aa3fSChristoph Lameter page->index = (unsigned long)pcpu; 215e1b9aa3fSChristoph Lameter } 216e1b9aa3fSChristoph Lameter 217e1b9aa3fSChristoph Lameter /* obtain pointer to a chunk from a page struct */ 218e1b9aa3fSChristoph Lameter static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) 219e1b9aa3fSChristoph Lameter { 220e1b9aa3fSChristoph Lameter return (struct pcpu_chunk *)page->index; 221e1b9aa3fSChristoph Lameter } 222e1b9aa3fSChristoph Lameter 223ce3141a2STejun Heo static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end) 224ce3141a2STejun Heo { 225ce3141a2STejun Heo *rs = find_next_zero_bit(chunk->populated, end, *rs); 226ce3141a2STejun Heo *re = find_next_bit(chunk->populated, end, *rs + 1); 227ce3141a2STejun Heo } 
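
/*
 * Illustrative sketch, not used by the allocator itself: given the
 * helper above, pcpu_next_unpop() advances @rs to the start of the
 * next unpopulated run and sets @re to its end.  Counting the
 * unpopulated pages of a chunk mirrors the iteration done by the
 * pcpu_for_each_unpop_region() macro defined below.
 */
static inline int pcpu_count_unpop_pages(struct pcpu_chunk *chunk)
{
	int rs = 0, re, nr = 0;

	while (rs < pcpu_unit_pages) {
		pcpu_next_unpop(chunk, &rs, &re, pcpu_unit_pages);
		if (rs >= pcpu_unit_pages)
			break;
		nr += re - rs;	/* [rs, re) is an unpopulated run */
		rs = re + 1;	/* page @re is populated, skip it */
	}
	return nr;
}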
228ce3141a2STejun Heo
229ce3141a2STejun Heo static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
230ce3141a2STejun Heo {
231ce3141a2STejun Heo *rs = find_next_bit(chunk->populated, end, *rs);
232ce3141a2STejun Heo *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
233ce3141a2STejun Heo }
234ce3141a2STejun Heo
235ce3141a2STejun Heo /*
236ce3141a2STejun Heo * (Un)populated page region iterators. Iterate over (un)populated
237ce3141a2STejun Heo * page regions between @start and @end in @chunk. @rs and @re should
238ce3141a2STejun Heo * be integer variables and will be set to start and end page index of
239ce3141a2STejun Heo * the current region.
240ce3141a2STejun Heo */
241ce3141a2STejun Heo #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
242ce3141a2STejun Heo for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
243ce3141a2STejun Heo (rs) < (re); \
244ce3141a2STejun Heo (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
245ce3141a2STejun Heo
246ce3141a2STejun Heo #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
247ce3141a2STejun Heo for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
248ce3141a2STejun Heo (rs) < (re); \
249ce3141a2STejun Heo (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
250ce3141a2STejun Heo
251fbf59bc9STejun Heo /**
2521880d93bSTejun Heo * pcpu_mem_alloc - allocate memory
2531880d93bSTejun Heo * @size: bytes to allocate
254fbf59bc9STejun Heo *
2551880d93bSTejun Heo * Allocate @size bytes. If @size is no larger than PAGE_SIZE,
2561880d93bSTejun Heo * kzalloc() is used; otherwise, vmalloc() is used. The returned
2571880d93bSTejun Heo * memory is always zeroed.
258fbf59bc9STejun Heo *
259ccea34b5STejun Heo * CONTEXT:
260ccea34b5STejun Heo * Does GFP_KERNEL allocation.
261ccea34b5STejun Heo *
262fbf59bc9STejun Heo * RETURNS:
2631880d93bSTejun Heo * Pointer to the allocated area on success, NULL on failure.
264fbf59bc9STejun Heo */
2651880d93bSTejun Heo static void *pcpu_mem_alloc(size_t size)
266fbf59bc9STejun Heo {
267fbf59bc9STejun Heo if (size <= PAGE_SIZE)
2681880d93bSTejun Heo return kzalloc(size, GFP_KERNEL);
2691880d93bSTejun Heo else {
2701880d93bSTejun Heo void *ptr = vmalloc(size);
2711880d93bSTejun Heo if (ptr)
2721880d93bSTejun Heo memset(ptr, 0, size);
2731880d93bSTejun Heo return ptr;
2741880d93bSTejun Heo }
2751880d93bSTejun Heo }
276fbf59bc9STejun Heo
2771880d93bSTejun Heo /**
2781880d93bSTejun Heo * pcpu_mem_free - free memory
2791880d93bSTejun Heo * @ptr: memory to free
2801880d93bSTejun Heo * @size: size of the area
2811880d93bSTejun Heo *
2821880d93bSTejun Heo * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
2831880d93bSTejun Heo */
2841880d93bSTejun Heo static void pcpu_mem_free(void *ptr, size_t size)
2851880d93bSTejun Heo {
2861880d93bSTejun Heo if (size <= PAGE_SIZE)
2871880d93bSTejun Heo kfree(ptr);
2881880d93bSTejun Heo else
2891880d93bSTejun Heo vfree(ptr);
290fbf59bc9STejun Heo }
291fbf59bc9STejun Heo
292fbf59bc9STejun Heo /**
293fbf59bc9STejun Heo * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
294fbf59bc9STejun Heo * @chunk: chunk of interest
295fbf59bc9STejun Heo * @oslot: the previous slot it was on
296fbf59bc9STejun Heo *
297fbf59bc9STejun Heo * This function is called after an allocation or free changed @chunk.
298fbf59bc9STejun Heo * The new slot according to the changed state is determined and @chunk is
299edcb4639STejun Heo * moved to the slot.
Note that the reserved chunk is never put on
300edcb4639STejun Heo * chunk slots.
301ccea34b5STejun Heo *
302ccea34b5STejun Heo * CONTEXT:
303ccea34b5STejun Heo * pcpu_lock.
304fbf59bc9STejun Heo */
305fbf59bc9STejun Heo static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
306fbf59bc9STejun Heo {
307fbf59bc9STejun Heo int nslot = pcpu_chunk_slot(chunk);
308fbf59bc9STejun Heo
309edcb4639STejun Heo if (chunk != pcpu_reserved_chunk && oslot != nslot) {
310fbf59bc9STejun Heo if (oslot < nslot)
311fbf59bc9STejun Heo list_move(&chunk->list, &pcpu_slot[nslot]);
312fbf59bc9STejun Heo else
313fbf59bc9STejun Heo list_move_tail(&chunk->list, &pcpu_slot[nslot]);
314fbf59bc9STejun Heo }
315fbf59bc9STejun Heo }
316fbf59bc9STejun Heo
317fbf59bc9STejun Heo /**
318e1b9aa3fSChristoph Lameter * pcpu_chunk_addr_search - determine chunk containing specified address
319e1b9aa3fSChristoph Lameter * @addr: address for which the chunk needs to be determined.
320ccea34b5STejun Heo *
321fbf59bc9STejun Heo * RETURNS:
322fbf59bc9STejun Heo * The address of the found chunk.
323fbf59bc9STejun Heo */
324fbf59bc9STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
325fbf59bc9STejun Heo {
326ae9e6bc9STejun Heo void *first_start = pcpu_first_chunk->vm->addr;
327fbf59bc9STejun Heo
328ae9e6bc9STejun Heo /* is it in the first chunk? */
32979ba6ac8STejun Heo if (addr >= first_start && addr < first_start + pcpu_unit_size) {
330ae9e6bc9STejun Heo /* is it in the reserved area? */
331ae9e6bc9STejun Heo if (addr < first_start + pcpu_reserved_chunk_limit)
332edcb4639STejun Heo return pcpu_reserved_chunk;
333ae9e6bc9STejun Heo return pcpu_first_chunk;
334edcb4639STejun Heo }
335edcb4639STejun Heo
3362f39e637STejun Heo /*
3372f39e637STejun Heo * The address is relative to unit0 which might be unused and
3382f39e637STejun Heo * thus unmapped. Offset the address to the unit space of the
3392f39e637STejun Heo * current processor before looking it up in the vmalloc
3402f39e637STejun Heo * space. Note that any possible cpu id can be used here, so
3412f39e637STejun Heo * there's no need to worry about preemption or cpu hotplug.
3422f39e637STejun Heo */
3432f39e637STejun Heo addr += pcpu_unit_map[smp_processor_id()] * pcpu_unit_size;
344e1b9aa3fSChristoph Lameter return pcpu_get_page_chunk(vmalloc_to_page(addr));
345fbf59bc9STejun Heo }
346fbf59bc9STejun Heo
347fbf59bc9STejun Heo /**
3489f7dcf22STejun Heo * pcpu_extend_area_map - extend area map for allocation
3499f7dcf22STejun Heo * @chunk: target chunk
3509f7dcf22STejun Heo *
3519f7dcf22STejun Heo * Extend the area map of @chunk so that it can accommodate an allocation.
3529f7dcf22STejun Heo * A single allocation can split an area into three areas, so this
3539f7dcf22STejun Heo * function makes sure that @chunk->map has at least two extra slots.
3549f7dcf22STejun Heo *
355ccea34b5STejun Heo * CONTEXT:
356ccea34b5STejun Heo * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
357ccea34b5STejun Heo * if the area map is extended.
358ccea34b5STejun Heo *
3599f7dcf22STejun Heo * RETURNS:
3609f7dcf22STejun Heo * 0 if noop, 1 if successfully extended, -errno on failure.
3619f7dcf22STejun Heo */
3629f7dcf22STejun Heo static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
3639f7dcf22STejun Heo {
3649f7dcf22STejun Heo int new_alloc;
3659f7dcf22STejun Heo int *new;
3669f7dcf22STejun Heo size_t size;
3679f7dcf22STejun Heo
3689f7dcf22STejun Heo /* has enough?
*/
3699f7dcf22STejun Heo if (chunk->map_alloc >= chunk->map_used + 2)
3709f7dcf22STejun Heo return 0;
3719f7dcf22STejun Heo
372ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock);
373ccea34b5STejun Heo
3749f7dcf22STejun Heo new_alloc = PCPU_DFL_MAP_ALLOC;
3759f7dcf22STejun Heo while (new_alloc < chunk->map_used + 2)
3769f7dcf22STejun Heo new_alloc *= 2;
3779f7dcf22STejun Heo
3789f7dcf22STejun Heo new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
379ccea34b5STejun Heo if (!new) {
380ccea34b5STejun Heo spin_lock_irq(&pcpu_lock);
3819f7dcf22STejun Heo return -ENOMEM;
382ccea34b5STejun Heo }
383ccea34b5STejun Heo
384ccea34b5STejun Heo /*
385ccea34b5STejun Heo * Acquire pcpu_lock and switch to the new area map. Only a free
386ccea34b5STejun Heo * could have happened in between, so map_used couldn't have
387ccea34b5STejun Heo * grown.
388ccea34b5STejun Heo */
389ccea34b5STejun Heo spin_lock_irq(&pcpu_lock);
390ccea34b5STejun Heo BUG_ON(new_alloc < chunk->map_used + 2);
3919f7dcf22STejun Heo
3929f7dcf22STejun Heo size = chunk->map_alloc * sizeof(chunk->map[0]);
3939f7dcf22STejun Heo memcpy(new, chunk->map, size);
3949f7dcf22STejun Heo
3959f7dcf22STejun Heo /*
3969f7dcf22STejun Heo * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
3979f7dcf22STejun Heo * one of the first chunks and still using the static map.
3989f7dcf22STejun Heo */
3999f7dcf22STejun Heo if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
4009f7dcf22STejun Heo pcpu_mem_free(chunk->map, size);
4019f7dcf22STejun Heo
4029f7dcf22STejun Heo chunk->map_alloc = new_alloc;
4039f7dcf22STejun Heo chunk->map = new;
4049f7dcf22STejun Heo return 0;
4059f7dcf22STejun Heo }
4069f7dcf22STejun Heo
4079f7dcf22STejun Heo /**
408fbf59bc9STejun Heo * pcpu_split_block - split a map block
409fbf59bc9STejun Heo * @chunk: chunk of interest
410fbf59bc9STejun Heo * @i: index of map block to split
411cae3aeb8STejun Heo * @head: head size in bytes (can be 0)
412cae3aeb8STejun Heo * @tail: tail size in bytes (can be 0)
413fbf59bc9STejun Heo *
414fbf59bc9STejun Heo * Split the @i'th map block into two or three blocks. If @head is
415fbf59bc9STejun Heo * non-zero, a @head bytes block is inserted before block @i, moving it
416fbf59bc9STejun Heo * to @i+1 and reducing its size by @head bytes.
417fbf59bc9STejun Heo *
418fbf59bc9STejun Heo * If @tail is non-zero, the target block, which can be @i or @i+1
419fbf59bc9STejun Heo * depending on @head, is reduced by @tail bytes and a @tail bytes block
420fbf59bc9STejun Heo * is inserted after the target block.
421fbf59bc9STejun Heo *
4229f7dcf22STejun Heo * @chunk->map must have enough free slots to accommodate the split.
423ccea34b5STejun Heo *
424ccea34b5STejun Heo * CONTEXT:
425ccea34b5STejun Heo * pcpu_lock.
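 *
 * For example (illustrative), splitting a 1024 byte free block with
 * @head=128 and @tail=384 leaves a 512 byte middle block:
 *
 *	before:  ... | 1024 | ...
 *	after:   ... | 128 | 512 | 384 | ...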
426fbf59bc9STejun Heo */ 4279f7dcf22STejun Heo static void pcpu_split_block(struct pcpu_chunk *chunk, int i, 4289f7dcf22STejun Heo int head, int tail) 429fbf59bc9STejun Heo { 430fbf59bc9STejun Heo int nr_extra = !!head + !!tail; 431fbf59bc9STejun Heo 4329f7dcf22STejun Heo BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra); 433fbf59bc9STejun Heo 4349f7dcf22STejun Heo /* insert new subblocks */ 435fbf59bc9STejun Heo memmove(&chunk->map[i + nr_extra], &chunk->map[i], 436fbf59bc9STejun Heo sizeof(chunk->map[0]) * (chunk->map_used - i)); 437fbf59bc9STejun Heo chunk->map_used += nr_extra; 438fbf59bc9STejun Heo 439fbf59bc9STejun Heo if (head) { 440fbf59bc9STejun Heo chunk->map[i + 1] = chunk->map[i] - head; 441fbf59bc9STejun Heo chunk->map[i++] = head; 442fbf59bc9STejun Heo } 443fbf59bc9STejun Heo if (tail) { 444fbf59bc9STejun Heo chunk->map[i++] -= tail; 445fbf59bc9STejun Heo chunk->map[i] = tail; 446fbf59bc9STejun Heo } 447fbf59bc9STejun Heo } 448fbf59bc9STejun Heo 449fbf59bc9STejun Heo /** 450fbf59bc9STejun Heo * pcpu_alloc_area - allocate area from a pcpu_chunk 451fbf59bc9STejun Heo * @chunk: chunk of interest 452cae3aeb8STejun Heo * @size: wanted size in bytes 453fbf59bc9STejun Heo * @align: wanted align 454fbf59bc9STejun Heo * 455fbf59bc9STejun Heo * Try to allocate @size bytes area aligned at @align from @chunk. 456fbf59bc9STejun Heo * Note that this function only allocates the offset. It doesn't 457fbf59bc9STejun Heo * populate or map the area. 458fbf59bc9STejun Heo * 4599f7dcf22STejun Heo * @chunk->map must have at least two free slots. 4609f7dcf22STejun Heo * 461ccea34b5STejun Heo * CONTEXT: 462ccea34b5STejun Heo * pcpu_lock. 463ccea34b5STejun Heo * 464fbf59bc9STejun Heo * RETURNS: 4659f7dcf22STejun Heo * Allocated offset in @chunk on success, -1 if no matching area is 4669f7dcf22STejun Heo * found. 467fbf59bc9STejun Heo */ 468fbf59bc9STejun Heo static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) 469fbf59bc9STejun Heo { 470fbf59bc9STejun Heo int oslot = pcpu_chunk_slot(chunk); 471fbf59bc9STejun Heo int max_contig = 0; 472fbf59bc9STejun Heo int i, off; 473fbf59bc9STejun Heo 474fbf59bc9STejun Heo for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) { 475fbf59bc9STejun Heo bool is_last = i + 1 == chunk->map_used; 476fbf59bc9STejun Heo int head, tail; 477fbf59bc9STejun Heo 478fbf59bc9STejun Heo /* extra for alignment requirement */ 479fbf59bc9STejun Heo head = ALIGN(off, align) - off; 480fbf59bc9STejun Heo BUG_ON(i == 0 && head != 0); 481fbf59bc9STejun Heo 482fbf59bc9STejun Heo if (chunk->map[i] < 0) 483fbf59bc9STejun Heo continue; 484fbf59bc9STejun Heo if (chunk->map[i] < head + size) { 485fbf59bc9STejun Heo max_contig = max(chunk->map[i], max_contig); 486fbf59bc9STejun Heo continue; 487fbf59bc9STejun Heo } 488fbf59bc9STejun Heo 489fbf59bc9STejun Heo /* 490fbf59bc9STejun Heo * If head is small or the previous block is free, 491fbf59bc9STejun Heo * merge'em. Note that 'small' is defined as smaller 492fbf59bc9STejun Heo * than sizeof(int), which is very small but isn't too 493fbf59bc9STejun Heo * uncommon for percpu allocations. 
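 * E.g. with 4 byte ints, a 2 byte head is handed to the
 * neighboring block rather than tracked as a map entry of
 * its own (illustrative).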
494fbf59bc9STejun Heo */ 495fbf59bc9STejun Heo if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) { 496fbf59bc9STejun Heo if (chunk->map[i - 1] > 0) 497fbf59bc9STejun Heo chunk->map[i - 1] += head; 498fbf59bc9STejun Heo else { 499fbf59bc9STejun Heo chunk->map[i - 1] -= head; 500fbf59bc9STejun Heo chunk->free_size -= head; 501fbf59bc9STejun Heo } 502fbf59bc9STejun Heo chunk->map[i] -= head; 503fbf59bc9STejun Heo off += head; 504fbf59bc9STejun Heo head = 0; 505fbf59bc9STejun Heo } 506fbf59bc9STejun Heo 507fbf59bc9STejun Heo /* if tail is small, just keep it around */ 508fbf59bc9STejun Heo tail = chunk->map[i] - head - size; 509fbf59bc9STejun Heo if (tail < sizeof(int)) 510fbf59bc9STejun Heo tail = 0; 511fbf59bc9STejun Heo 512fbf59bc9STejun Heo /* split if warranted */ 513fbf59bc9STejun Heo if (head || tail) { 5149f7dcf22STejun Heo pcpu_split_block(chunk, i, head, tail); 515fbf59bc9STejun Heo if (head) { 516fbf59bc9STejun Heo i++; 517fbf59bc9STejun Heo off += head; 518fbf59bc9STejun Heo max_contig = max(chunk->map[i - 1], max_contig); 519fbf59bc9STejun Heo } 520fbf59bc9STejun Heo if (tail) 521fbf59bc9STejun Heo max_contig = max(chunk->map[i + 1], max_contig); 522fbf59bc9STejun Heo } 523fbf59bc9STejun Heo 524fbf59bc9STejun Heo /* update hint and mark allocated */ 525fbf59bc9STejun Heo if (is_last) 526fbf59bc9STejun Heo chunk->contig_hint = max_contig; /* fully scanned */ 527fbf59bc9STejun Heo else 528fbf59bc9STejun Heo chunk->contig_hint = max(chunk->contig_hint, 529fbf59bc9STejun Heo max_contig); 530fbf59bc9STejun Heo 531fbf59bc9STejun Heo chunk->free_size -= chunk->map[i]; 532fbf59bc9STejun Heo chunk->map[i] = -chunk->map[i]; 533fbf59bc9STejun Heo 534fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 535fbf59bc9STejun Heo return off; 536fbf59bc9STejun Heo } 537fbf59bc9STejun Heo 538fbf59bc9STejun Heo chunk->contig_hint = max_contig; /* fully scanned */ 539fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 540fbf59bc9STejun Heo 5419f7dcf22STejun Heo /* tell the upper layer that this chunk has no matching area */ 5429f7dcf22STejun Heo return -1; 543fbf59bc9STejun Heo } 544fbf59bc9STejun Heo 545fbf59bc9STejun Heo /** 546fbf59bc9STejun Heo * pcpu_free_area - free area to a pcpu_chunk 547fbf59bc9STejun Heo * @chunk: chunk of interest 548fbf59bc9STejun Heo * @freeme: offset of area to free 549fbf59bc9STejun Heo * 550fbf59bc9STejun Heo * Free area starting from @freeme to @chunk. Note that this function 551fbf59bc9STejun Heo * only modifies the allocation map. It doesn't depopulate or unmap 552fbf59bc9STejun Heo * the area. 553ccea34b5STejun Heo * 554ccea34b5STejun Heo * CONTEXT: 555ccea34b5STejun Heo * pcpu_lock. 556fbf59bc9STejun Heo */ 557fbf59bc9STejun Heo static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) 558fbf59bc9STejun Heo { 559fbf59bc9STejun Heo int oslot = pcpu_chunk_slot(chunk); 560fbf59bc9STejun Heo int i, off; 561fbf59bc9STejun Heo 562fbf59bc9STejun Heo for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) 563fbf59bc9STejun Heo if (off == freeme) 564fbf59bc9STejun Heo break; 565fbf59bc9STejun Heo BUG_ON(off != freeme); 566fbf59bc9STejun Heo BUG_ON(chunk->map[i] > 0); 567fbf59bc9STejun Heo 568fbf59bc9STejun Heo chunk->map[i] = -chunk->map[i]; 569fbf59bc9STejun Heo chunk->free_size += chunk->map[i]; 570fbf59bc9STejun Heo 571fbf59bc9STejun Heo /* merge with previous? 
*/ 572fbf59bc9STejun Heo if (i > 0 && chunk->map[i - 1] >= 0) { 573fbf59bc9STejun Heo chunk->map[i - 1] += chunk->map[i]; 574fbf59bc9STejun Heo chunk->map_used--; 575fbf59bc9STejun Heo memmove(&chunk->map[i], &chunk->map[i + 1], 576fbf59bc9STejun Heo (chunk->map_used - i) * sizeof(chunk->map[0])); 577fbf59bc9STejun Heo i--; 578fbf59bc9STejun Heo } 579fbf59bc9STejun Heo /* merge with next? */ 580fbf59bc9STejun Heo if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { 581fbf59bc9STejun Heo chunk->map[i] += chunk->map[i + 1]; 582fbf59bc9STejun Heo chunk->map_used--; 583fbf59bc9STejun Heo memmove(&chunk->map[i + 1], &chunk->map[i + 2], 584fbf59bc9STejun Heo (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); 585fbf59bc9STejun Heo } 586fbf59bc9STejun Heo 587fbf59bc9STejun Heo chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); 588fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 589fbf59bc9STejun Heo } 590fbf59bc9STejun Heo 591fbf59bc9STejun Heo /** 592ce3141a2STejun Heo * pcpu_get_pages_and_bitmap - get temp pages array and bitmap 593fbf59bc9STejun Heo * @chunk: chunk of interest 594ce3141a2STejun Heo * @bitmapp: output parameter for bitmap 595ce3141a2STejun Heo * @may_alloc: may allocate the array 596fbf59bc9STejun Heo * 597ce3141a2STejun Heo * Returns pointer to array of pointers to struct page and bitmap, 598ce3141a2STejun Heo * both of which can be indexed with pcpu_page_idx(). The returned 599ce3141a2STejun Heo * array is cleared to zero and *@bitmapp is copied from 600ce3141a2STejun Heo * @chunk->populated. Note that there is only one array and bitmap 601ce3141a2STejun Heo * and access exclusion is the caller's responsibility. 602ce3141a2STejun Heo * 603ce3141a2STejun Heo * CONTEXT: 604ce3141a2STejun Heo * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc. 605ce3141a2STejun Heo * Otherwise, don't care. 606ce3141a2STejun Heo * 607ce3141a2STejun Heo * RETURNS: 608ce3141a2STejun Heo * Pointer to temp pages array on success, NULL on failure. 
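 *
 * Typical usage (illustrative sketch, cf. pcpu_populate_chunk() and
 * pcpu_depopulate_chunk() below):
 *
 *	pages = pcpu_get_pages_and_bitmap(chunk, &populated, may_alloc);
 *	... allocate/map or unmap pages, updating @populated ...
 *	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);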
609fbf59bc9STejun Heo */
610ce3141a2STejun Heo static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
611ce3141a2STejun Heo unsigned long **bitmapp,
612ce3141a2STejun Heo bool may_alloc)
613ce3141a2STejun Heo {
614ce3141a2STejun Heo static struct page **pages;
615ce3141a2STejun Heo static unsigned long *bitmap;
6162f39e637STejun Heo size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
617ce3141a2STejun Heo size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
618ce3141a2STejun Heo sizeof(unsigned long);
619ce3141a2STejun Heo
620ce3141a2STejun Heo if (!pages || !bitmap) {
621ce3141a2STejun Heo if (may_alloc && !pages)
622ce3141a2STejun Heo pages = pcpu_mem_alloc(pages_size);
623ce3141a2STejun Heo if (may_alloc && !bitmap)
624ce3141a2STejun Heo bitmap = pcpu_mem_alloc(bitmap_size);
625ce3141a2STejun Heo if (!pages || !bitmap)
626ce3141a2STejun Heo return NULL;
627ce3141a2STejun Heo }
628ce3141a2STejun Heo
629ce3141a2STejun Heo memset(pages, 0, pages_size);
630ce3141a2STejun Heo bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
631ce3141a2STejun Heo
632ce3141a2STejun Heo *bitmapp = bitmap;
633ce3141a2STejun Heo return pages;
634ce3141a2STejun Heo }
635ce3141a2STejun Heo
636ce3141a2STejun Heo /**
637ce3141a2STejun Heo * pcpu_free_pages - free pages which were allocated for @chunk
638ce3141a2STejun Heo * @chunk: chunk pages were allocated for
639ce3141a2STejun Heo * @pages: array of pages to be freed, indexed by pcpu_page_idx()
640ce3141a2STejun Heo * @populated: populated bitmap
641ce3141a2STejun Heo * @page_start: page index of the first page to be freed
642ce3141a2STejun Heo * @page_end: page index of the last page to be freed + 1
643ce3141a2STejun Heo *
644ce3141a2STejun Heo * Free pages [@page_start,@page_end) in @pages for all units.
645ce3141a2STejun Heo * The pages were allocated for @chunk.
646ce3141a2STejun Heo */
647ce3141a2STejun Heo static void pcpu_free_pages(struct pcpu_chunk *chunk,
648ce3141a2STejun Heo struct page **pages, unsigned long *populated,
649ce3141a2STejun Heo int page_start, int page_end)
650ce3141a2STejun Heo {
651ce3141a2STejun Heo unsigned int cpu;
652ce3141a2STejun Heo int i;
653ce3141a2STejun Heo
654ce3141a2STejun Heo for_each_possible_cpu(cpu) {
655ce3141a2STejun Heo for (i = page_start; i < page_end; i++) {
656ce3141a2STejun Heo struct page *page = pages[pcpu_page_idx(cpu, i)];
657ce3141a2STejun Heo
658ce3141a2STejun Heo if (page)
659ce3141a2STejun Heo __free_page(page);
660ce3141a2STejun Heo }
661ce3141a2STejun Heo }
662ce3141a2STejun Heo }
663ce3141a2STejun Heo
664ce3141a2STejun Heo /**
665ce3141a2STejun Heo * pcpu_alloc_pages - allocate pages for @chunk
666ce3141a2STejun Heo * @chunk: target chunk
667ce3141a2STejun Heo * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
668ce3141a2STejun Heo * @populated: populated bitmap
669ce3141a2STejun Heo * @page_start: page index of the first page to be allocated
670ce3141a2STejun Heo * @page_end: page index of the last page to be allocated + 1
671ce3141a2STejun Heo *
672ce3141a2STejun Heo * Allocate pages [@page_start,@page_end) into @pages for all units.
673ce3141a2STejun Heo * The allocation is for @chunk. Percpu core doesn't care about the
674ce3141a2STejun Heo * content of @pages and will pass it verbatim to pcpu_map_pages().
675ce3141a2STejun Heo */ 676ce3141a2STejun Heo static int pcpu_alloc_pages(struct pcpu_chunk *chunk, 677ce3141a2STejun Heo struct page **pages, unsigned long *populated, 678ce3141a2STejun Heo int page_start, int page_end) 679ce3141a2STejun Heo { 680ce3141a2STejun Heo const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; 681ce3141a2STejun Heo unsigned int cpu; 682ce3141a2STejun Heo int i; 683ce3141a2STejun Heo 684ce3141a2STejun Heo for_each_possible_cpu(cpu) { 685ce3141a2STejun Heo for (i = page_start; i < page_end; i++) { 686ce3141a2STejun Heo struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; 687ce3141a2STejun Heo 688ce3141a2STejun Heo *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); 689ce3141a2STejun Heo if (!*pagep) { 690ce3141a2STejun Heo pcpu_free_pages(chunk, pages, populated, 691ce3141a2STejun Heo page_start, page_end); 692ce3141a2STejun Heo return -ENOMEM; 693ce3141a2STejun Heo } 694ce3141a2STejun Heo } 695ce3141a2STejun Heo } 696ce3141a2STejun Heo return 0; 697ce3141a2STejun Heo } 698ce3141a2STejun Heo 699ce3141a2STejun Heo /** 700ce3141a2STejun Heo * pcpu_pre_unmap_flush - flush cache prior to unmapping 701ce3141a2STejun Heo * @chunk: chunk the regions to be flushed belongs to 702ce3141a2STejun Heo * @page_start: page index of the first page to be flushed 703ce3141a2STejun Heo * @page_end: page index of the last page to be flushed + 1 704ce3141a2STejun Heo * 705ce3141a2STejun Heo * Pages in [@page_start,@page_end) of @chunk are about to be 706ce3141a2STejun Heo * unmapped. Flush cache. As each flushing trial can be very 707ce3141a2STejun Heo * expensive, issue flush on the whole region at once rather than 708ce3141a2STejun Heo * doing it for each cpu. This could be an overkill but is more 709ce3141a2STejun Heo * scalable. 710ce3141a2STejun Heo */ 711ce3141a2STejun Heo static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, 712ce3141a2STejun Heo int page_start, int page_end) 713fbf59bc9STejun Heo { 7142f39e637STejun Heo flush_cache_vunmap( 7152f39e637STejun Heo pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 7162f39e637STejun Heo pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 717ce3141a2STejun Heo } 718fbf59bc9STejun Heo 719ce3141a2STejun Heo static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) 720ce3141a2STejun Heo { 721ce3141a2STejun Heo unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); 722ce3141a2STejun Heo } 723fbf59bc9STejun Heo 724ce3141a2STejun Heo /** 725ce3141a2STejun Heo * pcpu_unmap_pages - unmap pages out of a pcpu_chunk 726ce3141a2STejun Heo * @chunk: chunk of interest 727ce3141a2STejun Heo * @pages: pages array which can be used to pass information to free 728ce3141a2STejun Heo * @populated: populated bitmap 729ce3141a2STejun Heo * @page_start: page index of the first page to unmap 730ce3141a2STejun Heo * @page_end: page index of the last page to unmap + 1 731ce3141a2STejun Heo * 732ce3141a2STejun Heo * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. 733ce3141a2STejun Heo * Corresponding elements in @pages were cleared by the caller and can 734ce3141a2STejun Heo * be used to carry information to pcpu_free_pages() which will be 735ce3141a2STejun Heo * called after all unmaps are finished. The caller should call 736ce3141a2STejun Heo * proper pre/post flush functions. 
737ce3141a2STejun Heo */ 738ce3141a2STejun Heo static void pcpu_unmap_pages(struct pcpu_chunk *chunk, 739ce3141a2STejun Heo struct page **pages, unsigned long *populated, 740ce3141a2STejun Heo int page_start, int page_end) 741ce3141a2STejun Heo { 742ce3141a2STejun Heo unsigned int cpu; 743ce3141a2STejun Heo int i; 744ce3141a2STejun Heo 745ce3141a2STejun Heo for_each_possible_cpu(cpu) { 746ce3141a2STejun Heo for (i = page_start; i < page_end; i++) { 747ce3141a2STejun Heo struct page *page; 748ce3141a2STejun Heo 749ce3141a2STejun Heo page = pcpu_chunk_page(chunk, cpu, i); 750ce3141a2STejun Heo WARN_ON(!page); 751ce3141a2STejun Heo pages[pcpu_page_idx(cpu, i)] = page; 752ce3141a2STejun Heo } 753ce3141a2STejun Heo __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), 754ce3141a2STejun Heo page_end - page_start); 755ce3141a2STejun Heo } 756ce3141a2STejun Heo 757ce3141a2STejun Heo for (i = page_start; i < page_end; i++) 758ce3141a2STejun Heo __clear_bit(i, populated); 759ce3141a2STejun Heo } 760ce3141a2STejun Heo 761ce3141a2STejun Heo /** 762ce3141a2STejun Heo * pcpu_post_unmap_tlb_flush - flush TLB after unmapping 763ce3141a2STejun Heo * @chunk: pcpu_chunk the regions to be flushed belong to 764ce3141a2STejun Heo * @page_start: page index of the first page to be flushed 765ce3141a2STejun Heo * @page_end: page index of the last page to be flushed + 1 766ce3141a2STejun Heo * 767ce3141a2STejun Heo * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush 768ce3141a2STejun Heo * TLB for the regions. This can be skipped if the area is to be 769ce3141a2STejun Heo * returned to vmalloc as vmalloc will handle TLB flushing lazily. 770ce3141a2STejun Heo * 771ce3141a2STejun Heo * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once 772ce3141a2STejun Heo * for the whole region. 773ce3141a2STejun Heo */ 774ce3141a2STejun Heo static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, 775ce3141a2STejun Heo int page_start, int page_end) 776ce3141a2STejun Heo { 7772f39e637STejun Heo flush_tlb_kernel_range( 7782f39e637STejun Heo pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 7792f39e637STejun Heo pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 780fbf59bc9STejun Heo } 781fbf59bc9STejun Heo 782c8a51be4STejun Heo static int __pcpu_map_pages(unsigned long addr, struct page **pages, 783c8a51be4STejun Heo int nr_pages) 784c8a51be4STejun Heo { 785c8a51be4STejun Heo return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, 786c8a51be4STejun Heo PAGE_KERNEL, pages); 787c8a51be4STejun Heo } 788c8a51be4STejun Heo 789c8a51be4STejun Heo /** 790ce3141a2STejun Heo * pcpu_map_pages - map pages into a pcpu_chunk 791c8a51be4STejun Heo * @chunk: chunk of interest 792ce3141a2STejun Heo * @pages: pages array containing pages to be mapped 793ce3141a2STejun Heo * @populated: populated bitmap 794c8a51be4STejun Heo * @page_start: page index of the first page to map 795c8a51be4STejun Heo * @page_end: page index of the last page to map + 1 796c8a51be4STejun Heo * 797ce3141a2STejun Heo * For each cpu, map pages [@page_start,@page_end) into @chunk. The 798ce3141a2STejun Heo * caller is responsible for calling pcpu_post_map_flush() after all 799ce3141a2STejun Heo * mappings are complete. 800ce3141a2STejun Heo * 801ce3141a2STejun Heo * This function is responsible for setting corresponding bits in 802ce3141a2STejun Heo * @chunk->populated bitmap and whatever is necessary for reverse 803ce3141a2STejun Heo * lookup (addr -> chunk). 
804c8a51be4STejun Heo */
805ce3141a2STejun Heo static int pcpu_map_pages(struct pcpu_chunk *chunk,
806ce3141a2STejun Heo struct page **pages, unsigned long *populated,
807ce3141a2STejun Heo int page_start, int page_end)
808c8a51be4STejun Heo {
809ce3141a2STejun Heo unsigned int cpu, tcpu;
810ce3141a2STejun Heo int i, err;
811c8a51be4STejun Heo
812c8a51be4STejun Heo for_each_possible_cpu(cpu) {
813c8a51be4STejun Heo err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
814ce3141a2STejun Heo &pages[pcpu_page_idx(cpu, page_start)],
815c8a51be4STejun Heo page_end - page_start);
816c8a51be4STejun Heo if (err < 0)
817ce3141a2STejun Heo goto err;
818ce3141a2STejun Heo }
819ce3141a2STejun Heo
820ce3141a2STejun Heo /* mapping successful, link chunk and mark populated */
821ce3141a2STejun Heo for (i = page_start; i < page_end; i++) {
822ce3141a2STejun Heo for_each_possible_cpu(cpu)
823ce3141a2STejun Heo pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
824ce3141a2STejun Heo chunk);
825ce3141a2STejun Heo __set_bit(i, populated);
826ce3141a2STejun Heo }
827ce3141a2STejun Heo
828ce3141a2STejun Heo return 0;
829ce3141a2STejun Heo
830ce3141a2STejun Heo err:
831ce3141a2STejun Heo for_each_possible_cpu(tcpu) {
832ce3141a2STejun Heo if (tcpu == cpu)
833ce3141a2STejun Heo break;
834ce3141a2STejun Heo __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
835ce3141a2STejun Heo page_end - page_start);
836ce3141a2STejun Heo }
837c8a51be4STejun Heo return err;
838c8a51be4STejun Heo }
839c8a51be4STejun Heo
840ce3141a2STejun Heo /**
841ce3141a2STejun Heo * pcpu_post_map_flush - flush cache after mapping
842ce3141a2STejun Heo * @chunk: pcpu_chunk the regions to be flushed belong to
843ce3141a2STejun Heo * @page_start: page index of the first page to be flushed
844ce3141a2STejun Heo * @page_end: page index of the last page to be flushed + 1
845ce3141a2STejun Heo *
846ce3141a2STejun Heo * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
847ce3141a2STejun Heo * cache.
848ce3141a2STejun Heo *
849ce3141a2STejun Heo * As with pcpu_pre_unmap_flush(), the cache flush also is done at once
850ce3141a2STejun Heo * for the whole region.
851ce3141a2STejun Heo */
852ce3141a2STejun Heo static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
853ce3141a2STejun Heo int page_start, int page_end)
854ce3141a2STejun Heo {
8552f39e637STejun Heo flush_cache_vmap(
8562f39e637STejun Heo pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
8572f39e637STejun Heo pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
858c8a51be4STejun Heo }
859c8a51be4STejun Heo
860fbf59bc9STejun Heo /**
861fbf59bc9STejun Heo * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
862fbf59bc9STejun Heo * @chunk: chunk to depopulate
863fbf59bc9STejun Heo * @off: offset to the area to depopulate
864cae3aeb8STejun Heo * @size: size of the area to depopulate in bytes
866fbf59bc9STejun Heo *
867fbf59bc9STejun Heo * For each cpu, depopulate and unmap the pages covering [@off,
868fbf59bc9STejun Heo * @off + @size) from @chunk. The cache is flushed before unmapping;
869fbf59bc9STejun Heo * TLB flushing is left to vmalloc's lazy flushing.
870ccea34b5STejun Heo *
871ccea34b5STejun Heo * CONTEXT:
872ccea34b5STejun Heo * pcpu_alloc_mutex.
873fbf59bc9STejun Heo */ 874ce3141a2STejun Heo static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) 875fbf59bc9STejun Heo { 876fbf59bc9STejun Heo int page_start = PFN_DOWN(off); 877fbf59bc9STejun Heo int page_end = PFN_UP(off + size); 878ce3141a2STejun Heo struct page **pages; 879ce3141a2STejun Heo unsigned long *populated; 880ce3141a2STejun Heo int rs, re; 881fbf59bc9STejun Heo 882ce3141a2STejun Heo /* quick path, check whether it's empty already */ 883ce3141a2STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 884ce3141a2STejun Heo if (rs == page_start && re == page_end) 885ce3141a2STejun Heo return; 886ce3141a2STejun Heo break; 887ce3141a2STejun Heo } 888fbf59bc9STejun Heo 889ce3141a2STejun Heo /* immutable chunks can't be depopulated */ 890ce3141a2STejun Heo WARN_ON(chunk->immutable); 891fbf59bc9STejun Heo 892fbf59bc9STejun Heo /* 893ce3141a2STejun Heo * If control reaches here, there must have been at least one 894ce3141a2STejun Heo * successful population attempt so the temp pages array must 895ce3141a2STejun Heo * be available now. 896fbf59bc9STejun Heo */ 897ce3141a2STejun Heo pages = pcpu_get_pages_and_bitmap(chunk, &populated, false); 898ce3141a2STejun Heo BUG_ON(!pages); 899fbf59bc9STejun Heo 900ce3141a2STejun Heo /* unmap and free */ 901ce3141a2STejun Heo pcpu_pre_unmap_flush(chunk, page_start, page_end); 902fbf59bc9STejun Heo 903ce3141a2STejun Heo pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) 904ce3141a2STejun Heo pcpu_unmap_pages(chunk, pages, populated, rs, re); 905ce3141a2STejun Heo 906ce3141a2STejun Heo /* no need to flush tlb, vmalloc will handle it lazily */ 907ce3141a2STejun Heo 908ce3141a2STejun Heo pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) 909ce3141a2STejun Heo pcpu_free_pages(chunk, pages, populated, rs, re); 910ce3141a2STejun Heo 911ce3141a2STejun Heo /* commit new bitmap */ 912ce3141a2STejun Heo bitmap_copy(chunk->populated, populated, pcpu_unit_pages); 913fbf59bc9STejun Heo } 914fbf59bc9STejun Heo 915fbf59bc9STejun Heo /** 916fbf59bc9STejun Heo * pcpu_populate_chunk - populate and map an area of a pcpu_chunk 917fbf59bc9STejun Heo * @chunk: chunk of interest 918fbf59bc9STejun Heo * @off: offset to the area to populate 919cae3aeb8STejun Heo * @size: size of the area to populate in bytes 920fbf59bc9STejun Heo * 921fbf59bc9STejun Heo * For each cpu, populate and map pages [@page_start,@page_end) into 922fbf59bc9STejun Heo * @chunk. The area is cleared on return. 923ccea34b5STejun Heo * 924ccea34b5STejun Heo * CONTEXT: 925ccea34b5STejun Heo * pcpu_alloc_mutex, does GFP_KERNEL allocation. 
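 *
 * Typical call sequence from pcpu_alloc() below (illustrative):
 *
 *	off = pcpu_alloc_area(chunk, size, align);
 *	if (off >= 0 && pcpu_populate_chunk(chunk, off, size))
 *		pcpu_free_area(chunk, off);	(back out on failure)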
926fbf59bc9STejun Heo */ 927fbf59bc9STejun Heo static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) 928fbf59bc9STejun Heo { 929fbf59bc9STejun Heo int page_start = PFN_DOWN(off); 930fbf59bc9STejun Heo int page_end = PFN_UP(off + size); 931ce3141a2STejun Heo int free_end = page_start, unmap_end = page_start; 932ce3141a2STejun Heo struct page **pages; 933ce3141a2STejun Heo unsigned long *populated; 934fbf59bc9STejun Heo unsigned int cpu; 935ce3141a2STejun Heo int rs, re, rc; 936fbf59bc9STejun Heo 937ce3141a2STejun Heo /* quick path, check whether all pages are already there */ 938ce3141a2STejun Heo pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) { 939ce3141a2STejun Heo if (rs == page_start && re == page_end) 940ce3141a2STejun Heo goto clear; 941ce3141a2STejun Heo break; 942fbf59bc9STejun Heo } 943fbf59bc9STejun Heo 944ce3141a2STejun Heo /* need to allocate and map pages, this chunk can't be immutable */ 945ce3141a2STejun Heo WARN_ON(chunk->immutable); 946fbf59bc9STejun Heo 947ce3141a2STejun Heo pages = pcpu_get_pages_and_bitmap(chunk, &populated, true); 948ce3141a2STejun Heo if (!pages) 949ce3141a2STejun Heo return -ENOMEM; 950fbf59bc9STejun Heo 951ce3141a2STejun Heo /* alloc and map */ 952ce3141a2STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 953ce3141a2STejun Heo rc = pcpu_alloc_pages(chunk, pages, populated, rs, re); 954ce3141a2STejun Heo if (rc) 955ce3141a2STejun Heo goto err_free; 956ce3141a2STejun Heo free_end = re; 957fbf59bc9STejun Heo } 958fbf59bc9STejun Heo 959ce3141a2STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 960ce3141a2STejun Heo rc = pcpu_map_pages(chunk, pages, populated, rs, re); 961ce3141a2STejun Heo if (rc) 962ce3141a2STejun Heo goto err_unmap; 963ce3141a2STejun Heo unmap_end = re; 964ce3141a2STejun Heo } 965ce3141a2STejun Heo pcpu_post_map_flush(chunk, page_start, page_end); 966fbf59bc9STejun Heo 967ce3141a2STejun Heo /* commit new bitmap */ 968ce3141a2STejun Heo bitmap_copy(chunk->populated, populated, pcpu_unit_pages); 969ce3141a2STejun Heo clear: 970fbf59bc9STejun Heo for_each_possible_cpu(cpu) 9712f39e637STejun Heo memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 972fbf59bc9STejun Heo return 0; 973ce3141a2STejun Heo 974ce3141a2STejun Heo err_unmap: 975ce3141a2STejun Heo pcpu_pre_unmap_flush(chunk, page_start, unmap_end); 976ce3141a2STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end) 977ce3141a2STejun Heo pcpu_unmap_pages(chunk, pages, populated, rs, re); 978ce3141a2STejun Heo pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end); 979ce3141a2STejun Heo err_free: 980ce3141a2STejun Heo pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end) 981ce3141a2STejun Heo pcpu_free_pages(chunk, pages, populated, rs, re); 982ce3141a2STejun Heo return rc; 983fbf59bc9STejun Heo } 984fbf59bc9STejun Heo 985fbf59bc9STejun Heo static void free_pcpu_chunk(struct pcpu_chunk *chunk) 986fbf59bc9STejun Heo { 987fbf59bc9STejun Heo if (!chunk) 988fbf59bc9STejun Heo return; 989fbf59bc9STejun Heo if (chunk->vm) 990fbf59bc9STejun Heo free_vm_area(chunk->vm); 9911880d93bSTejun Heo pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); 992fbf59bc9STejun Heo kfree(chunk); 993fbf59bc9STejun Heo } 994fbf59bc9STejun Heo 995fbf59bc9STejun Heo static struct pcpu_chunk *alloc_pcpu_chunk(void) 996fbf59bc9STejun Heo { 997fbf59bc9STejun Heo struct pcpu_chunk *chunk; 998fbf59bc9STejun Heo 999fbf59bc9STejun Heo chunk = kzalloc(pcpu_chunk_struct_size, 
GFP_KERNEL);
1000fbf59bc9STejun Heo if (!chunk)
1001fbf59bc9STejun Heo return NULL;
1002fbf59bc9STejun Heo
10031880d93bSTejun Heo chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
if (!chunk->map) {
	/* map allocation can fail too; don't dereference a NULL map */
	kfree(chunk);
	return NULL;
}
1004fbf59bc9STejun Heo chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
1005fbf59bc9STejun Heo chunk->map[chunk->map_used++] = pcpu_unit_size;
1006fbf59bc9STejun Heo
1007142d44b0SAmerigo Wang chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
1008fbf59bc9STejun Heo if (!chunk->vm) {
1009fbf59bc9STejun Heo free_pcpu_chunk(chunk);
1010fbf59bc9STejun Heo return NULL;
1011fbf59bc9STejun Heo }
1012fbf59bc9STejun Heo
1013fbf59bc9STejun Heo INIT_LIST_HEAD(&chunk->list);
1014fbf59bc9STejun Heo chunk->free_size = pcpu_unit_size;
1015fbf59bc9STejun Heo chunk->contig_hint = pcpu_unit_size;
1016fbf59bc9STejun Heo
1017fbf59bc9STejun Heo return chunk;
1018fbf59bc9STejun Heo }
1019fbf59bc9STejun Heo
1020fbf59bc9STejun Heo /**
1021edcb4639STejun Heo * pcpu_alloc - the percpu allocator
1022cae3aeb8STejun Heo * @size: size of area to allocate in bytes
1023fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE)
1024edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available
1025fbf59bc9STejun Heo *
1026ccea34b5STejun Heo * Allocate a percpu area of @size bytes aligned at @align.
1027ccea34b5STejun Heo *
1028ccea34b5STejun Heo * CONTEXT:
1029ccea34b5STejun Heo * Does GFP_KERNEL allocation.
1030fbf59bc9STejun Heo *
1031fbf59bc9STejun Heo * RETURNS:
1032fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure.
1033fbf59bc9STejun Heo */
1034edcb4639STejun Heo static void *pcpu_alloc(size_t size, size_t align, bool reserved)
1035fbf59bc9STejun Heo {
1036fbf59bc9STejun Heo struct pcpu_chunk *chunk;
1037fbf59bc9STejun Heo int slot, off;
1038fbf59bc9STejun Heo
10398d408b4bSTejun Heo if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
1040fbf59bc9STejun Heo WARN(true, "illegal size (%zu) or align (%zu) for "
1041fbf59bc9STejun Heo "percpu allocation\n", size, align);
1042fbf59bc9STejun Heo return NULL;
1043fbf59bc9STejun Heo }
1044fbf59bc9STejun Heo
1045ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex);
1046ccea34b5STejun Heo spin_lock_irq(&pcpu_lock);
1047fbf59bc9STejun Heo
1048edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */
1049edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) {
1050edcb4639STejun Heo chunk = pcpu_reserved_chunk;
10519f7dcf22STejun Heo if (size > chunk->contig_hint ||
10529f7dcf22STejun Heo pcpu_extend_area_map(chunk) < 0)
1053ccea34b5STejun Heo goto fail_unlock;
1054edcb4639STejun Heo off = pcpu_alloc_area(chunk, size, align);
1055edcb4639STejun Heo if (off >= 0)
1056edcb4639STejun Heo goto area_found;
1057ccea34b5STejun Heo goto fail_unlock;
1058edcb4639STejun Heo }
1059edcb4639STejun Heo
1060ccea34b5STejun Heo restart:
1061edcb4639STejun Heo /* search through normal chunks */
1062fbf59bc9STejun Heo for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1063fbf59bc9STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1064fbf59bc9STejun Heo if (size > chunk->contig_hint)
1065fbf59bc9STejun Heo continue;
1066ccea34b5STejun Heo
1067ccea34b5STejun Heo switch (pcpu_extend_area_map(chunk)) {
1068ccea34b5STejun Heo case 0:
1069ccea34b5STejun Heo break;
1070ccea34b5STejun Heo case 1:
1071ccea34b5STejun Heo goto restart; /* pcpu_lock dropped, restart */
1072ccea34b5STejun Heo default:
1073ccea34b5STejun Heo goto fail_unlock;
1074ccea34b5STejun Heo }
1075ccea34b5STejun Heo
1076fbf59bc9STejun Heo off = pcpu_alloc_area(chunk, size, align); 1077fbf59bc9STejun Heo if (off >= 0) 1078fbf59bc9STejun Heo goto area_found; 1079fbf59bc9STejun Heo } 1080fbf59bc9STejun Heo } 1081fbf59bc9STejun Heo 1082fbf59bc9STejun Heo /* hmmm... no space left, create a new chunk */ 1083ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1084ccea34b5STejun Heo 1085fbf59bc9STejun Heo chunk = alloc_pcpu_chunk(); 1086fbf59bc9STejun Heo if (!chunk) 1087ccea34b5STejun Heo goto fail_unlock_mutex; 1088ccea34b5STejun Heo 1089ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1090fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1091ccea34b5STejun Heo goto restart; 1092fbf59bc9STejun Heo 1093fbf59bc9STejun Heo area_found: 1094ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1095ccea34b5STejun Heo 1096fbf59bc9STejun Heo /* populate, map and clear the area */ 1097fbf59bc9STejun Heo if (pcpu_populate_chunk(chunk, off, size)) { 1098ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1099fbf59bc9STejun Heo pcpu_free_area(chunk, off); 1100ccea34b5STejun Heo goto fail_unlock; 1101fbf59bc9STejun Heo } 1102fbf59bc9STejun Heo 1103ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1104ccea34b5STejun Heo 11052f39e637STejun Heo /* return address relative to unit0 */ 1106ccea34b5STejun Heo return __addr_to_pcpu_ptr(chunk->vm->addr + off); 1107ccea34b5STejun Heo 1108ccea34b5STejun Heo fail_unlock: 1109ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1110ccea34b5STejun Heo fail_unlock_mutex: 1111ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1112ccea34b5STejun Heo return NULL; 1113fbf59bc9STejun Heo } 1114edcb4639STejun Heo 1115edcb4639STejun Heo /** 1116edcb4639STejun Heo * __alloc_percpu - allocate dynamic percpu area 1117edcb4639STejun Heo * @size: size of area to allocate in bytes 1118edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1119edcb4639STejun Heo * 1120edcb4639STejun Heo * Allocate percpu area of @size bytes aligned at @align. Might 1121edcb4639STejun Heo * sleep. Might trigger writeouts. 1122edcb4639STejun Heo * 1123ccea34b5STejun Heo * CONTEXT: 1124ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1125ccea34b5STejun Heo * 1126edcb4639STejun Heo * RETURNS: 1127edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1128edcb4639STejun Heo */ 1129edcb4639STejun Heo void *__alloc_percpu(size_t size, size_t align) 1130edcb4639STejun Heo { 1131edcb4639STejun Heo return pcpu_alloc(size, align, false); 1132edcb4639STejun Heo } 1133fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 1134fbf59bc9STejun Heo 1135edcb4639STejun Heo /** 1136edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 1137edcb4639STejun Heo * @size: size of area to allocate in bytes 1138edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1139edcb4639STejun Heo * 1140edcb4639STejun Heo * Allocate percpu area of @size bytes aligned at @align from reserved 1141edcb4639STejun Heo * percpu area if arch has set it up; otherwise, allocation is served 1142edcb4639STejun Heo * from the same dynamic area. Might sleep. Might trigger writeouts. 1143edcb4639STejun Heo * 1144ccea34b5STejun Heo * CONTEXT: 1145ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1146ccea34b5STejun Heo * 1147edcb4639STejun Heo * RETURNS: 1148edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 
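 *
 * For example (illustrative; the size variable is hypothetical), the
 * module loader could place a module's static percpu area with:
 *
 *	ptr = __alloc_reserved_percpu(mod_percpu_size, align);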
1149edcb4639STejun Heo */ 1150edcb4639STejun Heo void *__alloc_reserved_percpu(size_t size, size_t align) 1151edcb4639STejun Heo { 1152edcb4639STejun Heo return pcpu_alloc(size, align, true); 1153edcb4639STejun Heo } 1154edcb4639STejun Heo 1155a56dbddfSTejun Heo /** 1156a56dbddfSTejun Heo * pcpu_reclaim - reclaim fully free chunks, workqueue function 1157a56dbddfSTejun Heo * @work: unused 1158a56dbddfSTejun Heo * 1159a56dbddfSTejun Heo * Reclaim all fully free chunks except for the first one. 1160ccea34b5STejun Heo * 1161ccea34b5STejun Heo * CONTEXT: 1162ccea34b5STejun Heo * workqueue context. 1163a56dbddfSTejun Heo */ 1164a56dbddfSTejun Heo static void pcpu_reclaim(struct work_struct *work) 1165fbf59bc9STejun Heo { 1166a56dbddfSTejun Heo LIST_HEAD(todo); 1167a56dbddfSTejun Heo struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; 1168a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 1169a56dbddfSTejun Heo 1170ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 1171ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1172a56dbddfSTejun Heo 1173a56dbddfSTejun Heo list_for_each_entry_safe(chunk, next, head, list) { 11748d408b4bSTejun Heo WARN_ON(chunk->immutable); 1175a56dbddfSTejun Heo 1176a56dbddfSTejun Heo /* spare the first one */ 1177a56dbddfSTejun Heo if (chunk == list_first_entry(head, struct pcpu_chunk, list)) 1178a56dbddfSTejun Heo continue; 1179a56dbddfSTejun Heo 1180a56dbddfSTejun Heo list_move(&chunk->list, &todo); 1181a56dbddfSTejun Heo } 1182a56dbddfSTejun Heo 1183ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1184a56dbddfSTejun Heo 1185a56dbddfSTejun Heo list_for_each_entry_safe(chunk, next, &todo, list) { 1186ce3141a2STejun Heo pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); 1187fbf59bc9STejun Heo free_pcpu_chunk(chunk); 1188fbf59bc9STejun Heo } 1189*971f3918STejun Heo 1190*971f3918STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1191a56dbddfSTejun Heo } 1192fbf59bc9STejun Heo 1193fbf59bc9STejun Heo /** 1194fbf59bc9STejun Heo * free_percpu - free percpu area 1195fbf59bc9STejun Heo * @ptr: pointer to area to free 1196fbf59bc9STejun Heo * 1197ccea34b5STejun Heo * Free percpu area @ptr. 1198ccea34b5STejun Heo * 1199ccea34b5STejun Heo * CONTEXT: 1200ccea34b5STejun Heo * Can be called from atomic context. 
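 *
 * A minimal alloc/free pairing for illustration (not code from this
 * file):
 *
 *	int *v = __alloc_percpu(sizeof(int), __alignof__(int));
 *	if (v)
 *		free_percpu(v);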
1201fbf59bc9STejun Heo */ 1202fbf59bc9STejun Heo void free_percpu(void *ptr) 1203fbf59bc9STejun Heo { 1204fbf59bc9STejun Heo void *addr = __pcpu_ptr_to_addr(ptr); 1205fbf59bc9STejun Heo struct pcpu_chunk *chunk; 1206ccea34b5STejun Heo unsigned long flags; 1207fbf59bc9STejun Heo int off; 1208fbf59bc9STejun Heo 1209fbf59bc9STejun Heo if (!ptr) 1210fbf59bc9STejun Heo return; 1211fbf59bc9STejun Heo 1212ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1213fbf59bc9STejun Heo 1214fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr); 1215fbf59bc9STejun Heo off = addr - chunk->vm->addr; 1216fbf59bc9STejun Heo 1217fbf59bc9STejun Heo pcpu_free_area(chunk, off); 1218fbf59bc9STejun Heo 1219a56dbddfSTejun Heo /* if there is more than one fully free chunk, wake up the grim reaper */ 1220fbf59bc9STejun Heo if (chunk->free_size == pcpu_unit_size) { 1221fbf59bc9STejun Heo struct pcpu_chunk *pos; 1222fbf59bc9STejun Heo 1223a56dbddfSTejun Heo list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1224fbf59bc9STejun Heo if (pos != chunk) { 1225a56dbddfSTejun Heo schedule_work(&pcpu_reclaim_work); 1226fbf59bc9STejun Heo break; 1227fbf59bc9STejun Heo } 1228fbf59bc9STejun Heo } 1229fbf59bc9STejun Heo 1230ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1231fbf59bc9STejun Heo } 1232fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 1233fbf59bc9STejun Heo 1234fbf59bc9STejun Heo /** 12358d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 12368d408b4bSTejun Heo * @static_size: the size of static percpu area in bytes 123738a6be52STejun Heo * @reserved_size: the size of reserved percpu area in bytes, 0 for none 1238cafe8816STejun Heo * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 123938a6be52STejun Heo * @unit_size: unit size in bytes, must be a multiple of PAGE_SIZE 124038a6be52STejun Heo * @base_addr: mapped address 12412f39e637STejun Heo * @unit_map: cpu -> unit map, NULL for sequential mapping 1242fbf59bc9STejun Heo * 12438d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 12448d408b4bSTejun Heo * percpu area. This function is to be called from the arch percpu area 124538a6be52STejun Heo * setup path. 12468d408b4bSTejun Heo * 1247edcb4639STejun Heo * @reserved_size, if non-zero, specifies the amount of bytes to 1248edcb4639STejun Heo * reserve after the static area in the first chunk. This reserves 1249edcb4639STejun Heo * the first chunk such that it's available only through reserved 1250edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 1251edcb4639STejun Heo * static areas on architectures where the addressing model has 1252edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 1253edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 1254edcb4639STejun Heo * 12556074d5b0STejun Heo * @dyn_size, if non-negative, determines the number of bytes 12566074d5b0STejun Heo * available for dynamic allocation in the first chunk. Specifying a 12576074d5b0STejun Heo * non-negative value makes percpu leave alone the area beyond 12586074d5b0STejun Heo * @static_size + @reserved_size + @dyn_size. 12596074d5b0STejun Heo * 126038a6be52STejun Heo * @unit_size specifies the unit size; it must be aligned to PAGE_SIZE and 126138a6be52STejun Heo * equal to or larger than @static_size + @reserved_size + @dyn_size 126238a6be52STejun Heo * when @dyn_size is non-negative.
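 *
 * For example (illustrative numbers only): with @static_size = 40k,
 * @reserved_size = 8k and @dyn_size = 12k, @unit_size must be at
 * least 60k, page aligned and no smaller than PCPU_MIN_UNIT_SIZE.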
12638d408b4bSTejun Heo * 126438a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 126538a6be52STejun Heo * copied static data to each unit. 1266fbf59bc9STejun Heo * 1267edcb4639STejun Heo * If the first chunk ends up with both reserved and dynamic areas, it 1268edcb4639STejun Heo * is served by two chunks - one to serve the core static and reserved 1269edcb4639STejun Heo * areas and the other for the dynamic area. They share the same vm 1270edcb4639STejun Heo * and page map but use different area allocation maps to stay away 1271edcb4639STejun Heo * from each other. The latter chunk is circulated in the chunk slots 1272edcb4639STejun Heo * and available for dynamic allocation like any other chunk. 1273edcb4639STejun Heo * 1274fbf59bc9STejun Heo * RETURNS: 1275fbf59bc9STejun Heo * The determined pcpu_unit_size which can be used to initialize 1276fbf59bc9STejun Heo * percpu access. 1277fbf59bc9STejun Heo */ 1278ce3141a2STejun Heo size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, 127938a6be52STejun Heo ssize_t dyn_size, size_t unit_size, 12802f39e637STejun Heo void *base_addr, const int *unit_map) 1281fbf59bc9STejun Heo { 12822441d15cSTejun Heo static struct vm_struct first_vm; 1283edcb4639STejun Heo static int smap[2], dmap[2]; 12846074d5b0STejun Heo size_t size_sum = static_size + reserved_size + 12856074d5b0STejun Heo (dyn_size >= 0 ? dyn_size : 0); 1286edcb4639STejun Heo struct pcpu_chunk *schunk, *dchunk = NULL; 12872f39e637STejun Heo unsigned int cpu, tcpu; 1288ce3141a2STejun Heo int i; 1289fbf59bc9STejun Heo 12902f39e637STejun Heo /* sanity checks */ 1291edcb4639STejun Heo BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || 1292edcb4639STejun Heo ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); 12938d408b4bSTejun Heo BUG_ON(!static_size); 129438a6be52STejun Heo BUG_ON(!base_addr); 12956074d5b0STejun Heo BUG_ON(unit_size < size_sum); 12968d408b4bSTejun Heo BUG_ON(unit_size & ~PAGE_MASK); 12976074d5b0STejun Heo BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE); 1298fbf59bc9STejun Heo 12992f39e637STejun Heo /* determine number of units and verify and initialize pcpu_unit_map */ 13002f39e637STejun Heo if (unit_map) { 13012f39e637STejun Heo int first_unit = INT_MAX, last_unit = INT_MIN; 13022f39e637STejun Heo 13032f39e637STejun Heo for_each_possible_cpu(cpu) { 13042f39e637STejun Heo int unit = unit_map[cpu]; 13052f39e637STejun Heo 13062f39e637STejun Heo BUG_ON(unit < 0); 13072f39e637STejun Heo for_each_possible_cpu(tcpu) { 13082f39e637STejun Heo if (tcpu == cpu) 13092f39e637STejun Heo break; 13102f39e637STejun Heo /* the mapping should be one-to-one */ 13112f39e637STejun Heo BUG_ON(unit_map[tcpu] == unit); 13122f39e637STejun Heo } 13132f39e637STejun Heo 13142f39e637STejun Heo if (unit < first_unit) { 13152f39e637STejun Heo pcpu_first_unit_cpu = cpu; 13162f39e637STejun Heo first_unit = unit; 13172f39e637STejun Heo } 13182f39e637STejun Heo if (unit > last_unit) { 13192f39e637STejun Heo pcpu_last_unit_cpu = cpu; 13202f39e637STejun Heo last_unit = unit; 13212f39e637STejun Heo } 13222f39e637STejun Heo } 13232f39e637STejun Heo pcpu_nr_units = last_unit + 1; 13242f39e637STejun Heo pcpu_unit_map = unit_map; 13252f39e637STejun Heo } else { 13262f39e637STejun Heo int *identity_map; 13272f39e637STejun Heo 13282f39e637STejun Heo /* #units == #cpus, identity mapped */ 1329384be2b1STejun Heo identity_map = alloc_bootmem(nr_cpu_ids * 13302f39e637STejun Heo sizeof(identity_map[0])); 13312f39e637STejun Heo 13322f39e637STejun Heo for_each_possible_cpu(cpu) 
13332f39e637STejun Heo identity_map[cpu] = cpu; 13342f39e637STejun Heo 13352f39e637STejun Heo pcpu_first_unit_cpu = 0; 1336384be2b1STejun Heo pcpu_nr_units = nr_cpu_ids; 13372f39e637STejun Heo pcpu_last_unit_cpu = pcpu_nr_units - 1; 13382f39e637STejun Heo pcpu_unit_map = identity_map; 13392f39e637STejun Heo } 13402f39e637STejun Heo 13412f39e637STejun Heo /* determine basic parameters */ 13428d408b4bSTejun Heo pcpu_unit_pages = unit_size >> PAGE_SHIFT; 1343d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 13442f39e637STejun Heo pcpu_chunk_size = pcpu_nr_units * pcpu_unit_size; 1345ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1346ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1347fbf59bc9STejun Heo 1348cafe8816STejun Heo if (dyn_size < 0) 1349edcb4639STejun Heo dyn_size = pcpu_unit_size - static_size - reserved_size; 1350cafe8816STejun Heo 135138a6be52STejun Heo first_vm.flags = VM_ALLOC; 135238a6be52STejun Heo first_vm.size = pcpu_chunk_size; 135338a6be52STejun Heo first_vm.addr = base_addr; 135438a6be52STejun Heo 1355d9b55eebSTejun Heo /* 1356d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 1357d9b55eebSTejun Heo * empty chunks. 1358d9b55eebSTejun Heo */ 1359d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1360fbf59bc9STejun Heo pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); 1361fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 1362fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 1363fbf59bc9STejun Heo 1364edcb4639STejun Heo /* 1365edcb4639STejun Heo * Initialize static chunk. If reserved_size is zero, the 1366edcb4639STejun Heo * static chunk covers static area + dynamic allocation area 1367edcb4639STejun Heo * in the first chunk. If reserved_size is not zero, it 1368edcb4639STejun Heo * covers static area + reserved area (mostly used for module 1369edcb4639STejun Heo * static percpu allocation). 
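	 *
	 * A rough sketch of the resulting first chunk layout when
	 * reserved_size is not zero:
	 *
	 *	| static | reserved | dynamic |
	 *	|<---- schunk ----->|<-dchunk->|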
1370edcb4639STejun Heo */ 13712441d15cSTejun Heo schunk = alloc_bootmem(pcpu_chunk_struct_size); 13722441d15cSTejun Heo INIT_LIST_HEAD(&schunk->list); 13732441d15cSTejun Heo schunk->vm = &first_vm; 137461ace7faSTejun Heo schunk->map = smap; 137561ace7faSTejun Heo schunk->map_alloc = ARRAY_SIZE(smap); 137638a6be52STejun Heo schunk->immutable = true; 1377ce3141a2STejun Heo bitmap_fill(schunk->populated, pcpu_unit_pages); 1378edcb4639STejun Heo 1379edcb4639STejun Heo if (reserved_size) { 1380edcb4639STejun Heo schunk->free_size = reserved_size; 1381ae9e6bc9STejun Heo pcpu_reserved_chunk = schunk; 1382ae9e6bc9STejun Heo pcpu_reserved_chunk_limit = static_size + reserved_size; 1383edcb4639STejun Heo } else { 13842441d15cSTejun Heo schunk->free_size = dyn_size; 1385edcb4639STejun Heo dyn_size = 0; /* dynamic area covered */ 1386edcb4639STejun Heo } 13872441d15cSTejun Heo schunk->contig_hint = schunk->free_size; 1388fbf59bc9STejun Heo 138961ace7faSTejun Heo schunk->map[schunk->map_used++] = -static_size; 139061ace7faSTejun Heo if (schunk->free_size) 139161ace7faSTejun Heo schunk->map[schunk->map_used++] = schunk->free_size; 139261ace7faSTejun Heo 1393edcb4639STejun Heo /* init dynamic chunk if necessary */ 1394edcb4639STejun Heo if (dyn_size) { 1395ce3141a2STejun Heo dchunk = alloc_bootmem(pcpu_chunk_struct_size); 1396edcb4639STejun Heo INIT_LIST_HEAD(&dchunk->list); 1397edcb4639STejun Heo dchunk->vm = &first_vm; 1398edcb4639STejun Heo dchunk->map = dmap; 1399edcb4639STejun Heo dchunk->map_alloc = ARRAY_SIZE(dmap); 140038a6be52STejun Heo dchunk->immutable = true; 1401ce3141a2STejun Heo bitmap_fill(dchunk->populated, pcpu_unit_pages); 1402edcb4639STejun Heo 1403edcb4639STejun Heo dchunk->contig_hint = dchunk->free_size = dyn_size; 1404edcb4639STejun Heo dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; 1405edcb4639STejun Heo dchunk->map[dchunk->map_used++] = dchunk->free_size; 1406edcb4639STejun Heo } 1407edcb4639STejun Heo 14082441d15cSTejun Heo /* link the first chunk in */ 1409ae9e6bc9STejun Heo pcpu_first_chunk = dchunk ?: schunk; 1410ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 1411fbf59bc9STejun Heo 1412fbf59bc9STejun Heo /* we're done */ 14132f39e637STejun Heo pcpu_base_addr = schunk->vm->addr; 1414fbf59bc9STejun Heo return pcpu_unit_size; 1415fbf59bc9STejun Heo } 141666c3a757STejun Heo 14178c4bfc6eSTejun Heo static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size, 14188c4bfc6eSTejun Heo ssize_t *dyn_sizep) 14198c4bfc6eSTejun Heo { 14208c4bfc6eSTejun Heo size_t size_sum; 14218c4bfc6eSTejun Heo 14228c4bfc6eSTejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 14238c4bfc6eSTejun Heo (*dyn_sizep >= 0 ? *dyn_sizep : 0)); 14248c4bfc6eSTejun Heo if (*dyn_sizep != 0) 14258c4bfc6eSTejun Heo *dyn_sizep = size_sum - static_size - reserved_size; 14268c4bfc6eSTejun Heo 14278c4bfc6eSTejun Heo return size_sum; 14288c4bfc6eSTejun Heo } 14298c4bfc6eSTejun Heo 143066c3a757STejun Heo /** 143166c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 143266c3a757STejun Heo * @static_size: the size of static percpu area in bytes 143366c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 143466c3a757STejun Heo * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 143566c3a757STejun Heo * 143666c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 143766c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 
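 *
 * For an example caller, see the generic setup_per_cpu_areas() at the
 * end of this file, which embeds the first chunk with
 * PERCPU_MODULE_RESERVE reserved and PERCPU_DYNAMIC_RESERVE dynamic
 * space.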
143866c3a757STejun Heo * 143966c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 144066c3a757STejun Heo * as a contiguous area using bootmem allocator and used as-is without 144166c3a757STejun Heo * being mapped into vmalloc area. This enables the first chunk to 144266c3a757STejun Heo * piggy back on the linear physical mapping which often uses larger 144366c3a757STejun Heo * page size. 144466c3a757STejun Heo * 144566c3a757STejun Heo * When @dyn_size is positive, dynamic area might be larger than 1446788e5abcSTejun Heo * specified to fill page alignment. When @dyn_size is auto, 1447788e5abcSTejun Heo * @dyn_size is just big enough to fill page alignment after static 1448788e5abcSTejun Heo * and reserved areas. 144966c3a757STejun Heo * 145066c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 145166c3a757STejun Heo * size, the leftover is returned to the bootmem allocator. 145266c3a757STejun Heo * 145366c3a757STejun Heo * RETURNS: 145466c3a757STejun Heo * The determined pcpu_unit_size which can be used to initialize 145566c3a757STejun Heo * percpu access on success, -errno on failure. 145666c3a757STejun Heo */ 145766c3a757STejun Heo ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, 1458788e5abcSTejun Heo ssize_t dyn_size) 145966c3a757STejun Heo { 1460ce3141a2STejun Heo size_t size_sum, unit_size, chunk_size; 1461ce3141a2STejun Heo void *base; 146266c3a757STejun Heo unsigned int cpu; 146366c3a757STejun Heo 146466c3a757STejun Heo /* determine parameters and allocate */ 1465ce3141a2STejun Heo size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); 146666c3a757STejun Heo 1467ce3141a2STejun Heo unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1468384be2b1STejun Heo chunk_size = unit_size * nr_cpu_ids; 1469fa8a7094STejun Heo 1470ce3141a2STejun Heo base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, 1471fa8a7094STejun Heo __pa(MAX_DMA_ADDRESS)); 1472ce3141a2STejun Heo if (!base) { 1473fa8a7094STejun Heo pr_warning("PERCPU: failed to allocate %zu bytes for " 1474fa8a7094STejun Heo "embedding\n", chunk_size); 147566c3a757STejun Heo return -ENOMEM; 1476fa8a7094STejun Heo } 147766c3a757STejun Heo 147866c3a757STejun Heo /* return the leftover and copy */ 147974d46d6bSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) { 1480ce3141a2STejun Heo void *ptr = base + cpu * unit_size; 148166c3a757STejun Heo 148274d46d6bSTejun Heo if (cpu_possible(cpu)) { 1483384be2b1STejun Heo free_bootmem(__pa(ptr + size_sum), 1484384be2b1STejun Heo unit_size - size_sum); 148566c3a757STejun Heo memcpy(ptr, __per_cpu_load, static_size); 148674d46d6bSTejun Heo } else 1487384be2b1STejun Heo free_bootmem(__pa(ptr), unit_size); 148866c3a757STejun Heo } 148966c3a757STejun Heo 149066c3a757STejun Heo /* we're ready, commit */ 149166c3a757STejun Heo pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", 1492ce3141a2STejun Heo size_sum >> PAGE_SHIFT, base, static_size); 149366c3a757STejun Heo 1494ce3141a2STejun Heo return pcpu_setup_first_chunk(static_size, reserved_size, dyn_size, 14952f39e637STejun Heo unit_size, base, NULL); 1496d4b95f80STejun Heo } 1497d4b95f80STejun Heo 1498d4b95f80STejun Heo /** 1499d4b95f80STejun Heo * pcpu_4k_first_chunk - map the first chunk using PAGE_SIZE pages 1500d4b95f80STejun Heo * @static_size: the size of static percpu area in bytes 1501d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1502d4b95f80STejun Heo * @alloc_fn: function to allocate 
percpu page, always called with PAGE_SIZE 1503d4b95f80STejun Heo * @free_fn: function to free percpu page, always called with PAGE_SIZE 1504d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 1505d4b95f80STejun Heo * 1506d4b95f80STejun Heo * This is a helper to ease setting up a page-mapped first percpu chunk 1507d4b95f80STejun Heo * and can be called where pcpu_setup_first_chunk() is expected. 1508d4b95f80STejun Heo * 1509d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 1510d4b95f80STejun Heo * page-by-page into vmalloc area. 1511d4b95f80STejun Heo * 1512d4b95f80STejun Heo * RETURNS: 1513d4b95f80STejun Heo * The determined pcpu_unit_size which can be used to initialize 1514d4b95f80STejun Heo * percpu access on success, -errno on failure. 1515d4b95f80STejun Heo */ 1516d4b95f80STejun Heo ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size, 1517d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1518d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 1519d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 1520d4b95f80STejun Heo { 15218f05a6a6STejun Heo static struct vm_struct vm; 1522ce3141a2STejun Heo int unit_pages; 1523d4b95f80STejun Heo size_t pages_size; 1524ce3141a2STejun Heo struct page **pages; 1525d4b95f80STejun Heo unsigned int cpu; 1526d4b95f80STejun Heo int i, j; 1527d4b95f80STejun Heo ssize_t ret; 1528d4b95f80STejun Heo 1529ce3141a2STejun Heo unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size, 15308f05a6a6STejun Heo PCPU_MIN_UNIT_SIZE)); 1531d4b95f80STejun Heo 1532d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 1533384be2b1STejun Heo pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0])); 1534ce3141a2STejun Heo pages = alloc_bootmem(pages_size); 1535d4b95f80STejun Heo 15368f05a6a6STejun Heo /* allocate pages */ 1537d4b95f80STejun Heo j = 0; 1538d4b95f80STejun Heo for_each_possible_cpu(cpu) 1539ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) { 1540d4b95f80STejun Heo void *ptr; 1541d4b95f80STejun Heo 1542d4b95f80STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE); 1543d4b95f80STejun Heo if (!ptr) { 1544d4b95f80STejun Heo pr_warning("PERCPU: failed to allocate " 1545d4b95f80STejun Heo "4k page for cpu%u\n", cpu); 1546d4b95f80STejun Heo goto enomem; 1547d4b95f80STejun Heo } 1548ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 1549d4b95f80STejun Heo } 1550d4b95f80STejun Heo 15518f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 15528f05a6a6STejun Heo vm.flags = VM_ALLOC; 1553384be2b1STejun Heo vm.size = nr_cpu_ids * unit_pages << PAGE_SHIFT; 15548f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 15558f05a6a6STejun Heo 15568f05a6a6STejun Heo for_each_possible_cpu(cpu) { 15578f05a6a6STejun Heo unsigned long unit_addr = (unsigned long)vm.addr + 1558ce3141a2STejun Heo (cpu * unit_pages << PAGE_SHIFT); 15598f05a6a6STejun Heo 1560ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 15618f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 15628f05a6a6STejun Heo 15638f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 1564ce3141a2STejun Heo ret = __pcpu_map_pages(unit_addr, &pages[cpu * unit_pages], 1565ce3141a2STejun Heo unit_pages); 15668f05a6a6STejun Heo if (ret < 0) 15678f05a6a6STejun Heo panic("failed to map percpu area, err=%zd\n", ret); 15688f05a6a6STejun Heo 15698f05a6a6STejun Heo /* 15708f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 15718f05a6a6STejun Heo * cache for the 
linear mapping here - something 15728f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 15738f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 15748f05a6a6STejun Heo * data structures are not set up yet. 15758f05a6a6STejun Heo */ 15768f05a6a6STejun Heo 15778f05a6a6STejun Heo /* copy static data */ 15788f05a6a6STejun Heo memcpy((void *)unit_addr, __per_cpu_load, static_size); 15798f05a6a6STejun Heo } 15808f05a6a6STejun Heo 1581d4b95f80STejun Heo /* we're ready, commit */ 15828f05a6a6STejun Heo pr_info("PERCPU: %d 4k pages per cpu, static data %zu bytes\n", 1583ce3141a2STejun Heo unit_pages, static_size); 1584d4b95f80STejun Heo 1585ce3141a2STejun Heo ret = pcpu_setup_first_chunk(static_size, reserved_size, -1, 15862f39e637STejun Heo unit_pages << PAGE_SHIFT, vm.addr, NULL); 1587d4b95f80STejun Heo goto out_free_ar; 1588d4b95f80STejun Heo 1589d4b95f80STejun Heo enomem: 1590d4b95f80STejun Heo while (--j >= 0) 1591ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 1592d4b95f80STejun Heo ret = -ENOMEM; 1593d4b95f80STejun Heo out_free_ar: 1594ce3141a2STejun Heo free_bootmem(__pa(pages), pages_size); 1595d4b95f80STejun Heo return ret; 1596d4b95f80STejun Heo } 1597d4b95f80STejun Heo 1598d4b95f80STejun Heo /* 15998c4bfc6eSTejun Heo * Large page remapping first chunk setup helper 16008c4bfc6eSTejun Heo */ 16018c4bfc6eSTejun Heo #ifdef CONFIG_NEED_MULTIPLE_NODES 1602a530b795STejun Heo 1603a530b795STejun Heo /** 1604a530b795STejun Heo * pcpu_lpage_build_unit_map - build unit_map for large page remapping 1605a530b795STejun Heo * @static_size: the size of static percpu area in bytes 1606a530b795STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1607a530b795STejun Heo * @dyn_sizep: in/out parameter for dynamic size, -1 for auto 1608a530b795STejun Heo * @unit_sizep: out parameter for unit size 1609a530b795STejun Heo * @lpage_size: the size of a large page * @unit_map: unit_map to be filled 1610a530b795STejun Heo * @cpu_distance_fn: callback to determine distance between cpus 1611a530b795STejun Heo * 1612a530b795STejun Heo * This function builds the cpu -> unit map and determines other parameters 1613a530b795STejun Heo * considering the needed percpu size, large page size and distances 1614a530b795STejun Heo * between CPUs in NUMA. 1615a530b795STejun Heo * 1616a530b795STejun Heo * CPUs which are of LOCAL_DISTANCE both ways are grouped together and 1617a530b795STejun Heo * may share units in the same large page. The returned configuration 1618a530b795STejun Heo * is guaranteed to have CPUs on different nodes on different large 1619a530b795STejun Heo * pages and >=75% usage of allocated virtual address space. 1620a530b795STejun Heo * 1621a530b795STejun Heo * RETURNS: 1622a530b795STejun Heo * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and 1623a530b795STejun Heo * returns the number of units to be allocated. -errno on failure. 
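 *
 * As an illustration of the sizing logic (hypothetical numbers): with
 * a 2M @lpage_size and a 620k minimum unit size, alloc_size is
 * rounded up to 2M; upa starts at 3, but 2M is not divisible by 3, so
 * max_upa ends up 2, i.e. two page-aligned 1M units per large page.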
1624a530b795STejun Heo */ 1625a530b795STejun Heo int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size, 1626a530b795STejun Heo ssize_t *dyn_sizep, size_t *unit_sizep, 1627a530b795STejun Heo size_t lpage_size, int *unit_map, 1628a530b795STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1629a530b795STejun Heo { 1630a530b795STejun Heo static int group_map[NR_CPUS] __initdata; 1631a530b795STejun Heo static int group_cnt[NR_CPUS] __initdata; 1632a530b795STejun Heo int group_cnt_max = 0; 1633a530b795STejun Heo size_t size_sum, min_unit_size, alloc_size; 1634a530b795STejun Heo int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 1635a530b795STejun Heo int last_allocs; 1636a530b795STejun Heo unsigned int cpu, tcpu; 1637a530b795STejun Heo int group, unit; 1638a530b795STejun Heo 1639a530b795STejun Heo /* 1640a530b795STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 1641a530b795STejun Heo * alloc_size is a multiple of lpage_size and is the smallest 1642a530b795STejun Heo * which can accommodate 4k aligned segments which are equal to 1643a530b795STejun Heo * or larger than min_unit_size. 1644a530b795STejun Heo */ 1645a530b795STejun Heo size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, dyn_sizep); 1646a530b795STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1647a530b795STejun Heo 1648a530b795STejun Heo alloc_size = roundup(min_unit_size, lpage_size); 1649a530b795STejun Heo upa = alloc_size / min_unit_size; 1650a530b795STejun Heo while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1651a530b795STejun Heo upa--; 1652a530b795STejun Heo max_upa = upa; 1653a530b795STejun Heo 1654a530b795STejun Heo /* group cpus according to their proximity */ 1655a530b795STejun Heo for_each_possible_cpu(cpu) { 1656a530b795STejun Heo group = 0; 1657a530b795STejun Heo next_group: 1658a530b795STejun Heo for_each_possible_cpu(tcpu) { 1659a530b795STejun Heo if (cpu == tcpu) 1660a530b795STejun Heo break; 1661a530b795STejun Heo if (group_map[tcpu] == group && 1662a530b795STejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 1663a530b795STejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 1664a530b795STejun Heo group++; 1665a530b795STejun Heo goto next_group; 1666a530b795STejun Heo } 1667a530b795STejun Heo } 1668a530b795STejun Heo group_map[cpu] = group; 1669a530b795STejun Heo group_cnt[group]++; 1670a530b795STejun Heo group_cnt_max = max(group_cnt_max, group_cnt[group]); 1671a530b795STejun Heo } 1672a530b795STejun Heo 1673a530b795STejun Heo /* 1674a530b795STejun Heo * Expand unit size until address space usage goes over 75% 1675a530b795STejun Heo * and then as much as possible without using more address 1676a530b795STejun Heo * space. 1677a530b795STejun Heo */ 1678a530b795STejun Heo last_allocs = INT_MAX; 1679a530b795STejun Heo for (upa = max_upa; upa; upa--) { 1680a530b795STejun Heo int allocs = 0, wasted = 0; 1681a530b795STejun Heo 1682a530b795STejun Heo if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1683a530b795STejun Heo continue; 1684a530b795STejun Heo 1685a530b795STejun Heo for (group = 0; group_cnt[group]; group++) { 1686a530b795STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1687a530b795STejun Heo allocs += this_allocs; 1688a530b795STejun Heo wasted += this_allocs * upa - group_cnt[group]; 1689a530b795STejun Heo } 1690a530b795STejun Heo 1691a530b795STejun Heo /* 1692a530b795STejun Heo * Don't accept if wastage is over 25%. 
The 1693a530b795STejun Heo * greater-than comparison ensures upa==1 always 1694a530b795STejun Heo * passes the following check. 1695a530b795STejun Heo */ 1696a530b795STejun Heo if (wasted > num_possible_cpus() / 3) 1697a530b795STejun Heo continue; 1698a530b795STejun Heo 1699a530b795STejun Heo /* and then don't consume more memory */ 1700a530b795STejun Heo if (allocs > last_allocs) 1701a530b795STejun Heo break; 1702a530b795STejun Heo last_allocs = allocs; 1703a530b795STejun Heo best_upa = upa; 1704a530b795STejun Heo } 1705a530b795STejun Heo *unit_sizep = alloc_size / best_upa; 1706a530b795STejun Heo 1707a530b795STejun Heo /* assign units to cpus accordingly */ 1708a530b795STejun Heo unit = 0; 1709a530b795STejun Heo for (group = 0; group_cnt[group]; group++) { 1710a530b795STejun Heo for_each_possible_cpu(cpu) 1711a530b795STejun Heo if (group_map[cpu] == group) 1712a530b795STejun Heo unit_map[cpu] = unit++; 1713a530b795STejun Heo unit = roundup(unit, best_upa); 1714a530b795STejun Heo } 1715a530b795STejun Heo 1716a530b795STejun Heo return unit; /* unit contains aligned number of units */ 1717a530b795STejun Heo } 1718a530b795STejun Heo 17198c4bfc6eSTejun Heo struct pcpul_ent { 17208c4bfc6eSTejun Heo void *ptr; 1721a530b795STejun Heo void *map_addr; 17228c4bfc6eSTejun Heo }; 17238c4bfc6eSTejun Heo 17248c4bfc6eSTejun Heo static size_t pcpul_size; 1725a530b795STejun Heo static size_t pcpul_lpage_size; 1726a530b795STejun Heo static int pcpul_nr_lpages; 17278c4bfc6eSTejun Heo static struct pcpul_ent *pcpul_map; 1728a530b795STejun Heo 1729a530b795STejun Heo static bool __init pcpul_unit_to_cpu(int unit, const int *unit_map, 1730a530b795STejun Heo unsigned int *cpup) 1731a530b795STejun Heo { 1732a530b795STejun Heo unsigned int cpu; 1733a530b795STejun Heo 1734a530b795STejun Heo for_each_possible_cpu(cpu) 1735a530b795STejun Heo if (unit_map[cpu] == unit) { 1736a530b795STejun Heo if (cpup) 1737a530b795STejun Heo *cpup = cpu; 1738a530b795STejun Heo return true; 1739a530b795STejun Heo } 1740a530b795STejun Heo 1741a530b795STejun Heo return false; 1742a530b795STejun Heo } 1743a530b795STejun Heo 1744a530b795STejun Heo static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size, 1745a530b795STejun Heo size_t reserved_size, size_t dyn_size, 1746a530b795STejun Heo size_t unit_size, size_t lpage_size, 1747a530b795STejun Heo const int *unit_map, int nr_units) 1748a530b795STejun Heo { 1749a530b795STejun Heo int width = 1, v = nr_units; 1750a530b795STejun Heo char empty_str[] = "--------"; 1751a530b795STejun Heo int upl, lpl; /* units per lpage, lpage per line */ 1752a530b795STejun Heo unsigned int cpu; 1753a530b795STejun Heo int lpage, unit; 1754a530b795STejun Heo 1755a530b795STejun Heo while (v /= 10) 1756a530b795STejun Heo width++; 1757a530b795STejun Heo empty_str[min_t(int, width, sizeof(empty_str) - 1)] = '\0'; 1758a530b795STejun Heo 1759a530b795STejun Heo upl = max_t(int, lpage_size / unit_size, 1); 1760a530b795STejun Heo lpl = rounddown_pow_of_two(max_t(int, 60 / (upl * (width + 1) + 2), 1)); 1761a530b795STejun Heo 1762a530b795STejun Heo printk("%spcpu-lpage: sta/res/dyn=%zu/%zu/%zu unit=%zu lpage=%zu", lvl, 1763a530b795STejun Heo static_size, reserved_size, dyn_size, unit_size, lpage_size); 1764a530b795STejun Heo 1765a530b795STejun Heo for (lpage = 0, unit = 0; unit < nr_units; unit++) { 1766a530b795STejun Heo if (!(unit % upl)) { 1767a530b795STejun Heo if (!(lpage++ % lpl)) { 1768a530b795STejun Heo printk("\n"); 1769a530b795STejun Heo printk("%spcpu-lpage: ", lvl); 1770a530b795STejun Heo } 
else 1771a530b795STejun Heo printk("| "); 1772a530b795STejun Heo } 1773a530b795STejun Heo if (pcpul_unit_to_cpu(unit, unit_map, &cpu)) 1774a530b795STejun Heo printk("%0*d ", width, cpu); 1775a530b795STejun Heo else 1776a530b795STejun Heo printk("%s ", empty_str); 1777a530b795STejun Heo } 1778a530b795STejun Heo printk("\n"); 1779a530b795STejun Heo } 17808c4bfc6eSTejun Heo 17818c4bfc6eSTejun Heo /** 17828c4bfc6eSTejun Heo * pcpu_lpage_first_chunk - remap the first percpu chunk using large pages 17838c4bfc6eSTejun Heo * @static_size: the size of static percpu area in bytes 17848c4bfc6eSTejun Heo * @reserved_size: the size of reserved percpu area in bytes 1785a530b795STejun Heo * @dyn_size: free size for dynamic allocation in bytes 1786a530b795STejun Heo * @unit_size: unit size in bytes 17878c4bfc6eSTejun Heo * @lpage_size: the size of a large page 1788a530b795STejun Heo * @unit_map: cpu -> unit mapping 1789a530b795STejun Heo * @nr_units: the number of units 17908c4bfc6eSTejun Heo * @alloc_fn: function to allocate percpu lpage, always called with lpage_size 17918c4bfc6eSTejun Heo * @free_fn: function to free percpu memory, @size <= lpage_size 17928c4bfc6eSTejun Heo * @map_fn: function to map percpu lpage, always called with lpage_size 17938c4bfc6eSTejun Heo * 1794a530b795STejun Heo * This allocator uses large pages to build and map the first chunk. 1795a530b795STejun Heo * Unlike other helpers, the caller should always specify @dyn_size 1796a530b795STejun Heo * and @unit_size. These parameters along with @unit_map and 1797a530b795STejun Heo * @nr_units can be determined using pcpu_lpage_build_unit_map(). 1798a530b795STejun Heo * This two-stage initialization allows arch code to evaluate the 1799a530b795STejun Heo * parameters before committing to them. 18008c4bfc6eSTejun Heo * 1801a530b795STejun Heo * Large pages are allocated as directed by @unit_map and other 1802a530b795STejun Heo * parameters and mapped to vmalloc space. Unused holes are returned 1803a530b795STejun Heo * to the page allocator. Note that these holes end up being actively 1804a530b795STejun Heo * mapped twice - once in the physical mapping and once in the vmalloc 1805a530b795STejun Heo * area for the first percpu chunk. Depending on architecture, this might 1806a530b795STejun Heo * cause problems when changing page attributes of the returned area. 1807a530b795STejun Heo * These double-mapped areas can be detected using 1808a530b795STejun Heo * pcpu_lpage_remapped(). 18098c4bfc6eSTejun Heo * 18108c4bfc6eSTejun Heo * RETURNS: 18118c4bfc6eSTejun Heo * The determined pcpu_unit_size which can be used to initialize 18128c4bfc6eSTejun Heo * percpu access on success, -errno on failure. 
18138c4bfc6eSTejun Heo */ 18148c4bfc6eSTejun Heo ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size, 1815a530b795STejun Heo size_t dyn_size, size_t unit_size, 1816a530b795STejun Heo size_t lpage_size, const int *unit_map, 1817a530b795STejun Heo int nr_units, 18188c4bfc6eSTejun Heo pcpu_fc_alloc_fn_t alloc_fn, 18198c4bfc6eSTejun Heo pcpu_fc_free_fn_t free_fn, 18208c4bfc6eSTejun Heo pcpu_fc_map_fn_t map_fn) 18218c4bfc6eSTejun Heo { 1822a530b795STejun Heo static struct vm_struct vm; 1823a530b795STejun Heo size_t chunk_size = unit_size * nr_units; 18248c4bfc6eSTejun Heo size_t map_size; 18258c4bfc6eSTejun Heo unsigned int cpu; 18268c4bfc6eSTejun Heo ssize_t ret; 1827a530b795STejun Heo int i, j, unit; 18288c4bfc6eSTejun Heo 1829a530b795STejun Heo pcpul_lpage_dump_cfg(KERN_DEBUG, static_size, reserved_size, dyn_size, 1830a530b795STejun Heo unit_size, lpage_size, unit_map, nr_units); 18318c4bfc6eSTejun Heo 1832a530b795STejun Heo BUG_ON(chunk_size % lpage_size); 1833a530b795STejun Heo 1834a530b795STejun Heo pcpul_size = static_size + reserved_size + dyn_size; 1835a530b795STejun Heo pcpul_lpage_size = lpage_size; 1836a530b795STejun Heo pcpul_nr_lpages = chunk_size / lpage_size; 18378c4bfc6eSTejun Heo 18388c4bfc6eSTejun Heo /* allocate pointer array and alloc large pages */ 1839a530b795STejun Heo map_size = pcpul_nr_lpages * sizeof(pcpul_map[0]); 18408c4bfc6eSTejun Heo pcpul_map = alloc_bootmem(map_size); 18418c4bfc6eSTejun Heo 1842a530b795STejun Heo /* allocate all pages */ 1843a530b795STejun Heo for (i = 0; i < pcpul_nr_lpages; i++) { 1844a530b795STejun Heo size_t offset = i * lpage_size; 1845a530b795STejun Heo int first_unit = offset / unit_size; 1846a530b795STejun Heo int last_unit = (offset + lpage_size - 1) / unit_size; 18478c4bfc6eSTejun Heo void *ptr; 18488c4bfc6eSTejun Heo 1849a530b795STejun Heo /* find out which cpu is mapped to this unit */ 1850a530b795STejun Heo for (unit = first_unit; unit <= last_unit; unit++) 1851a530b795STejun Heo if (pcpul_unit_to_cpu(unit, unit_map, &cpu)) 1852a530b795STejun Heo goto found; 1853a530b795STejun Heo continue; 1854a530b795STejun Heo found: 18558c4bfc6eSTejun Heo ptr = alloc_fn(cpu, lpage_size); 18568c4bfc6eSTejun Heo if (!ptr) { 18578c4bfc6eSTejun Heo pr_warning("PERCPU: failed to allocate large page " 18588c4bfc6eSTejun Heo "for cpu%u\n", cpu); 18598c4bfc6eSTejun Heo goto enomem; 18608c4bfc6eSTejun Heo } 18618c4bfc6eSTejun Heo 1862a530b795STejun Heo pcpul_map[i].ptr = ptr; 18638c4bfc6eSTejun Heo } 18648c4bfc6eSTejun Heo 1865a530b795STejun Heo /* return unused holes */ 1866a530b795STejun Heo for (unit = 0; unit < nr_units; unit++) { 1867a530b795STejun Heo size_t start = unit * unit_size; 1868a530b795STejun Heo size_t end = start + unit_size; 1869a530b795STejun Heo size_t off, next; 1870a530b795STejun Heo 1871a530b795STejun Heo /* don't free used part of occupied unit */ 1872a530b795STejun Heo if (pcpul_unit_to_cpu(unit, unit_map, NULL)) 1873a530b795STejun Heo start += pcpul_size; 1874a530b795STejun Heo 1875a530b795STejun Heo /* unit can span more than one page, punch the holes */ 1876a530b795STejun Heo for (off = start; off < end; off = next) { 1877a530b795STejun Heo void *ptr = pcpul_map[off / lpage_size].ptr; 1878a530b795STejun Heo next = min(roundup(off + 1, lpage_size), end); 1879a530b795STejun Heo if (ptr) 1880a530b795STejun Heo free_fn(ptr + off % lpage_size, next - off); 1881a530b795STejun Heo } 1882a530b795STejun Heo } 1883a530b795STejun Heo 1884a530b795STejun Heo /* allocate address, map and copy */ 
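	/*
	 * The chunk's virtual area is reserved with
	 * vm_area_register_early() because this runs before the vmalloc
	 * allocator is initialized and get_vm_area() is not available
	 * yet.
	 */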
1885a530b795STejun Heo vm.flags = VM_ALLOC; 1886a530b795STejun Heo vm.size = chunk_size; 1887a530b795STejun Heo vm_area_register_early(&vm, unit_size); 1888a530b795STejun Heo 1889a530b795STejun Heo for (i = 0; i < pcpul_nr_lpages; i++) { 1890a530b795STejun Heo if (!pcpul_map[i].ptr) 1891a530b795STejun Heo continue; 1892a530b795STejun Heo pcpul_map[i].map_addr = vm.addr + i * lpage_size; 1893a530b795STejun Heo map_fn(pcpul_map[i].ptr, lpage_size, pcpul_map[i].map_addr); 1894a530b795STejun Heo } 18958c4bfc6eSTejun Heo 18968c4bfc6eSTejun Heo for_each_possible_cpu(cpu) 1897a530b795STejun Heo memcpy(vm.addr + unit_map[cpu] * unit_size, __per_cpu_load, 1898a530b795STejun Heo static_size); 18998c4bfc6eSTejun Heo 19008c4bfc6eSTejun Heo /* we're ready, commit */ 19018c4bfc6eSTejun Heo pr_info("PERCPU: Remapped at %p with large pages, static data " 1902a530b795STejun Heo "%zu bytes\n", vm.addr, static_size); 19038c4bfc6eSTejun Heo 1904ce3141a2STejun Heo ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size, 1905a530b795STejun Heo unit_size, vm.addr, unit_map); 19068c4bfc6eSTejun Heo 1907a530b795STejun Heo /* 1908a530b795STejun Heo * Sort pcpul_map array for pcpu_lpage_remapped(). Unmapped 1909a530b795STejun Heo * lpages are pushed to the end and trimmed. 1910a530b795STejun Heo */ 1911a530b795STejun Heo for (i = 0; i < pcpul_nr_lpages - 1; i++) 1912a530b795STejun Heo for (j = i + 1; j < pcpul_nr_lpages; j++) { 1913a530b795STejun Heo struct pcpul_ent tmp; 1914a530b795STejun Heo 1915a530b795STejun Heo if (!pcpul_map[j].ptr) 1916a530b795STejun Heo continue; 1917a530b795STejun Heo if (pcpul_map[i].ptr && 1918a530b795STejun Heo pcpul_map[i].ptr < pcpul_map[j].ptr) 1919a530b795STejun Heo continue; 1920a530b795STejun Heo 1921a530b795STejun Heo tmp = pcpul_map[i]; 19228c4bfc6eSTejun Heo pcpul_map[i] = pcpul_map[j]; 19238c4bfc6eSTejun Heo pcpul_map[j] = tmp; 19248c4bfc6eSTejun Heo } 19258c4bfc6eSTejun Heo 1926a530b795STejun Heo while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr) 1927a530b795STejun Heo pcpul_nr_lpages--; 1928a530b795STejun Heo 19298c4bfc6eSTejun Heo return ret; 19308c4bfc6eSTejun Heo 19318c4bfc6eSTejun Heo enomem: 1932a530b795STejun Heo for (i = 0; i < pcpul_nr_lpages; i++) 1933a530b795STejun Heo if (pcpul_map[i].ptr) 1934a530b795STejun Heo free_fn(pcpul_map[i].ptr, lpage_size); 19358c4bfc6eSTejun Heo free_bootmem(__pa(pcpul_map), map_size); 19368c4bfc6eSTejun Heo return -ENOMEM; 19378c4bfc6eSTejun Heo } 19388c4bfc6eSTejun Heo 19398c4bfc6eSTejun Heo /** 19408c4bfc6eSTejun Heo * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area 19418c4bfc6eSTejun Heo * @kaddr: the kernel address in question 19428c4bfc6eSTejun Heo * 19438c4bfc6eSTejun Heo * Determine whether @kaddr falls in the pcpul recycled area. This is 19448c4bfc6eSTejun Heo * used by pageattr to detect VM aliases and break up the pcpu large 19458c4bfc6eSTejun Heo * page mapping such that the same physical page is not mapped under 19468c4bfc6eSTejun Heo * different attributes. 19478c4bfc6eSTejun Heo * 19488c4bfc6eSTejun Heo * The recycled area is always at the tail of a partially used large 19498c4bfc6eSTejun Heo * page. 19508c4bfc6eSTejun Heo * 19518c4bfc6eSTejun Heo * RETURNS: 19528c4bfc6eSTejun Heo * Address of corresponding remapped pcpu address if match is found; 19538c4bfc6eSTejun Heo * otherwise, NULL. 
19548c4bfc6eSTejun Heo */ 19558c4bfc6eSTejun Heo void *pcpu_lpage_remapped(void *kaddr) 19568c4bfc6eSTejun Heo { 1957a530b795STejun Heo unsigned long lpage_mask = pcpul_lpage_size - 1; 1958a530b795STejun Heo void *lpage_addr = (void *)((unsigned long)kaddr & ~lpage_mask); 1959a530b795STejun Heo unsigned long offset = (unsigned long)kaddr & lpage_mask; 1960a530b795STejun Heo int left = 0, right = pcpul_nr_lpages - 1; 19618c4bfc6eSTejun Heo int pos; 19628c4bfc6eSTejun Heo 19638c4bfc6eSTejun Heo /* pcpul in use at all? */ 19648c4bfc6eSTejun Heo if (!pcpul_map) 19658c4bfc6eSTejun Heo return NULL; 19668c4bfc6eSTejun Heo 19678c4bfc6eSTejun Heo /* okay, perform binary search */ 19688c4bfc6eSTejun Heo while (left <= right) { 19698c4bfc6eSTejun Heo pos = (left + right) / 2; 19708c4bfc6eSTejun Heo 19718c4bfc6eSTejun Heo if (pcpul_map[pos].ptr < lpage_addr) 19728c4bfc6eSTejun Heo left = pos + 1; 19738c4bfc6eSTejun Heo else if (pcpul_map[pos].ptr > lpage_addr) 19748c4bfc6eSTejun Heo right = pos - 1; 1975a530b795STejun Heo else 1976a530b795STejun Heo return pcpul_map[pos].map_addr + offset; 19778c4bfc6eSTejun Heo } 19788c4bfc6eSTejun Heo 19798c4bfc6eSTejun Heo return NULL; 19808c4bfc6eSTejun Heo } 19818c4bfc6eSTejun Heo #endif 19828c4bfc6eSTejun Heo 19838c4bfc6eSTejun Heo /* 1984e74e3962STejun Heo * Generic percpu area setup. 1985e74e3962STejun Heo * 1986e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 1987e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 1988e74e3962STejun Heo * important because many archs have addressing restrictions and might 1989e74e3962STejun Heo * fail if the percpu area is located far away from the previous 1990e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 1991e74e3962STejun Heo * generally a good idea TLB-wise because the percpu area can piggy back 1992e74e3962STejun Heo * on the physical linear memory mapping which uses large page 1993e74e3962STejun Heo * mappings on applicable archs. 1994e74e3962STejun Heo */ 1995e74e3962STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 1996e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 1997e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 1998e74e3962STejun Heo 1999e74e3962STejun Heo void __init setup_per_cpu_areas(void) 2000e74e3962STejun Heo { 2001e74e3962STejun Heo size_t static_size = __per_cpu_end - __per_cpu_start; 2002e74e3962STejun Heo ssize_t unit_size; 2003e74e3962STejun Heo unsigned long delta; 2004e74e3962STejun Heo unsigned int cpu; 2005e74e3962STejun Heo 2006e74e3962STejun Heo /* 2007e74e3962STejun Heo * Always reserve area for module percpu variables. That's 2008e74e3962STejun Heo * what the legacy allocator did. 2009e74e3962STejun Heo */ 2010e74e3962STejun Heo unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE, 2011788e5abcSTejun Heo PERCPU_DYNAMIC_RESERVE); 2012e74e3962STejun Heo if (unit_size < 0) 2013e74e3962STejun Heo panic("Failed to initialize percpu areas."); 2014e74e3962STejun Heo 2015e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 2016e74e3962STejun Heo for_each_possible_cpu(cpu) 2017e74e3962STejun Heo __per_cpu_offset[cpu] = delta + cpu * unit_size; 2018e74e3962STejun Heo } 2019e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
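/*
 * With the sequential unit map set up above, cpu N's unit starts at
 * pcpu_base_addr + N * unit_size.  A sketch of how the offsets are
 * consumed (illustrative only; some_var is a hypothetical static
 * percpu variable):
 *
 *	void *p = (char *)&per_cpu__some_var + __per_cpu_offset[N];
 */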