/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in the vmalloc
 * area.  Each chunk consists of a boot-time determined number of
 * units and the first chunk is used for static percpu variables in
 * the kernel image (special boot time alloc/init handling is
 * necessary as these areas need to be brought up before allocation
 * services are running).  Units grow as necessary and all units grow
 * or shrink in unison.  When a chunk is filled up, another chunk is
 * allocated, i.e. in the vmalloc area:
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  That
 * is, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
 * c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly
 * to cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps a maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */
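
/*
 * Illustrative sketch of the area map encoding described above; the
 * helper below is hypothetical and not used by the allocator.  The
 * offset of the i'th area is simply the running sum of the absolute
 * values of the preceding map entries, and the sign tells free
 * (positive) from allocated (negative) areas apart.
 */
static inline int pcpu_example_area_offset(const int *map, int i)
{
	int off = 0, k;

	for (k = 0; k < i; k++)
		off += map[k] < 0 ? -map[k] : map[k];	/* abs(map[k]) */
	return off;
}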

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
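
/*
 * A minimal sketch, assuming the default translations above: the two
 * macros are inverses of each other, so converting an address to a
 * percpu pointer and back yields the original address.  The helper
 * name is hypothetical and the function is illustrative only.
 */
static inline bool pcpu_example_ptr_roundtrip(void *addr)
{
	return __pcpu_ptr_to_addr(__addr_to_pcpu_ptr(addr)) == addr;
}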

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct vm_struct	**vms;		/* mapped vmalloc regions */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irqs off, but
 * irqsave/restore are still used in the alloc path so that it can be
 * used from the early init path - sched_init() specifically.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
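
/*
 * A minimal sketch of the lock ordering described above; this helper
 * is hypothetical and not used by the allocator.  pcpu_alloc_mutex is
 * taken first, pcpu_lock nests inside it, and pcpu_lock is dropped
 * before doing any GFP_KERNEL allocation.
 */
static inline void pcpu_example_lock_order(void)
{
	unsigned long flags;

	mutex_lock(&pcpu_alloc_mutex);		/* whole alloc/reclaim path */
	spin_lock_irqsave(&pcpu_lock, flags);	/* index data access */
	/* ... scan chunk slots and area maps ... */
	spin_unlock_irqrestore(&pcpu_lock, flags);
	/* ... GFP_KERNEL allocations go here, pcpu_lock re-taken after ... */
	mutex_unlock(&pcpu_alloc_mutex);
}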

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
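
/*
 * A small usage sketch for the iterators above; the helper is
 * hypothetical and not used by the allocator.  It counts the number
 * of populated pages in a unit of @chunk.
 */
static inline int pcpu_example_count_populated(struct pcpu_chunk *chunk)
{
	int rs, re, nr = 0;

	pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages)
		nr += re - rs;
	return nr;
}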

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The chunk containing @addr.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}
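
/*
 * Usage sketch (hypothetical helper): mapping a percpu pointer back
 * to the chunk backing it by first converting it to a regular
 * address, which is what the free path does.
 */
static inline struct pcpu_chunk *pcpu_example_ptr_to_chunk(void __percpu *ptr)
{
	return pcpu_chunk_addr_search(__pcpu_ptr_to_addr(ptr));
}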

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_alloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, old_size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		old = chunk->map;

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}
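
/*
 * Caller-side sketch of the extension protocol above; a hypothetical
 * helper mirroring the loop in pcpu_alloc().  pcpu_lock must be held
 * on entry and is dropped and re-taken around the GFP_KERNEL map
 * extension.
 */
static inline int pcpu_example_extend(struct pcpu_chunk *chunk,
				      unsigned long *flags)
{
	int new_alloc;

	while ((new_alloc = pcpu_need_to_extend(chunk))) {
		spin_unlock_irqrestore(&pcpu_lock, *flags);
		if (pcpu_extend_area_map(chunk, new_alloc) < 0)
			return -ENOMEM;
		spin_lock_irqsave(&pcpu_lock, *flags);
	}
	return 0;
}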

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
					       unsigned long **bitmapp,
					       bool may_alloc)
{
	static struct page **pages;
	static unsigned long *bitmap;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
			     sizeof(unsigned long);

	if (!pages || !bitmap) {
		if (may_alloc && !pages)
			pages = pcpu_mem_alloc(pages_size);
		if (may_alloc && !bitmap)
			bitmap = pcpu_mem_alloc(bitmap_size);
		if (!pages || !bitmap)
			return NULL;
	}

	memset(pages, 0, pages_size);
	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

	*bitmapp = bitmap;
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, unsigned long *populated,
			     int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}

	for (i = page_start; i < page_end; i++)
		__clear_bit(i, populated);
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, unsigned long *populated,
			  int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;
	}

	/* mapping successful, link chunk and mark populated */
	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
		__set_bit(i, populated);
	}

	return 0;

err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), cache flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The cache is flushed before unmapping; TLB flushing
 * is left to vmalloc's lazy purging.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		return;

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	rs = page_start;
	pcpu_next_pop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		goto clear;

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_unmap;
		unmap_end = re;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
	return 0;

err_unmap:
	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);
	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);
	return rc;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (chunk && chunk->vms)
		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}

static struct pcpu_chunk *pcpu_create_chunk(void)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->vms = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm...
no space left, create a new chunk */ 1178403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1179ccea34b5STejun Heo 1180*6081089fSTejun Heo chunk = pcpu_create_chunk(); 1181f2badb0cSTejun Heo if (!chunk) { 1182f2badb0cSTejun Heo err = "failed to allocate new chunk"; 1183ccea34b5STejun Heo goto fail_unlock_mutex; 1184f2badb0cSTejun Heo } 1185ccea34b5STejun Heo 1186403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1187fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1188ccea34b5STejun Heo goto restart; 1189fbf59bc9STejun Heo 1190fbf59bc9STejun Heo area_found: 1191403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1192ccea34b5STejun Heo 1193fbf59bc9STejun Heo /* populate, map and clear the area */ 1194fbf59bc9STejun Heo if (pcpu_populate_chunk(chunk, off, size)) { 1195403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1196fbf59bc9STejun Heo pcpu_free_area(chunk, off); 1197f2badb0cSTejun Heo err = "failed to populate"; 1198ccea34b5STejun Heo goto fail_unlock; 1199fbf59bc9STejun Heo } 1200fbf59bc9STejun Heo 1201ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1202ccea34b5STejun Heo 1203bba174f5STejun Heo /* return address relative to base address */ 1204bba174f5STejun Heo return __addr_to_pcpu_ptr(chunk->base_addr + off); 1205ccea34b5STejun Heo 1206ccea34b5STejun Heo fail_unlock: 1207403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1208ccea34b5STejun Heo fail_unlock_mutex: 1209ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1210f2badb0cSTejun Heo if (warn_limit) { 1211f2badb0cSTejun Heo pr_warning("PERCPU: allocation failed, size=%zu align=%zu, " 1212f2badb0cSTejun Heo "%s\n", size, align, err); 1213f2badb0cSTejun Heo dump_stack(); 1214f2badb0cSTejun Heo if (!--warn_limit) 1215f2badb0cSTejun Heo pr_info("PERCPU: limit reached, disable warning\n"); 1216f2badb0cSTejun Heo } 1217ccea34b5STejun Heo return NULL; 1218fbf59bc9STejun Heo } 1219edcb4639STejun Heo 1220edcb4639STejun Heo /** 1221edcb4639STejun Heo * __alloc_percpu - allocate dynamic percpu area 1222edcb4639STejun Heo * @size: size of area to allocate in bytes 1223edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1224edcb4639STejun Heo * 1225edcb4639STejun Heo * Allocate percpu area of @size bytes aligned at @align. Might 1226edcb4639STejun Heo * sleep. Might trigger writeouts. 1227edcb4639STejun Heo * 1228ccea34b5STejun Heo * CONTEXT: 1229ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1230ccea34b5STejun Heo * 1231edcb4639STejun Heo * RETURNS: 1232edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1233edcb4639STejun Heo */ 123443cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align) 1235edcb4639STejun Heo { 1236edcb4639STejun Heo return pcpu_alloc(size, align, false); 1237edcb4639STejun Heo } 1238fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 1239fbf59bc9STejun Heo 1240edcb4639STejun Heo /** 1241edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 1242edcb4639STejun Heo * @size: size of area to allocate in bytes 1243edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1244edcb4639STejun Heo * 1245edcb4639STejun Heo * Allocate percpu area of @size bytes aligned at @align from reserved 1246edcb4639STejun Heo * percpu area if arch has set it up; otherwise, allocation is served 1247edcb4639STejun Heo * from the same dynamic area. Might sleep. Might trigger writeouts. 
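 *
 * The reserved area primarily backs module static percpu variables;
 * a hedged sketch of the kind of call the module loader makes
 * (secsize and secalign are illustrative names, not real symbols):
 *
 *	void __percpu *ptr;
 *
 *	ptr = __alloc_reserved_percpu(secsize, secalign);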
1248edcb4639STejun Heo * 1249ccea34b5STejun Heo * CONTEXT: 1250ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1251ccea34b5STejun Heo * 1252edcb4639STejun Heo * RETURNS: 1253edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1254edcb4639STejun Heo */ 125543cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1256edcb4639STejun Heo { 1257edcb4639STejun Heo return pcpu_alloc(size, align, true); 1258edcb4639STejun Heo } 1259edcb4639STejun Heo 1260a56dbddfSTejun Heo /** 1261a56dbddfSTejun Heo * pcpu_reclaim - reclaim fully free chunks, workqueue function 1262a56dbddfSTejun Heo * @work: unused 1263a56dbddfSTejun Heo * 1264a56dbddfSTejun Heo * Reclaim all fully free chunks except for the first one. 1265ccea34b5STejun Heo * 1266ccea34b5STejun Heo * CONTEXT: 1267ccea34b5STejun Heo * workqueue context. 1268a56dbddfSTejun Heo */ 1269a56dbddfSTejun Heo static void pcpu_reclaim(struct work_struct *work) 1270fbf59bc9STejun Heo { 1271a56dbddfSTejun Heo LIST_HEAD(todo); 1272a56dbddfSTejun Heo struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; 1273a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 1274a56dbddfSTejun Heo 1275ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 1276ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1277a56dbddfSTejun Heo 1278a56dbddfSTejun Heo list_for_each_entry_safe(chunk, next, head, list) { 12798d408b4bSTejun Heo WARN_ON(chunk->immutable); 1280a56dbddfSTejun Heo 1281a56dbddfSTejun Heo /* spare the first one */ 1282a56dbddfSTejun Heo if (chunk == list_first_entry(head, struct pcpu_chunk, list)) 1283a56dbddfSTejun Heo continue; 1284a56dbddfSTejun Heo 1285a56dbddfSTejun Heo list_move(&chunk->list, &todo); 1286a56dbddfSTejun Heo } 1287a56dbddfSTejun Heo 1288ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1289a56dbddfSTejun Heo 1290a56dbddfSTejun Heo list_for_each_entry_safe(chunk, next, &todo, list) { 1291ce3141a2STejun Heo pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); 1292*6081089fSTejun Heo pcpu_destroy_chunk(chunk); 1293fbf59bc9STejun Heo } 1294971f3918STejun Heo 1295971f3918STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1296a56dbddfSTejun Heo } 1297fbf59bc9STejun Heo 1298fbf59bc9STejun Heo /** 1299fbf59bc9STejun Heo * free_percpu - free percpu area 1300fbf59bc9STejun Heo * @ptr: pointer to area to free 1301fbf59bc9STejun Heo * 1302ccea34b5STejun Heo * Free percpu area @ptr. 1303ccea34b5STejun Heo * 1304ccea34b5STejun Heo * CONTEXT: 1305ccea34b5STejun Heo * Can be called from atomic context. 
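 *
 * Passing in NULL is a no-op.  A minimal alloc/free pairing sketch:
 *
 *	void __percpu *p = __alloc_percpu(size, align);
 *
 *	if (p) {
 *		... access via per_cpu_ptr(p, cpu) ...
 *		free_percpu(p);
 *	}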
1306fbf59bc9STejun Heo */ 130743cf38ebSTejun Heo void free_percpu(void __percpu *ptr) 1308fbf59bc9STejun Heo { 1309129182e5SAndrew Morton void *addr; 1310fbf59bc9STejun Heo struct pcpu_chunk *chunk; 1311ccea34b5STejun Heo unsigned long flags; 1312fbf59bc9STejun Heo int off; 1313fbf59bc9STejun Heo 1314fbf59bc9STejun Heo if (!ptr) 1315fbf59bc9STejun Heo return; 1316fbf59bc9STejun Heo 1317129182e5SAndrew Morton addr = __pcpu_ptr_to_addr(ptr); 1318129182e5SAndrew Morton 1319ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1320fbf59bc9STejun Heo 1321fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr); 1322bba174f5STejun Heo off = addr - chunk->base_addr; 1323fbf59bc9STejun Heo 1324fbf59bc9STejun Heo pcpu_free_area(chunk, off); 1325fbf59bc9STejun Heo 1326a56dbddfSTejun Heo /* if there is more than one fully free chunk, wake up the grim reaper */ 1327fbf59bc9STejun Heo if (chunk->free_size == pcpu_unit_size) { 1328fbf59bc9STejun Heo struct pcpu_chunk *pos; 1329fbf59bc9STejun Heo 1330a56dbddfSTejun Heo list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1331fbf59bc9STejun Heo if (pos != chunk) { 1332a56dbddfSTejun Heo schedule_work(&pcpu_reclaim_work); 1333fbf59bc9STejun Heo break; 1334fbf59bc9STejun Heo } 1335fbf59bc9STejun Heo } 1336fbf59bc9STejun Heo 1337ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1338fbf59bc9STejun Heo } 1339fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 1340fbf59bc9STejun Heo 13413b034b0dSVivek Goyal /** 134210fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area 134310fad5e4STejun Heo * @addr: address to test 134410fad5e4STejun Heo * 134510fad5e4STejun Heo * Test whether @addr belongs to the in-kernel static percpu area. Module 134610fad5e4STejun Heo * static percpu areas are not considered. For those, use 134710fad5e4STejun Heo * is_module_percpu_address(). 134810fad5e4STejun Heo * 134910fad5e4STejun Heo * RETURNS: 135010fad5e4STejun Heo * %true if @addr is from the in-kernel static percpu area, %false otherwise. 135110fad5e4STejun Heo */ 135210fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr) 135310fad5e4STejun Heo { 135410fad5e4STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 135510fad5e4STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 135610fad5e4STejun Heo unsigned int cpu; 135710fad5e4STejun Heo 135810fad5e4STejun Heo for_each_possible_cpu(cpu) { 135910fad5e4STejun Heo void *start = per_cpu_ptr(base, cpu); 136010fad5e4STejun Heo 136110fad5e4STejun Heo if ((void *)addr >= start && (void *)addr < start + static_size) 136210fad5e4STejun Heo return true; 136310fad5e4STejun Heo } 136410fad5e4STejun Heo return false; 136510fad5e4STejun Heo } 136610fad5e4STejun Heo 136710fad5e4STejun Heo /** 13683b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address 13693b034b0dSVivek Goyal * @addr: the address to be converted to physical address 13703b034b0dSVivek Goyal * 13713b034b0dSVivek Goyal * Given @addr which is a dereferenceable address obtained via one of 13723b034b0dSVivek Goyal * the percpu access macros, this function translates it into its physical 13733b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid 13743b034b0dSVivek Goyal * until this function finishes. 13753b034b0dSVivek Goyal * 13763b034b0dSVivek Goyal * RETURNS: 13773b034b0dSVivek Goyal * The physical address for @addr.
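 *
 * For example, the physical location of one cpu's copy of a percpu
 * object can be looked up with (sketch):
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(ptr, cpu));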
13783b034b0dSVivek Goyal */ 13793b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr) 13803b034b0dSVivek Goyal { 1381020ec653STejun Heo if (pcpu_addr_in_first_chunk(addr)) { 13823b034b0dSVivek Goyal if ((unsigned long)addr < VMALLOC_START || 13833b034b0dSVivek Goyal (unsigned long)addr >= VMALLOC_END) 13843b034b0dSVivek Goyal return __pa(addr); 13853b034b0dSVivek Goyal else 13863b034b0dSVivek Goyal return page_to_phys(vmalloc_to_page(addr)); 1387020ec653STejun Heo } else 1388020ec653STejun Heo return page_to_phys(vmalloc_to_page(addr)); 13893b034b0dSVivek Goyal } 13903b034b0dSVivek Goyal 1391033e48fbSTejun Heo static inline size_t pcpu_calc_fc_sizes(size_t static_size, 1392033e48fbSTejun Heo size_t reserved_size, 1393033e48fbSTejun Heo ssize_t *dyn_sizep) 1394033e48fbSTejun Heo { 1395033e48fbSTejun Heo size_t size_sum; 1396033e48fbSTejun Heo 1397033e48fbSTejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 1398033e48fbSTejun Heo (*dyn_sizep >= 0 ? *dyn_sizep : 0)); 1399033e48fbSTejun Heo if (*dyn_sizep != 0) 1400033e48fbSTejun Heo *dyn_sizep = size_sum - static_size - reserved_size; 1401033e48fbSTejun Heo 1402033e48fbSTejun Heo return size_sum; 1403033e48fbSTejun Heo } 1404033e48fbSTejun Heo 1405fbf59bc9STejun Heo /** 1406fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 1407fd1e8a1fSTejun Heo * @nr_groups: the number of groups 1408fd1e8a1fSTejun Heo * @nr_units: the number of units 1409033e48fbSTejun Heo * 1410fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 1411fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 1412fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 1413fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 1414fd1e8a1fSTejun Heo * pointer of other groups. 1415033e48fbSTejun Heo * 1416033e48fbSTejun Heo * RETURNS: 1417fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 1418fd1e8a1fSTejun Heo * failure. 
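 *
 * For example, a single-group configuration covering every possible
 * CPU could be allocated with (sketch):
 *
 *	struct pcpu_alloc_info *ai;
 *
 *	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());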
1419033e48fbSTejun Heo */ 1420fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 1421fd1e8a1fSTejun Heo int nr_units) 1422fd1e8a1fSTejun Heo { 1423fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 1424fd1e8a1fSTejun Heo size_t base_size, ai_size; 1425fd1e8a1fSTejun Heo void *ptr; 1426fd1e8a1fSTejun Heo int unit; 1427fd1e8a1fSTejun Heo 1428fd1e8a1fSTejun Heo base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), 1429fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 1430fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 1431fd1e8a1fSTejun Heo 1432fd1e8a1fSTejun Heo ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size)); 1433fd1e8a1fSTejun Heo if (!ptr) 1434fd1e8a1fSTejun Heo return NULL; 1435fd1e8a1fSTejun Heo ai = ptr; 1436fd1e8a1fSTejun Heo ptr += base_size; 1437fd1e8a1fSTejun Heo 1438fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 1439fd1e8a1fSTejun Heo 1440fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 1441fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 1442fd1e8a1fSTejun Heo 1443fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 1444fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 1445fd1e8a1fSTejun Heo 1446fd1e8a1fSTejun Heo return ai; 1447fd1e8a1fSTejun Heo } 1448fd1e8a1fSTejun Heo 1449fd1e8a1fSTejun Heo /** 1450fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 1451fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 1452fd1e8a1fSTejun Heo * 1453fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 1454fd1e8a1fSTejun Heo */ 1455fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 1456fd1e8a1fSTejun Heo { 1457fd1e8a1fSTejun Heo free_bootmem(__pa(ai), ai->__ai_size); 1458fd1e8a1fSTejun Heo } 1459fd1e8a1fSTejun Heo 1460fd1e8a1fSTejun Heo /** 1461fd1e8a1fSTejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 1462edcb4639STejun Heo * @reserved_size: the size of reserved percpu area in bytes 1463cafe8816STejun Heo * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 1464fd1e8a1fSTejun Heo * @atom_size: allocation atom size 1465fd1e8a1fSTejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1466fd1e8a1fSTejun Heo * 1467fd1e8a1fSTejun Heo * This function determines grouping of units, their mappings to cpus 1468fd1e8a1fSTejun Heo * and other parameters considering needed percpu size, allocation 1469fd1e8a1fSTejun Heo * atom size and distances between CPUs. 1470fd1e8a1fSTejun Heo * 1471fd1e8a1fSTejun Heo * Groups are always multiples of atom size and CPUs which are of 1472fd1e8a1fSTejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 1473fd1e8a1fSTejun Heo * units in the same group. The returned configuration is guaranteed 1474fd1e8a1fSTejun Heo * to have CPUs on different nodes in different groups and >=75% usage 1475fd1e8a1fSTejun Heo * of allocated virtual address space. 1476fd1e8a1fSTejun Heo * 1477fd1e8a1fSTejun Heo * RETURNS: 1478fd1e8a1fSTejun Heo * On success, pointer to the new allocation_info is returned. On 1479fd1e8a1fSTejun Heo * failure, ERR_PTR value is returned.
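 *
 * A typical invocation, mirroring what the first chunk helpers below
 * do (sketch):
 *
 *	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
 *				   cpu_distance_fn);
 *	if (IS_ERR(ai))
 *		return PTR_ERR(ai);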
1480fd1e8a1fSTejun Heo */ 1481fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_build_alloc_info( 1482fd1e8a1fSTejun Heo size_t reserved_size, ssize_t dyn_size, 1483fd1e8a1fSTejun Heo size_t atom_size, 1484033e48fbSTejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1485033e48fbSTejun Heo { 1486033e48fbSTejun Heo static int group_map[NR_CPUS] __initdata; 1487033e48fbSTejun Heo static int group_cnt[NR_CPUS] __initdata; 1488033e48fbSTejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 1489fd1e8a1fSTejun Heo int group_cnt_max = 0, nr_groups = 1, nr_units = 0; 1490033e48fbSTejun Heo size_t size_sum, min_unit_size, alloc_size; 1491033e48fbSTejun Heo int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 1492fd1e8a1fSTejun Heo int last_allocs, group, unit; 1493033e48fbSTejun Heo unsigned int cpu, tcpu; 1494fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 1495fd1e8a1fSTejun Heo unsigned int *cpu_map; 1496033e48fbSTejun Heo 1497fb59e72eSTejun Heo /* this function may be called multiple times */ 1498fb59e72eSTejun Heo memset(group_map, 0, sizeof(group_map)); 1499fb59e72eSTejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 1500fb59e72eSTejun Heo 1501033e48fbSTejun Heo /* 1502033e48fbSTejun Heo * Determine min_unit_size, alloc_size and max_upa such that 1503fd1e8a1fSTejun Heo * alloc_size is multiple of atom_size and is the smallest 1504033e48fbSTejun Heo * which can accommodate 4k aligned segments which are equal to 1505033e48fbSTejun Heo * or larger than min_unit_size. 1506033e48fbSTejun Heo */ 1507fd1e8a1fSTejun Heo size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); 1508033e48fbSTejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1509033e48fbSTejun Heo 1510fd1e8a1fSTejun Heo alloc_size = roundup(min_unit_size, atom_size); 1511033e48fbSTejun Heo upa = alloc_size / min_unit_size; 1512033e48fbSTejun Heo while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1513033e48fbSTejun Heo upa--; 1514033e48fbSTejun Heo max_upa = upa; 1515033e48fbSTejun Heo 1516033e48fbSTejun Heo /* group cpus according to their proximity */ 1517033e48fbSTejun Heo for_each_possible_cpu(cpu) { 1518033e48fbSTejun Heo group = 0; 1519033e48fbSTejun Heo next_group: 1520033e48fbSTejun Heo for_each_possible_cpu(tcpu) { 1521033e48fbSTejun Heo if (cpu == tcpu) 1522033e48fbSTejun Heo break; 1523fd1e8a1fSTejun Heo if (group_map[tcpu] == group && cpu_distance_fn && 1524033e48fbSTejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 1525033e48fbSTejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 1526033e48fbSTejun Heo group++; 1527fd1e8a1fSTejun Heo nr_groups = max(nr_groups, group + 1); 1528033e48fbSTejun Heo goto next_group; 1529033e48fbSTejun Heo } 1530033e48fbSTejun Heo } 1531033e48fbSTejun Heo group_map[cpu] = group; 1532033e48fbSTejun Heo group_cnt[group]++; 1533033e48fbSTejun Heo group_cnt_max = max(group_cnt_max, group_cnt[group]); 1534033e48fbSTejun Heo } 1535033e48fbSTejun Heo 1536033e48fbSTejun Heo /* 1537033e48fbSTejun Heo * Expand unit size until address space usage goes over 75% 1538033e48fbSTejun Heo * and then as much as possible without using more address 1539033e48fbSTejun Heo * space.
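	 *
	 * For example, with 5 possible cpus in one group, upa == 4
	 * needs two allocations covering 8 units and wastes 3 of
	 * them; 3 > num_possible_cpus() / 3 == 1, i.e. more than 25%
	 * of the allocated space would be wasted, so upa == 4 is
	 * skipped in favor of a smaller value.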
1540033e48fbSTejun Heo */ 1541033e48fbSTejun Heo last_allocs = INT_MAX; 1542033e48fbSTejun Heo for (upa = max_upa; upa; upa--) { 1543033e48fbSTejun Heo int allocs = 0, wasted = 0; 1544033e48fbSTejun Heo 1545033e48fbSTejun Heo if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1546033e48fbSTejun Heo continue; 1547033e48fbSTejun Heo 1548fd1e8a1fSTejun Heo for (group = 0; group < nr_groups; group++) { 1549033e48fbSTejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1550033e48fbSTejun Heo allocs += this_allocs; 1551033e48fbSTejun Heo wasted += this_allocs * upa - group_cnt[group]; 1552033e48fbSTejun Heo } 1553033e48fbSTejun Heo 1554033e48fbSTejun Heo /* 1555033e48fbSTejun Heo * Don't accept if wastage is over 25%. The 1556033e48fbSTejun Heo * greater-than comparison ensures upa==1 always 1557033e48fbSTejun Heo * passes the following check. 1558033e48fbSTejun Heo */ 1559033e48fbSTejun Heo if (wasted > num_possible_cpus() / 3) 1560033e48fbSTejun Heo continue; 1561033e48fbSTejun Heo 1562033e48fbSTejun Heo /* and then don't consume more memory */ 1563033e48fbSTejun Heo if (allocs > last_allocs) 1564033e48fbSTejun Heo break; 1565033e48fbSTejun Heo last_allocs = allocs; 1566033e48fbSTejun Heo best_upa = upa; 1567033e48fbSTejun Heo } 1568fd1e8a1fSTejun Heo upa = best_upa; 1569033e48fbSTejun Heo 1570fd1e8a1fSTejun Heo /* allocate and fill alloc_info */ 1571fd1e8a1fSTejun Heo for (group = 0; group < nr_groups; group++) 1572fd1e8a1fSTejun Heo nr_units += roundup(group_cnt[group], upa); 1573fd1e8a1fSTejun Heo 1574fd1e8a1fSTejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 1575fd1e8a1fSTejun Heo if (!ai) 1576fd1e8a1fSTejun Heo return ERR_PTR(-ENOMEM); 1577fd1e8a1fSTejun Heo cpu_map = ai->groups[0].cpu_map; 1578fd1e8a1fSTejun Heo 1579fd1e8a1fSTejun Heo for (group = 0; group < nr_groups; group++) { 1580fd1e8a1fSTejun Heo ai->groups[group].cpu_map = cpu_map; 1581fd1e8a1fSTejun Heo cpu_map += roundup(group_cnt[group], upa); 1582fd1e8a1fSTejun Heo } 1583fd1e8a1fSTejun Heo 1584fd1e8a1fSTejun Heo ai->static_size = static_size; 1585fd1e8a1fSTejun Heo ai->reserved_size = reserved_size; 1586fd1e8a1fSTejun Heo ai->dyn_size = dyn_size; 1587fd1e8a1fSTejun Heo ai->unit_size = alloc_size / upa; 1588fd1e8a1fSTejun Heo ai->atom_size = atom_size; 1589fd1e8a1fSTejun Heo ai->alloc_size = alloc_size; 1590fd1e8a1fSTejun Heo 1591fd1e8a1fSTejun Heo for (group = 0, unit = 0; group_cnt[group]; group++) { 1592fd1e8a1fSTejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1593fd1e8a1fSTejun Heo 1594fd1e8a1fSTejun Heo /* 1595fd1e8a1fSTejun Heo * Initialize base_offset as if all groups are located 1596fd1e8a1fSTejun Heo * back-to-back. The caller should update this to 1597fd1e8a1fSTejun Heo * reflect actual allocation. 
1598fd1e8a1fSTejun Heo */ 1599fd1e8a1fSTejun Heo gi->base_offset = unit * ai->unit_size; 1600fd1e8a1fSTejun Heo 1601033e48fbSTejun Heo for_each_possible_cpu(cpu) 1602033e48fbSTejun Heo if (group_map[cpu] == group) 1603fd1e8a1fSTejun Heo gi->cpu_map[gi->nr_units++] = cpu; 1604fd1e8a1fSTejun Heo gi->nr_units = roundup(gi->nr_units, upa); 1605fd1e8a1fSTejun Heo unit += gi->nr_units; 1606fd1e8a1fSTejun Heo } 1607fd1e8a1fSTejun Heo BUG_ON(unit != nr_units); 1608fd1e8a1fSTejun Heo 1609fd1e8a1fSTejun Heo return ai; 1610033e48fbSTejun Heo } 1611033e48fbSTejun Heo 1612fd1e8a1fSTejun Heo /** 1613fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1614fd1e8a1fSTejun Heo * @lvl: loglevel 1615fd1e8a1fSTejun Heo * @ai: allocation info to dump 1616fd1e8a1fSTejun Heo * 1617fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 1618fd1e8a1fSTejun Heo */ 1619fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl, 1620fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai) 1621033e48fbSTejun Heo { 1622fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width; 1623033e48fbSTejun Heo char empty_str[] = "--------"; 1624fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0; 1625fd1e8a1fSTejun Heo int group, v; 1626fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */ 1627033e48fbSTejun Heo 1628fd1e8a1fSTejun Heo v = ai->nr_groups; 1629033e48fbSTejun Heo while (v /= 10) 1630fd1e8a1fSTejun Heo group_width++; 1631033e48fbSTejun Heo 1632fd1e8a1fSTejun Heo v = num_possible_cpus(); 1633fd1e8a1fSTejun Heo while (v /= 10) 1634fd1e8a1fSTejun Heo cpu_width++; 1635fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 1636033e48fbSTejun Heo 1637fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size; 1638fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3; 1639fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1)); 1640033e48fbSTejun Heo 1641fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 1642fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 1643fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 1644fd1e8a1fSTejun Heo 1645fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) { 1646fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 1647fd1e8a1fSTejun Heo int unit = 0, unit_end = 0; 1648fd1e8a1fSTejun Heo 1649fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa); 1650fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa; 1651fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) { 1652fd1e8a1fSTejun Heo if (!(alloc % apl)) { 1653033e48fbSTejun Heo printk("\n"); 1654fd1e8a1fSTejun Heo printk("%spcpu-alloc: ", lvl); 1655033e48fbSTejun Heo } 1656fd1e8a1fSTejun Heo printk("[%0*d] ", group_width, group); 1657fd1e8a1fSTejun Heo 1658fd1e8a1fSTejun Heo for (unit_end += upa; unit < unit_end; unit++) 1659fd1e8a1fSTejun Heo if (gi->cpu_map[unit] != NR_CPUS) 1660fd1e8a1fSTejun Heo printk("%0*d ", cpu_width, 1661fd1e8a1fSTejun Heo gi->cpu_map[unit]); 1662033e48fbSTejun Heo else 1663033e48fbSTejun Heo printk("%s ", empty_str); 1664033e48fbSTejun Heo } 1665fd1e8a1fSTejun Heo } 1666033e48fbSTejun Heo printk("\n"); 1667033e48fbSTejun Heo } 1668033e48fbSTejun Heo 1669fbf59bc9STejun Heo /** 16708d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 1671fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how the percpu area is shaped 167238a6be52STejun Heo * @base_addr: mapped address
1673fbf59bc9STejun Heo * 16748d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 16758d408b4bSTejun Heo * percpu area. This function is to be called from the arch percpu area 167638a6be52STejun Heo * setup path. 16778d408b4bSTejun Heo * 1678fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first 1679fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator. 16808d408b4bSTejun Heo * 1681fd1e8a1fSTejun Heo * @ai->static_size is the size of static percpu area. 1682fd1e8a1fSTejun Heo * 1683fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the amount of bytes to 1684edcb4639STejun Heo * reserve after the static area in the first chunk. This reserves 1685edcb4639STejun Heo * the first chunk such that it's available only through reserved 1686edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 1687edcb4639STejun Heo * static areas on architectures where the addressing model has 1688edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 1689edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 1690edcb4639STejun Heo * 1691fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 1692fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 1693fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 16946074d5b0STejun Heo * 1695fd1e8a1fSTejun Heo * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 1696fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 1697fd1e8a1fSTejun Heo * @ai->dyn_size. 16988d408b4bSTejun Heo * 1699fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and used as alignment 1700fd1e8a1fSTejun Heo * for vm areas. 17018d408b4bSTejun Heo * 1702fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and is always a multiple of 1703fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 1704fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 1705fd1e8a1fSTejun Heo * 1706fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe virtual memory layout of 1707fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the 1708fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 1709fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 1710fd1e8a1fSTejun Heo * all units is assumed. 17118d408b4bSTejun Heo * 171238a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 171338a6be52STejun Heo * copied static data to each unit. 1714fbf59bc9STejun Heo * 1715edcb4639STejun Heo * If the first chunk ends up with both reserved and dynamic areas, it 1716edcb4639STejun Heo * is served by two chunks - one to serve the core static and reserved 1717edcb4639STejun Heo * areas and the other for the dynamic area. They share the same vm 1718edcb4639STejun Heo * and page map but use different area allocation maps to stay away 1719edcb4639STejun Heo * from each other. The latter chunk is circulated in the chunk slots 1720edcb4639STejun Heo * and available for dynamic allocation like any other chunk. 1721edcb4639STejun Heo * 1722fbf59bc9STejun Heo * RETURNS: 1723fb435d52STejun Heo * 0 on success, -errno on failure.
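 *
 * A condensed sketch of the expected arch-side sequence (local
 * variable names are illustrative):
 *
 *	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
 *				   cpu_distance_fn);
 *	base = ... allocate and map the first chunk, copy static data ...;
 *	rc = pcpu_setup_first_chunk(ai, base);
 *	pcpu_free_alloc_info(ai);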
1724fbf59bc9STejun Heo */ 1725fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1726fd1e8a1fSTejun Heo void *base_addr) 1727fbf59bc9STejun Heo { 1728635b75fcSTejun Heo static char cpus_buf[4096] __initdata; 1729edcb4639STejun Heo static int smap[2], dmap[2]; 1730fd1e8a1fSTejun Heo size_t dyn_size = ai->dyn_size; 1731fd1e8a1fSTejun Heo size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; 1732edcb4639STejun Heo struct pcpu_chunk *schunk, *dchunk = NULL; 17336563297cSTejun Heo unsigned long *group_offsets; 17346563297cSTejun Heo size_t *group_sizes; 1735fb435d52STejun Heo unsigned long *unit_off; 1736fbf59bc9STejun Heo unsigned int cpu; 1737fd1e8a1fSTejun Heo int *unit_map; 1738fd1e8a1fSTejun Heo int group, unit, i; 1739fbf59bc9STejun Heo 1740635b75fcSTejun Heo cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask); 1741635b75fcSTejun Heo 1742635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 1743635b75fcSTejun Heo if (unlikely(cond)) { \ 1744635b75fcSTejun Heo pr_emerg("PERCPU: failed to initialize, %s\n", #cond); \ 1745635b75fcSTejun Heo pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \ 1746635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 1747635b75fcSTejun Heo BUG(); \ 1748635b75fcSTejun Heo } \ 1749635b75fcSTejun Heo } while (0) 1750635b75fcSTejun Heo 17512f39e637STejun Heo /* sanity checks */ 1752edcb4639STejun Heo BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || 1753edcb4639STejun Heo ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); 1754635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1755635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 1756635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 1757635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1758635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); 1759635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 17608d408b4bSTejun Heo 17616563297cSTejun Heo /* process group information and build config tables accordingly */ 17626563297cSTejun Heo group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0])); 17636563297cSTejun Heo group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0])); 1764fd1e8a1fSTejun Heo unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0])); 1765fb435d52STejun Heo unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); 17662f39e637STejun Heo 1767fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1768ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 1769fd1e8a1fSTejun Heo pcpu_first_unit_cpu = NR_CPUS; 17702f39e637STejun Heo 1771fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1772fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 17732f39e637STejun Heo 17746563297cSTejun Heo group_offsets[group] = gi->base_offset; 17756563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 17766563297cSTejun Heo 1777fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 1778fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 1779fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 1780fd1e8a1fSTejun Heo continue; 1781fd1e8a1fSTejun Heo 1782635b75fcSTejun Heo PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); 1783635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1784635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1785fd1e8a1fSTejun Heo 1786fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 1787fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1788fb435d52STejun Heo 1789fd1e8a1fSTejun Heo 
if (pcpu_first_unit_cpu == NR_CPUS) 17902f39e637STejun Heo pcpu_first_unit_cpu = cpu; 17912f39e637STejun Heo } 1792fd1e8a1fSTejun Heo } 17932f39e637STejun Heo pcpu_last_unit_cpu = cpu; 1794fd1e8a1fSTejun Heo pcpu_nr_units = unit; 17952f39e637STejun Heo 17962f39e637STejun Heo for_each_possible_cpu(cpu) 1797635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 1798635b75fcSTejun Heo 1799635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 1800635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 1801635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_INFO, ai); 18022f39e637STejun Heo 18036563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 18046563297cSTejun Heo pcpu_group_offsets = group_offsets; 18056563297cSTejun Heo pcpu_group_sizes = group_sizes; 1806fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 1807fb435d52STejun Heo pcpu_unit_offsets = unit_off; 18082f39e637STejun Heo 18092f39e637STejun Heo /* determine basic parameters */ 1810fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 1811d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 18126563297cSTejun Heo pcpu_atom_size = ai->atom_size; 1813ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1814ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1815cafe8816STejun Heo 1816d9b55eebSTejun Heo /* 1817d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 1818d9b55eebSTejun Heo * empty chunks. 1819d9b55eebSTejun Heo */ 1820d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1821fbf59bc9STejun Heo pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); 1822fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 1823fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 1824fbf59bc9STejun Heo 1825edcb4639STejun Heo /* 1826edcb4639STejun Heo * Initialize static chunk. If reserved_size is zero, the 1827edcb4639STejun Heo * static chunk covers static area + dynamic allocation area 1828edcb4639STejun Heo * in the first chunk. If reserved_size is not zero, it 1829edcb4639STejun Heo * covers static area + reserved area (mostly used for module 1830edcb4639STejun Heo * static percpu allocation). 
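 *
 * For example, with a 64k static area, 8k reserved area and 20k
 * dynamic area in a 128k unit, schunk covers the first 72k
 * (static + reserved) and a separate dchunk serves the 20k dynamic
 * area.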
1831edcb4639STejun Heo */ 18322441d15cSTejun Heo schunk = alloc_bootmem(pcpu_chunk_struct_size); 18332441d15cSTejun Heo INIT_LIST_HEAD(&schunk->list); 1834bba174f5STejun Heo schunk->base_addr = base_addr; 183561ace7faSTejun Heo schunk->map = smap; 183661ace7faSTejun Heo schunk->map_alloc = ARRAY_SIZE(smap); 183738a6be52STejun Heo schunk->immutable = true; 1838ce3141a2STejun Heo bitmap_fill(schunk->populated, pcpu_unit_pages); 1839edcb4639STejun Heo 1840fd1e8a1fSTejun Heo if (ai->reserved_size) { 1841fd1e8a1fSTejun Heo schunk->free_size = ai->reserved_size; 1842ae9e6bc9STejun Heo pcpu_reserved_chunk = schunk; 1843fd1e8a1fSTejun Heo pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; 1844edcb4639STejun Heo } else { 18452441d15cSTejun Heo schunk->free_size = dyn_size; 1846edcb4639STejun Heo dyn_size = 0; /* dynamic area covered */ 1847edcb4639STejun Heo } 18482441d15cSTejun Heo schunk->contig_hint = schunk->free_size; 1849fbf59bc9STejun Heo 1850fd1e8a1fSTejun Heo schunk->map[schunk->map_used++] = -ai->static_size; 185161ace7faSTejun Heo if (schunk->free_size) 185261ace7faSTejun Heo schunk->map[schunk->map_used++] = schunk->free_size; 185361ace7faSTejun Heo 1854edcb4639STejun Heo /* init dynamic chunk if necessary */ 1855edcb4639STejun Heo if (dyn_size) { 1856ce3141a2STejun Heo dchunk = alloc_bootmem(pcpu_chunk_struct_size); 1857edcb4639STejun Heo INIT_LIST_HEAD(&dchunk->list); 1858bba174f5STejun Heo dchunk->base_addr = base_addr; 1859edcb4639STejun Heo dchunk->map = dmap; 1860edcb4639STejun Heo dchunk->map_alloc = ARRAY_SIZE(dmap); 186138a6be52STejun Heo dchunk->immutable = true; 1862ce3141a2STejun Heo bitmap_fill(dchunk->populated, pcpu_unit_pages); 1863edcb4639STejun Heo 1864edcb4639STejun Heo dchunk->contig_hint = dchunk->free_size = dyn_size; 1865edcb4639STejun Heo dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; 1866edcb4639STejun Heo dchunk->map[dchunk->map_used++] = dchunk->free_size; 1867edcb4639STejun Heo } 1868edcb4639STejun Heo 18692441d15cSTejun Heo /* link the first chunk in */ 1870ae9e6bc9STejun Heo pcpu_first_chunk = dchunk ?: schunk; 1871ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 1872fbf59bc9STejun Heo 1873fbf59bc9STejun Heo /* we're done */ 1874bba174f5STejun Heo pcpu_base_addr = base_addr; 1875fb435d52STejun Heo return 0; 1876fbf59bc9STejun Heo } 187766c3a757STejun Heo 1878f58dc01bSTejun Heo const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { 1879f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 1880f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 1881f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 1882f58dc01bSTejun Heo }; 188366c3a757STejun Heo 1884f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 1885f58dc01bSTejun Heo 1886f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 188766c3a757STejun Heo { 1888f58dc01bSTejun Heo if (0) 1889f58dc01bSTejun Heo /* nada */; 1890f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 1891f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 1892f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 1893f58dc01bSTejun Heo #endif 1894f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1895f58dc01bSTejun Heo else if (!strcmp(str, "page")) 1896f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 1897f58dc01bSTejun Heo #endif 1898f58dc01bSTejun Heo else 1899f58dc01bSTejun Heo pr_warning("PERCPU: unknown allocator %s specified\n", str); 190066c3a757STejun Heo 1901f58dc01bSTejun Heo return 0; 190266c3a757STejun Heo } 1903f58dc01bSTejun Heo 
early_param("percpu_alloc", percpu_alloc_setup); 190466c3a757STejun Heo 190508fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 190608fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 190766c3a757STejun Heo /** 190866c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 190966c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 191066c3a757STejun Heo * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 1911c8826dd5STejun Heo * @atom_size: allocation atom size 1912c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 1913c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 1914c8826dd5STejun Heo * @free_fn: funtion to free percpu page 191566c3a757STejun Heo * 191666c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 191766c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 191866c3a757STejun Heo * 191966c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 1920c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 1921c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 1922c8826dd5STejun Heo * aligned to @atom_size. 1923c8826dd5STejun Heo * 1924c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 1925c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 1926c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 1927c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 1928c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 1929c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 193066c3a757STejun Heo * 193166c3a757STejun Heo * When @dyn_size is positive, dynamic area might be larger than 1932788e5abcSTejun Heo * specified to fill page alignment. When @dyn_size is auto, 1933788e5abcSTejun Heo * @dyn_size is just big enough to fill page alignment after static 1934788e5abcSTejun Heo * and reserved areas. 193566c3a757STejun Heo * 193666c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 1937c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 193866c3a757STejun Heo * 193966c3a757STejun Heo * RETURNS: 1940fb435d52STejun Heo * 0 on success, -errno on failure. 
194166c3a757STejun Heo */ 1942c8826dd5STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size, 1943c8826dd5STejun Heo size_t atom_size, 1944c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 1945c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 1946c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 194766c3a757STejun Heo { 1948c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 1949c8826dd5STejun Heo void **areas = NULL; 1950fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 19516ea529a2STejun Heo size_t size_sum, areas_size, max_distance; 1952c8826dd5STejun Heo int group, i, rc; 195366c3a757STejun Heo 1954c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 1955c8826dd5STejun Heo cpu_distance_fn); 1956fd1e8a1fSTejun Heo if (IS_ERR(ai)) 1957fd1e8a1fSTejun Heo return PTR_ERR(ai); 195866c3a757STejun Heo 1959fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 1960c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 196166c3a757STejun Heo 1962c8826dd5STejun Heo areas = alloc_bootmem_nopanic(areas_size); 1963c8826dd5STejun Heo if (!areas) { 1964fb435d52STejun Heo rc = -ENOMEM; 1965c8826dd5STejun Heo goto out_free; 1966fa8a7094STejun Heo } 196766c3a757STejun Heo 1968c8826dd5STejun Heo /* allocate, copy and determine base address */ 1969c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 1970c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 1971c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 1972c8826dd5STejun Heo void *ptr; 197366c3a757STejun Heo 1974c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 1975c8826dd5STejun Heo cpu = gi->cpu_map[i]; 1976c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 1977c8826dd5STejun Heo 1978c8826dd5STejun Heo /* allocate space for the whole group */ 1979c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 1980c8826dd5STejun Heo if (!ptr) { 1981c8826dd5STejun Heo rc = -ENOMEM; 1982c8826dd5STejun Heo goto out_free_areas; 1983c8826dd5STejun Heo } 1984c8826dd5STejun Heo areas[group] = ptr; 1985c8826dd5STejun Heo 1986c8826dd5STejun Heo base = min(ptr, base); 1987c8826dd5STejun Heo 1988c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 1989c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 1990c8826dd5STejun Heo /* unused unit, free whole */ 1991c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 1992c8826dd5STejun Heo continue; 1993c8826dd5STejun Heo } 1994c8826dd5STejun Heo /* copy and return the unused part */ 1995fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 1996c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 1997c8826dd5STejun Heo } 199866c3a757STejun Heo } 199966c3a757STejun Heo 2000c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 20016ea529a2STejun Heo max_distance = 0; 20026ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2003c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 20041a0c3298STejun Heo max_distance = max_t(size_t, max_distance, 20051a0c3298STejun Heo ai->groups[group].base_offset); 20066ea529a2STejun Heo } 20076ea529a2STejun Heo max_distance += ai->unit_size; 20086ea529a2STejun Heo 20096ea529a2STejun Heo /* warn if maximum distance is further than 75% of vmalloc space */ 20106ea529a2STejun Heo if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) { 20111a0c3298STejun Heo pr_warning("PERCPU: max_distance=0x%zx too large for 
vmalloc " 20126ea529a2STejun Heo "space 0x%lx\n", 20136ea529a2STejun Heo max_distance, VMALLOC_END - VMALLOC_START); 20146ea529a2STejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 20156ea529a2STejun Heo /* and fail if we have fallback */ 20166ea529a2STejun Heo rc = -EINVAL; 20176ea529a2STejun Heo goto out_free; 20186ea529a2STejun Heo #endif 20196ea529a2STejun Heo } 2020c8826dd5STejun Heo 2021004018e2STejun Heo pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 2022fd1e8a1fSTejun Heo PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 2023fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 202466c3a757STejun Heo 2025fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, base); 2026c8826dd5STejun Heo goto out_free; 2027c8826dd5STejun Heo 2028c8826dd5STejun Heo out_free_areas: 2029c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 2030c8826dd5STejun Heo free_fn(areas[group], 2031c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 2032c8826dd5STejun Heo out_free: 2033fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2034c8826dd5STejun Heo if (areas) 2035c8826dd5STejun Heo free_bootmem(__pa(areas), areas_size); 2036fb435d52STejun Heo return rc; 2037d4b95f80STejun Heo } 203808fc4580STejun Heo #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK || 203908fc4580STejun Heo !CONFIG_HAVE_SETUP_PER_CPU_AREA */ 2040d4b95f80STejun Heo 204108fc4580STejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2042d4b95f80STejun Heo /** 204300ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 2044d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2045d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 2046d4b95f80STejun Heo * @free_fn: funtion to free percpu page, always called with PAGE_SIZE 2047d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 2048d4b95f80STejun Heo * 204900ae4064STejun Heo * This is a helper to ease setting up page-remapped first percpu 205000ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 2051d4b95f80STejun Heo * 2052d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 2053d4b95f80STejun Heo * page-by-page into vmalloc area. 2054d4b95f80STejun Heo * 2055d4b95f80STejun Heo * RETURNS: 2056fb435d52STejun Heo * 0 on success, -errno on failure. 
2057d4b95f80STejun Heo */ 2058fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 2059d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2060d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 2061d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 2062d4b95f80STejun Heo { 20638f05a6a6STejun Heo static struct vm_struct vm; 2064fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 206500ae4064STejun Heo char psize_str[16]; 2066ce3141a2STejun Heo int unit_pages; 2067d4b95f80STejun Heo size_t pages_size; 2068ce3141a2STejun Heo struct page **pages; 2069fb435d52STejun Heo int unit, i, j, rc; 2070d4b95f80STejun Heo 207100ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 207200ae4064STejun Heo 2073fd1e8a1fSTejun Heo ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL); 2074fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2075fd1e8a1fSTejun Heo return PTR_ERR(ai); 2076fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 2077fd1e8a1fSTejun Heo BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); 2078fd1e8a1fSTejun Heo 2079fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 2080d4b95f80STejun Heo 2081d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 2082fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2083fd1e8a1fSTejun Heo sizeof(pages[0])); 2084ce3141a2STejun Heo pages = alloc_bootmem(pages_size); 2085d4b95f80STejun Heo 20868f05a6a6STejun Heo /* allocate pages */ 2087d4b95f80STejun Heo j = 0; 2088fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) 2089ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) { 2090fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 2091d4b95f80STejun Heo void *ptr; 2092d4b95f80STejun Heo 20933cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 2094d4b95f80STejun Heo if (!ptr) { 209500ae4064STejun Heo pr_warning("PERCPU: failed to allocate %s page " 209600ae4064STejun Heo "for cpu%u\n", psize_str, cpu); 2097d4b95f80STejun Heo goto enomem; 2098d4b95f80STejun Heo } 2099ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 2100d4b95f80STejun Heo } 2101d4b95f80STejun Heo 21028f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 21038f05a6a6STejun Heo vm.flags = VM_ALLOC; 2104fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 21058f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 21068f05a6a6STejun Heo 2107fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 21081d9d3257STejun Heo unsigned long unit_addr = 2109fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 21108f05a6a6STejun Heo 2111ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 21128f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 21138f05a6a6STejun Heo 21148f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 2115fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 2116ce3141a2STejun Heo unit_pages); 2117fb435d52STejun Heo if (rc < 0) 2118fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 21198f05a6a6STejun Heo 21208f05a6a6STejun Heo /* 21218f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 21228f05a6a6STejun Heo * cache for the linear mapping here - something 21238f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 21248f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 21258f05a6a6STejun Heo * data structures are not set up yet. 
21268f05a6a6STejun Heo */ 21278f05a6a6STejun Heo 21288f05a6a6STejun Heo /* copy static data */ 2129fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 213066c3a757STejun Heo } 213166c3a757STejun Heo 213266c3a757STejun Heo /* we're ready, commit */ 21331d9d3257STejun Heo pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", 2134fd1e8a1fSTejun Heo unit_pages, psize_str, vm.addr, ai->static_size, 2135fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 213666c3a757STejun Heo 2137fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, vm.addr); 2138d4b95f80STejun Heo goto out_free_ar; 2139d4b95f80STejun Heo 2140d4b95f80STejun Heo enomem: 2141d4b95f80STejun Heo while (--j >= 0) 2142ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 2143fb435d52STejun Heo rc = -ENOMEM; 2144d4b95f80STejun Heo out_free_ar: 2145ce3141a2STejun Heo free_bootmem(__pa(pages), pages_size); 2146fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2147fb435d52STejun Heo return rc; 214866c3a757STejun Heo } 214908fc4580STejun Heo #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ 2150d4b95f80STejun Heo 21518c4bfc6eSTejun Heo /* 2152e74e3962STejun Heo * Generic percpu area setup. 2153e74e3962STejun Heo * 2154e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 2155e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 2156e74e3962STejun Heo * important because many archs have addressing restrictions and might 2157e74e3962STejun Heo * fail if the percpu area is located far away from the previous 2158e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 2159e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 2160e74e3962STejun Heo * on the physical linear memory mapping which uses large page 2161e74e3962STejun Heo * mappings on applicable archs. 2162e74e3962STejun Heo */ 2163e74e3962STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 2164e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 2165e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 2166e74e3962STejun Heo 2167c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 2168c8826dd5STejun Heo size_t align) 2169c8826dd5STejun Heo { 2170c8826dd5STejun Heo return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS)); 2171c8826dd5STejun Heo } 2172c8826dd5STejun Heo 2173c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 2174c8826dd5STejun Heo { 2175c8826dd5STejun Heo free_bootmem(__pa(ptr), size); 2176c8826dd5STejun Heo } 2177c8826dd5STejun Heo 2178e74e3962STejun Heo void __init setup_per_cpu_areas(void) 2179e74e3962STejun Heo { 2180e74e3962STejun Heo unsigned long delta; 2181e74e3962STejun Heo unsigned int cpu; 2182fb435d52STejun Heo int rc; 2183e74e3962STejun Heo 2184e74e3962STejun Heo /* 2185e74e3962STejun Heo * Always reserve area for module percpu variables. That's 2186e74e3962STejun Heo * what the legacy allocator did. 
2187e74e3962STejun Heo */ 2188fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 2189c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 2190c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 2191fb435d52STejun Heo if (rc < 0) 2192e74e3962STejun Heo panic("Failed to initialize percpu areas."); 2193e74e3962STejun Heo 2194e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 2195e74e3962STejun Heo for_each_possible_cpu(cpu) 2196fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 2197e74e3962STejun Heo } 2198e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 2199