Lines matching defs:chunk (mm/percpu.c)

28  * There is special consideration for the first chunk which must handle
30 * are not online yet. In short, the first chunk is structured like so:
45 * The allocator tries to allocate from the fullest chunk first. Each chunk
50 * of the bitmap. The reverse mapping from page to chunk is stored in
54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
66 * set up the first chunk containing the kernel static percpu area
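
Editorial aside, not part of the matched output: the comments above (source lines 28-66) describe the first-chunk layout, <Static | [Reserved] | Dynamic>, and the allocation bitmap whose bits each cover PCPU_MIN_ALLOC_SIZE bytes. The sketch below loosely mirrors the size-to-bits conversion done at the top of pcpu_alloc(); the example_ helper name is illustrative, and PCPU_MIN_ALLOC_SHIFT == 2 (4-byte fragments) is assumed as defined in include/linux/percpu.h.

#include <linux/percpu.h>	/* PCPU_MIN_ALLOC_SIZE, PCPU_MIN_ALLOC_SHIFT */

/* Illustrative only: each alloc_map bit covers PCPU_MIN_ALLOC_SIZE bytes. */
static inline int example_size_to_bits(size_t size)
{
	/* e.g. a 10-byte request (with 4-byte fragments) occupies 3 bits */
	return (int)((size + PCPU_MIN_ALLOC_SIZE - 1) >> PCPU_MIN_ALLOC_SHIFT);
}
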
146 /* the address of the first chunk which starts with the kernel static area */
158 * The first chunk which always exists. Note that unlike other
165 * Optional reserved chunk. This chunk reserves part of the first
166 * chunk and serves it for reserved allocations. When the reserved
172 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
174 struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
178 * The reserved chunk doesn't contribute to the count.
184 * pcpu_lock. This number is kept per unit per chunk (i.e. when a page gets
185 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
194 * empty chunk.
208 * pcpu_addr_in_chunk - check if the address is served from this chunk
209 * @chunk: chunk of interest
213 * True if the address is served from this chunk.
215 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
219 if (!chunk)
222 start_addr = chunk->base_addr + chunk->start_offset;
223 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
224 chunk->end_offset;
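
The matched lines of pcpu_addr_in_chunk() omit its declarations and return statement. A minimal reconstruction of the range check they imply is shown below for readability; start_offset and end_offset trim the unmanaged head and tail of the region, and the exact body may differ slightly between kernel versions.

static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	/* the served region excludes the start/end offsets */
	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}
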
242 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
244 const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
246 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
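
pcpu_chunk_slot() is only partially matched above: the slot is derived from the chunk-level contig hint, falling back to slot 0 when the chunk cannot satisfy even a minimum allocation. A hedged reconstruction follows; pcpu_size_to_slot() is the size-to-slot helper defined elsewhere in the same file and is not among the matched lines.

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	/* chunks with no usable contiguous space sort into slot 0 */
	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}
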
253 /* set the pointer to a chunk in a page struct */
259 /* obtain pointer to a chunk from a page struct */
275 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
278 return (unsigned long)chunk->base_addr +
286 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
288 return chunk->alloc_map +
314 * Note, a chunk uses the same hints as a block so this can also check against
315 * the chunk's contig hint.
356 * @chunk: chunk of interest
357 * @bit_off: chunk offset
365 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
373 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
408 * @chunk: chunk of interest
411 * @bit_off: chunk offset
420 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
428 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
468 *bit_off = pcpu_chunk_map_bits(chunk);
477 #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \
478 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \
479 (bit_off) < pcpu_chunk_map_bits((chunk)); \
481 pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
483 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \
484 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
486 (bit_off) < pcpu_chunk_map_bits((chunk)); \
488 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
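
The two iterators above walk a chunk's metadata blocks, by free region and by fitting region respectively. As a usage sketch, mirroring how pcpu_chunk_refresh_hint() (matched further down, source lines 745-762) consumes the first iterator, a full hint rebuild is just an aggregation over every free region; the example_ name is illustrative.

/* Sketch: rebuilding the chunk-level hint from its per-block free regions. */
static void example_refresh_chunk_hint(struct pcpu_chunk *chunk)
{
	int bit_off, bits;

	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(&chunk->chunk_md, bit_off, bit_off + bits);
}
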
526 static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
529 if (chunk != pcpu_reserved_chunk) {
531 list_move(&chunk->list, &pcpu_chunk_lists[slot]);
533 list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
537 static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
539 __pcpu_chunk_move(chunk, slot, true);
543 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
544 * @chunk: chunk of interest
547 * This function is called after an allocation or free changed @chunk.
548 * New slot according to the changed state is determined and @chunk is
549 * moved to the slot. Note that the reserved chunk is never put on
550 * chunk slots.
555 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
557 int nslot = pcpu_chunk_slot(chunk);
560 if (chunk->isolated)
564 __pcpu_chunk_move(chunk, nslot, oslot < nslot);
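
Source lines 555-564 above give pcpu_chunk_relocate() in outline; the pieces they leave out are small, so a hedged reconstruction is included for readability. Isolated chunks stay where they are, and a move only happens when the slot actually changes.

static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	/* leave isolated chunks in place */
	if (chunk->isolated)
		return;

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}
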
567 static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
571 if (!chunk->isolated) {
572 chunk->isolated = true;
573 pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
575 list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
578 static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
582 if (chunk->isolated) {
583 chunk->isolated = false;
584 pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
585 pcpu_chunk_relocate(chunk, -1);
591 * @chunk: chunk of interest
598 static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
600 chunk->nr_empty_pop_pages += nr;
601 if (chunk != pcpu_reserved_chunk && !chunk->isolated)
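
The body of pcpu_update_empty_pages() is matched above except for its final statement: the update of the global counter that the comment at source line 178 refers to. A hedged completion:

static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	/* reserved and isolated chunks don't count toward the global total */
	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
		pcpu_nr_empty_pop_pages += nr;
}
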
698 * @chunk: chunk of interest
699 * @bit_off: chunk offset
712 static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
724 block = chunk->md_blocks + s_index;
727 l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
734 * pcpu_chunk_refresh_hint - updates metadata about a chunk
735 * @chunk: chunk of interest
745 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
747 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
762 pcpu_for_each_md_free_region(chunk, bit_off, bits)
768 * @chunk: chunk of interest
774 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
776 struct pcpu_block_md *block = chunk->md_blocks + index;
777 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
800 * @chunk: chunk of interest
801 * @bit_off: chunk offset
805 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
808 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
811 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
828 s_block = chunk->md_blocks + s_index;
829 e_block = chunk->md_blocks + e_index;
844 pcpu_index_alloc_map(chunk, s_index),
862 pcpu_block_refresh_hint(chunk, s_index);
885 pcpu_index_alloc_map(chunk, e_index),
898 pcpu_block_refresh_hint(chunk, e_index);
923 pcpu_update_empty_pages(chunk, -nr_empty_pages);
933 * The only time a full chunk scan is required is if the chunk
935 * was used and therefore the chunk contig hint is still correct.
942 pcpu_chunk_refresh_hint(chunk, false);
947 * @chunk: chunk of interest
948 * @bit_off: chunk offset
956 * A chunk update is triggered if a page becomes free, a block becomes free,
963 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
983 s_block = chunk->md_blocks + s_index;
984 e_block = chunk->md_blocks + e_index;
1004 * remainder of the chunk is free.
1006 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1015 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1044 pcpu_update_empty_pages(chunk, nr_empty_pages);
1047 * Refresh chunk metadata when the free makes a block free or spans
1053 pcpu_chunk_refresh_hint(chunk, true);
1055 pcpu_block_update(&chunk->chunk_md,
1062 * @chunk: chunk of interest
1063 * @bit_off: chunk offset
1073 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1081 start = find_next_zero_bit(chunk->populated, end, start);
1085 end = find_next_bit(chunk->populated, end, start + 1);
1093 * @chunk: chunk of interest
1098 * Given a chunk and an allocation spec, find the offset to begin searching
1102 * of a block or chunk, it is skipped. This errs on the side of caution
1110 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1113 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1119 * and creating a new chunk would happen soon.
1126 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1127 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1135 if (bit_off == pcpu_chunk_map_bits(chunk))
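
pcpu_find_block_fit() appears above only as its scan loop and terminating check. The overall shape, scan fit regions, optionally require the range to be populated for atomic allocations, and report -1 when the iterator runs off the end of the chunk, is sketched below. The example_ function is illustrative and deliberately starts the scan at bit 0; the real function first consults the chunk-level hint (see the comment at source line 1119) to bail out or skip ahead.

static int example_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
				  size_t align, bool pop_only)
{
	int bit_off = 0, bits = 0, next_off;

	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;		/* fit found, populated if required */

		bit_off = next_off;	/* skip past the unpopulated hole */
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;		/* iterator ran off the end: no fit here */

	return bit_off;
}
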
1199 * @chunk: chunk of interest
1213 * Allocated addr offset in @chunk on success.
1216 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1219 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1226 oslot = pcpu_chunk_slot(chunk);
1232 pcpu_chunk_map_bits(chunk));
1233 bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1239 pcpu_block_update_scan(chunk, area_off, area_bits);
1242 bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1245 set_bit(bit_off, chunk->bound_map);
1246 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1247 set_bit(bit_off + alloc_bits, chunk->bound_map);
1249 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1254 chunk->alloc_map,
1255 pcpu_chunk_map_bits(chunk),
1258 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1260 pcpu_chunk_relocate(chunk, oslot);
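
The three bound_map operations at source lines 1245-1247 encode allocation boundaries: a set bit marks where an area starts, the interior of the new allocation is cleared, and another set bit marks where the next area (or free space) begins. A worked sketch of the state left behind, with made-up offsets and bitmap size:

/*
 * Sketch: what pcpu_alloc_area() leaves in bound_map for a 3-bit
 * allocation at bit offset 5.  The bitmap size (64) and the offsets
 * are illustrative values only.
 */
static void example_mark_bounds(void)
{
	DECLARE_BITMAP(bound_map, 64);
	int bit_off = 5, alloc_bits = 3;

	bitmap_zero(bound_map, 64);

	set_bit(bit_off, bound_map);				/* start boundary        */
	bitmap_clear(bound_map, bit_off + 1, alloc_bits - 1);	/* clear the interior    */
	set_bit(bit_off + alloc_bits, bound_map);		/* end boundary at bit 8 */
}
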
1267 * @chunk: chunk of interest
1268 * @off: addr offset into chunk
1276 static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1278 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1282 pcpu_stats_area_dealloc(chunk);
1284 oslot = pcpu_chunk_slot(chunk);
1289 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1292 bitmap_clear(chunk->alloc_map, bit_off, bits);
1297 chunk->free_bytes += freed;
1302 pcpu_block_update_hint_free(chunk, bit_off, bits);
1304 pcpu_chunk_relocate(chunk, oslot);
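
pcpu_free_area() above shows the bound_map lookup (source line 1289) and the free_bytes update (line 1297) but not the offset-to-bit conversion that ties them together. A hedged sketch of that glue; example_free_extent() is illustrative, not a function in the file.

/* Sketch: how the byte offset passed to free maps back to a freed extent. */
static int example_free_extent(struct pcpu_chunk *chunk, int off)
{
	int bit_off = off / PCPU_MIN_ALLOC_SIZE;
	int end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
				bit_off + 1);		/* next boundary bit */

	return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;	/* bytes being freed */
}
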
1319 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1323 /* init the chunk's block */
1324 pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1326 for (md_block = chunk->md_blocks;
1327 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1333 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1337 * This is responsible for creating the chunks that serve the first chunk. The
1348 struct pcpu_chunk *chunk;
1359 /* allocate chunk */
1360 alloc_size = struct_size(chunk, populated,
1362 chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1363 if (!chunk)
1367 INIT_LIST_HEAD(&chunk->list);
1369 chunk->base_addr = (void *)aligned_addr;
1370 chunk->start_offset = start_offset;
1371 chunk->end_offset = region_size - chunk->start_offset - map_size;
1373 chunk->nr_pages = region_size >> PAGE_SHIFT;
1374 region_bits = pcpu_chunk_map_bits(chunk);
1376 alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1377 chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1378 if (!chunk->alloc_map)
1383 BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1384 chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1385 if (!chunk->bound_map)
1389 alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1390 chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1391 if (!chunk->md_blocks)
1396 /* first chunk is free to use */
1397 chunk->obj_exts = NULL;
1399 pcpu_init_md_blocks(chunk);
1402 chunk->immutable = true;
1403 bitmap_fill(chunk->populated, chunk->nr_pages);
1404 chunk->nr_populated = chunk->nr_pages;
1405 chunk->nr_empty_pop_pages = chunk->nr_pages;
1407 chunk->free_bytes = map_size;
1409 if (chunk->start_offset) {
1411 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1412 bitmap_set(chunk->alloc_map, 0, offset_bits);
1413 set_bit(0, chunk->bound_map);
1414 set_bit(offset_bits, chunk->bound_map);
1416 chunk->chunk_md.first_free = offset_bits;
1418 pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1421 if (chunk->end_offset) {
1423 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1424 bitmap_set(chunk->alloc_map,
1425 pcpu_chunk_map_bits(chunk) - offset_bits,
1428 chunk->bound_map);
1429 set_bit(region_bits, chunk->bound_map);
1431 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1435 return chunk;
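
pcpu_alloc_first_chunk() sizes its region from three quantities: the aligned region size, the start_offset that skips the unserved head, and the end_offset that pads out to the aligned end (source line 1371). A numeric sketch of that relationship, with made-up example values:

/* Illustrative numbers only: 64KiB region, 4KiB unserved head, 56KiB served. */
static void example_first_chunk_offsets(void)
{
	size_t region_size = 64 << 10;	/* aligned region backing the chunk */
	size_t start_offset = 4 << 10;	/* head not served by this chunk    */
	size_t map_size = 56 << 10;	/* bytes this chunk actually serves */
	size_t end_offset = region_size - start_offset - map_size;	/* 4KiB tail */

	(void)end_offset;
}
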
1440 struct pcpu_chunk *chunk;
1443 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1444 if (!chunk)
1447 INIT_LIST_HEAD(&chunk->list);
1448 chunk->nr_pages = pcpu_unit_pages;
1449 region_bits = pcpu_chunk_map_bits(chunk);
1451 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1452 sizeof(chunk->alloc_map[0]), gfp);
1453 if (!chunk->alloc_map)
1456 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1457 sizeof(chunk->bound_map[0]), gfp);
1458 if (!chunk->bound_map)
1461 chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1462 sizeof(chunk->md_blocks[0]), gfp);
1463 if (!chunk->md_blocks)
1468 chunk->obj_exts =
1469 pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1471 if (!chunk->obj_exts)
1476 pcpu_init_md_blocks(chunk);
1479 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1481 return chunk;
1485 pcpu_mem_free(chunk->md_blocks);
1488 pcpu_mem_free(chunk->bound_map);
1490 pcpu_mem_free(chunk->alloc_map);
1492 pcpu_mem_free(chunk);
1497 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1499 if (!chunk)
1502 pcpu_mem_free(chunk->obj_exts);
1504 pcpu_mem_free(chunk->md_blocks);
1505 pcpu_mem_free(chunk->bound_map);
1506 pcpu_mem_free(chunk->alloc_map);
1507 pcpu_mem_free(chunk);
1512 * @chunk: pcpu_chunk which got populated
1516 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1520 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1527 bitmap_set(chunk->populated, page_start, nr);
1528 chunk->nr_populated += nr;
1531 pcpu_update_empty_pages(chunk, nr);
1536 * @chunk: pcpu_chunk which got depopulated
1540 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1544 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1551 bitmap_clear(chunk->populated, page_start, nr);
1552 chunk->nr_populated -= nr;
1555 pcpu_update_empty_pages(chunk, -nr);
1561 * To allow different implementations, chunk alloc/free and
1566 * pcpu_populate_chunk - populate the specified range of a chunk
1567 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
1568 * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
1569 * pcpu_create_chunk - create a new chunk
1570 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1574 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1576 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1578 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1581 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1592 * pcpu_chunk_addr_search - determine chunk containing specified address
1593 * @addr: address for which the chunk needs to be determined.
1599 * The address of the found chunk.
1603 /* is it in the dynamic region (first chunk)? */
1643 struct pcpu_chunk *chunk, int off,
1649 if (likely(chunk && chunk->obj_exts)) {
1651 chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = objcg;
1662 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1666 if (unlikely(!chunk->obj_exts))
1669 objcg = chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup;
1672 chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = NULL;
1692 struct pcpu_chunk *chunk, int off,
1697 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1703 static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
1706 if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts)) {
1707 alloc_tag_add(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag,
1712 static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1714 if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts))
1715 alloc_tag_sub(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag, size);
1718 static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
1723 static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1732 * @reserved: allocate from the reserved chunk if available
1751 struct pcpu_chunk *chunk, *next;
1803 /* serve reserved allocations from the reserved chunk if available */
1805 chunk = pcpu_reserved_chunk;
1807 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1809 err = "alloc from reserved chunk failed";
1813 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1817 err = "alloc from reserved chunk failed";
1824 list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1826 off = pcpu_find_block_fit(chunk, bits, bit_align,
1830 pcpu_chunk_move(chunk, 0);
1834 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1836 pcpu_reintegrate_chunk(chunk);
1849 /* No space left. Create a new chunk. */
1851 chunk = pcpu_create_chunk(pcpu_gfp);
1852 if (!chunk) {
1853 err = "failed to allocate new chunk";
1858 pcpu_chunk_relocate(chunk, -1);
1866 pcpu_stats_area_alloc(chunk, size);
1880 for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
1881 WARN_ON(chunk->immutable);
1883 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1887 pcpu_free_area(chunk, off);
1891 pcpu_chunk_populated(chunk, rs, re);
1900 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1902 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1906 chunk->base_addr, off, ptr,
1909 pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1911 pcpu_alloc_tag_alloc_hook(chunk, off, size);
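
The allocation path matched above (source lines 1751-1911) is what backs the public dynamic percpu API. For orientation, a minimal caller-side sketch of that API follows; the my_counter type and the example_ wrappers are illustrative only, while alloc_percpu(), get_cpu_ptr(), put_cpu_ptr() and free_percpu() are the real interfaces.

#include <linux/percpu.h>

struct my_counter {			/* illustrative payload type */
	u64 packets;
	u64 bytes;
};

static struct my_counter __percpu *counters;

static int example_init(void)
{
	counters = alloc_percpu(struct my_counter);	/* served by pcpu_alloc() */
	return counters ? 0 : -ENOMEM;
}

static void example_account(unsigned int len)
{
	struct my_counter *c = get_cpu_ptr(counters);	/* this CPU's copy */

	c->packets++;
	c->bytes += len;
	put_cpu_ptr(counters);
}

static void example_exit(void)
{
	free_percpu(counters);				/* returns the area to its chunk */
}
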
1958 struct pcpu_chunk *chunk, *next;
1966 list_for_each_entry_safe(chunk, next, free_head, list) {
1967 WARN_ON(chunk->immutable);
1970 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1973 if (!empty_only || chunk->nr_empty_pop_pages == 0)
1974 list_move(&chunk->list, &to_free);
1981 list_for_each_entry_safe(chunk, next, &to_free, list) {
1984 for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
1985 pcpu_depopulate_chunk(chunk, rs, re);
1987 pcpu_chunk_depopulated(chunk, rs, re);
1990 pcpu_destroy_chunk(chunk);
2012 struct pcpu_chunk *chunk;
2044 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
2045 nr_unpop = chunk->nr_pages - chunk->nr_populated;
2053 /* @chunk can't go away while pcpu_alloc_mutex is held */
2054 for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
2058 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2063 pcpu_chunk_populated(chunk, rs, rs + nr);
2076 chunk = pcpu_create_chunk(gfp);
2079 if (chunk) {
2080 pcpu_chunk_relocate(chunk, -1);
2093 * populated pages threshold, reintegrate the chunk if it has empty free pages.
2094 * Each chunk is scanned in the reverse order to keep populated pages close to
2095 * the beginning of the chunk.
2103 struct pcpu_chunk *chunk;
2112 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2117 while ((chunk = list_first_entry_or_null(
2120 WARN_ON(chunk->immutable);
2123 * Scan chunk's pages in the reverse order to keep populated
2124 * pages close to the beginning of the chunk.
2126 freed_page_start = chunk->nr_pages;
2129 for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2131 if (chunk->nr_empty_pop_pages == 0)
2134 /* reintegrate chunk to prevent atomic alloc failures */
2144 * (first) page in the chunk.
2146 block = chunk->md_blocks + i;
2148 test_bit(i, chunk->populated)) {
2161 pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2165 pcpu_chunk_depopulated(chunk, i + 1, end + 1);
2173 /* batch tlb flush per chunk to amortize cost */
2176 pcpu_post_unmap_tlb_flush(chunk,
2183 if (reintegrate || chunk->free_bytes == pcpu_unit_size)
2184 pcpu_reintegrate_chunk(chunk);
2186 list_move_tail(&chunk->list,
2195 * For each chunk type, manage the number of fully free chunks and the number of
2232 struct pcpu_chunk *chunk;
2243 chunk = pcpu_chunk_addr_search(addr);
2244 off = addr - chunk->base_addr;
2247 size = pcpu_free_area(chunk, off);
2249 pcpu_alloc_tag_free_hook(chunk, off, size);
2251 pcpu_memcg_free_hook(chunk, off, size);
2255 * If the chunk is isolated, it may be in the process of being
2256 * reclaimed. Let reclaim manage cleaning up of that chunk.
2258 if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
2262 if (pos != chunk) {
2266 } else if (pcpu_should_reclaim_chunk(chunk)) {
2267 pcpu_isolate_chunk(chunk);
2271 trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2330 * percpu allocator has special setup for the first chunk, which currently
2336 * first chunk. But the current code reflects better how percpu allocator
2354 * aren't in the first chunk.
2356 * The address check is against full chunk sizes. pcpu_base_addr
2357 * points to the beginning of the first chunk including the
2358 * static region. Assumes good intent as the first chunk may
2501 * pcpu_setup_first_chunk - initialize the first percpu chunk
2505 * Initialize the first percpu chunk which contains the kernel static
2510 * chunk and prime the dynamic percpu allocator.
2515 * reserve after the static area in the first chunk. This reserves
2516 * the first chunk such that it's available only through reserved
2523 * allocation in the first chunk. The area between @ai->static_size +
2543 * The caller should have mapped the first chunk at @base_addr and
2546 * The first chunk will always contain a static and a dynamic region.
2547 * However, the static region is not managed by any chunk. If the first
2548 * chunk also contains a reserved region, it is served by two chunks -
2551 * The chunk serving the dynamic region is circulated in the chunk slots
2552 * and available for dynamic allocation like any other chunk.
2679 * Allocate chunk slots. The slots after the active slots are:
2701 * dynamic region. The first chunk ends page aligned by
2710 * Initialize first chunk:
2711 * This chunk is broken up into 3 parts:
2713 * - static - there is no backing chunk because these allocations can
2718 * chunk.
2730 /* include all regions of the first chunk */
2787 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2991 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2998 * This is a helper to ease setting up embedded first percpu chunk and
3001 * If this function is used to setup the first chunk, it is allocated
3006 * This enables the first chunk to piggy back on the linear physical
3197 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3202 * chunk and can be called where pcpu_setup_first_chunk() is expected.