mm/mm_init.c: lines matching "manual", "span", "operation", "config"
1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm_init.c - Memory initialisation verification and debugging
18 #include <linux/page-isolation.h>
42 /* The zonelists are simply reported; validation is manual. */
62 zonelist = &pgdat->node_zonelists[listid];
63 zone = &pgdat->node_zones[zoneid];
70 zone->name);
74 pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
86 width = shift - NR_NON_PAGEFLAG_BITS;
112 "Node/Zone ID: %lu -> %lu\n",
116 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
128 shift -= SECTIONS_WIDTH;
132 shift -= NODES_WIDTH;
136 shift -= ZONES_WIDTH;
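/*
 * Illustrative sketch, not part of mm_init.c: how the "shift -= ...WIDTH"
 * sequence above lays the section/node/zone fields out from the top of
 * page->flags. The widths below are assumptions (a 64-bit kernel with
 * SPARSEMEM_VMEMMAP, so SECTIONS_WIDTH = 0, plus NODES_WIDTH = 10 and
 * ZONES_WIDTH = 3); the real values come from the kernel config.
 */
#include <stdio.h>

int main(void)
{
	unsigned long shift = 64;		/* BITS_PER_LONG, assumed */
	const unsigned long sections_width = 0;	/* assumed */
	const unsigned long nodes_width = 10;	/* assumed */
	const unsigned long zones_width = 3;	/* assumed */

	shift -= sections_width;	/* section number sits topmost */
	printf("SECTIONS_PGSHIFT = %lu\n", shift);
	shift -= nodes_width;		/* node id packed just below it */
	printf("NODES_PGSHIFT    = %lu\n", shift);
	shift -= zones_width;		/* zone index below the node id */
	printf("ZONES_PGSHIFT    = %lu\n", shift);
	/* bits below the final shift remain free for the page flags */
	return 0;
}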
213 return -ENOMEM;
242 return -EINVAL;
304 unsigned long pages = end_pfn - start_pfn;
321 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
330 VM_BUG_ON(zone_index == -1);
437 * Round up so that ZONE_MOVABLE is at least as large as what
443 corepages = totalpages - required_movablecore;
491 - start_pfn;
493 kernelcore_remaining -= min(kernel_pages,
495 required_kernelcore -= min(kernel_pages,
515 * start_pfn->end_pfn. Calculate size_pages as the
518 size_pages = end_pfn - start_pfn;
528 required_kernelcore -= min(required_kernelcore,
530 kernelcore_remaining -= size_pages;
542 usable_nodes--;
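/*
 * Illustrative sketch, not part of mm_init.c: the movablecore/kernelcore
 * arithmetic above with made-up figures (4M total pages, a 1M-page
 * movablecore= request, 4 usable nodes). The real code re-runs the
 * distribution loop, dropping fully-used nodes via usable_nodes--.
 */
#include <stdio.h>

int main(void)
{
	unsigned long totalpages = 4UL << 20;		/* assumed */
	unsigned long required_movablecore = 1UL << 20;	/* assumed */
	int usable_nodes = 4;				/* assumed */

	/* whatever is not promised to ZONE_MOVABLE must stay kernelcore */
	unsigned long required_kernelcore = totalpages - required_movablecore;
	/* spread kernelcore evenly across the nodes that still have room */
	unsigned long kernelcore_node = required_kernelcore / usable_nodes;

	printf("each node keeps %lu pages of kernelcore\n", kernelcore_node);
	return 0;
}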
570 atomic_set(&page->_mapcount, -1);
574 INIT_LIST_HEAD(&page->lru);
605 if (state->last_start <= pfn && pfn < state->last_end)
606 return state->last_nid;
610 state->last_start = start_pfn;
611 state->last_end = end_pfn;
612 state->last_nid = nid;
655 pgdat->first_deferred_pfn = ULONG_MAX;
661 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
679 /* Always populate low zones for address-constrained allocations */
683 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
701 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
702 NODE_DATA(nid)->first_deferred_pfn = pfn;
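/*
 * Illustrative sketch, not part of mm_init.c: the shape of the deferral
 * decision above. PAGES_PER_SECTION is assumed to be 32768 (128M sections
 * with 4K pages), and "threshold" stands in for the zone-size based limit
 * the real defer_init() computes.
 */
#include <stdbool.h>

#define SKETCH_PAGES_PER_SECTION 32768UL	/* assumed */

static bool sketch_should_defer(unsigned long pfn,
				unsigned long nr_initialised,
				unsigned long threshold)
{
	/*
	 * Defer only once enough low pages exist for early allocations
	 * and pfn sits on a section boundary, so the deferred range
	 * starts section-aligned.
	 */
	return nr_initialised > threshold &&
	       (pfn & (SKETCH_PAGES_PER_SECTION - 1)) == 0;
}

int main(void)
{
	/* past the threshold and section aligned: defer from here on */
	return sketch_should_defer(32768, 40000, 20000) ? 0 : 1;
}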
719 struct zone *zone = &pgdat->node_zones[zid];
804 * - physical memory bank size is not necessarily an exact multiple of the
806 * - early reserved memory may not be listed in memblock.memory
807 * - non-memory regions covered by the contiguous flatmem mapping
808 * - memory layouts defined with the memmap= kernel parameter may not align
812 * - PG_reserved is set
813 * - zone and node links point to zone and node that span the page if the
815 * - zone and node links point to adjacent zone/node if the hole falls on
829 pfn = pageblock_end_pfn(pfn) - 1;
843 * Initially all pages are reserved - free ones are freed
845 * done. Non-atomic initialization, single-pass.
859 if (highest_memmap_pfn < end_pfn - 1)
860 highest_memmap_pfn = end_pfn - 1;
874 if (start_pfn == altmap->base_pfn)
875 start_pfn += altmap->reserve;
876 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
882 * There can be holes in boot-time mem_map[]s handed to this
923 unsigned long zone_start_pfn = zone->zone_start_pfn;
924 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
933 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
952 struct zone *zone = node->node_zones + j;
991 * We can use the non-atomic __set_bit operation for setting
997 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
999 * ever freed or placed on a driver-private list.
1001 page->pgmap = pgmap;
1002 page->zone_device_data = NULL;
1008 * the address space during boot when many long-lived
1023 if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
1024 pgmap->type == MEMORY_DEVICE_COHERENT)
1052 unsigned int order = pgmap->vmemmap_shift;
1059 prep_compound_tail(head, pfn - head_pfn);
1078 struct pglist_data *pgdat = zone->zone_pgdat;
1083 int nid = pgdat->node_id;
1094 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1095 nr_pages = end_pfn - start_pfn;
1111 nr_pages, jiffies_to_msecs(jiffies - start));
1159 unsigned long nr_absent = range_end_pfn - range_start_pfn;
1166 nr_absent -= end_pfn - start_pfn;
1172 * absent_pages_in_range - Return number of page frames in holes within a range
1215 nr_absent += end_pfn - start_pfn;
1219 nr_absent += end_pfn - start_pfn;
1228 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1255 return *zone_end_pfn - *zone_start_pfn;
1262 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
1263 z->zone_start_pfn = 0;
1264 z->spanned_pages = 0;
1265 z->present_pages = 0;
1267 z->present_early_pages = 0;
1271 pgdat->node_spanned_pages = 0;
1272 pgdat->node_present_pages = 0;
1273 pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
1290 nr_all_pages += end_pfn - start_pfn;
1295 nr_kernel_pages += end_pfn - start_pfn;
1308 struct zone *zone = pgdat->node_zones + i;
1313 spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
1318 absent = zone_absent_pages_in_node(pgdat->node_id, i,
1322 real_size = spanned - absent;
1325 zone->zone_start_pfn = zone_start_pfn;
1327 zone->zone_start_pfn = 0;
1328 zone->spanned_pages = spanned;
1329 zone->present_pages = real_size;
1331 zone->present_early_pages = real_size;
1338 pgdat->node_spanned_pages = totalpages;
1339 pgdat->node_present_pages = realtotalpages;
1340 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
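/*
 * Illustrative sketch, not part of mm_init.c: the spanned/absent/present
 * bookkeeping above with made-up numbers. A zone spanning pfns
 * 0x10000-0x18000 with a 0x1000-page hole ends up with 0x7000 present
 * pages.
 */
#include <assert.h>

int main(void)
{
	unsigned long zone_start_pfn = 0x10000;	/* assumed */
	unsigned long zone_end_pfn = 0x18000;	/* assumed */
	unsigned long absent = 0x1000;		/* pages in holes, assumed */

	unsigned long spanned = zone_end_pfn - zone_start_pfn;
	unsigned long present = spanned - absent;	/* real_size above */

	assert(spanned == 0x8000);
	assert(present == 0x7000);
	return 0;
}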
1346 struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
1348 spin_lock_init(&ds_queue->split_queue_lock);
1349 INIT_LIST_HEAD(&ds_queue->split_queue);
1350 ds_queue->split_queue_len = 0;
1359 init_waitqueue_head(&pgdat->kcompactd_wait);
1375 init_waitqueue_head(&pgdat->kswapd_wait);
1376 init_waitqueue_head(&pgdat->pfmemalloc_wait);
1379 init_waitqueue_head(&pgdat->reclaim_wait[i]);
1382 lruvec_init(&pgdat->__lruvec);
1388 atomic_long_set(&zone->managed_pages, remaining_pages);
1390 zone->name = zone_names[idx];
1391 zone->zone_pgdat = NODE_DATA(nid);
1392 spin_lock_init(&zone->lock);
1401 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1402 zone->free_area[order].nr_free = 0;
1406 INIT_LIST_HEAD(&zone->unaccepted_pages);
1414 struct pglist_data *pgdat = zone->zone_pgdat;
1417 if (zone_idx > pgdat->nr_zones)
1418 pgdat->nr_zones = zone_idx;
1420 zone->zone_start_pfn = zone_start_pfn;
1423 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
1424 pgdat->node_id,
1429 zone->initialized = 1;
1434 * Calculate the size of the zone->blockflags rounded to an unsigned long
1444 zonesize += zone_start_pfn & (pageblock_nr_pages - 1);
1455 unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
1456 zone->spanned_pages);
1457 zone->pageblock_flags = NULL;
1459 zone->pageblock_flags =
1462 if (!zone->pageblock_flags)
1464 usemapsize, zone->name, zone_to_nid(zone));
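/*
 * Illustrative sketch, not part of mm_init.c: the usemap_size() rounding
 * above with made-up inputs. Assumes pageblock_order = 9 (512-page
 * pageblocks), NR_PAGEBLOCK_BITS = 4 and 64-bit longs.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long pageblock_order = 9;	/* assumed */
	const unsigned long pageblock_nr_pages = 1UL << pageblock_order;
	const unsigned long nr_pageblock_bits = 4;	/* assumed */
	const unsigned long bits_per_long = 64;		/* assumed */
	unsigned long zone_start_pfn = 0x100;	/* assumed, mid-pageblock */
	unsigned long zonesize = 0x20000;	/* assumed, in pages */

	/* pull the start back to a pageblock boundary */
	zonesize += zone_start_pfn & (pageblock_nr_pages - 1);
	/* whole pageblocks, then bits, then round bits to whole longs */
	unsigned long blocks = (zonesize + pageblock_nr_pages - 1) >>
			       pageblock_order;
	unsigned long bits = blocks * nr_pageblock_bits;
	bits = (bits + bits_per_long - 1) & ~(bits_per_long - 1);

	printf("pageblock_flags bitmap: %lu bytes\n", bits / 8);
	return 0;
}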
1496 * is unused as pageblock_order is set at compile-time. See
1497 * include/linux/pageblock-flags.h for the values of pageblock_order based on
1498 * the kernel config.
1508 * - init pgdat internals
1509 * - init all zones belonging to this node
1516 int nid = pgdat->node_id;
1522 if (pgdat->per_cpu_nodestats == &boot_nodestats)
1523 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1530 pgdat->nr_zones = 0;
1531 pgdat->kswapd_order = 0;
1532 pgdat->kswapd_highest_zoneidx = 0;
1533 pgdat->node_start_pfn = 0;
1534 pgdat->node_present_pages = 0;
1539 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
1544 * When memory is hot-added, all the memory is in offline state. So
1549 struct zone *zone = pgdat->node_zones + z;
1551 zone->present_pages = 0;
1560 int nid = pgdat->node_id;
1563 pgdat->per_cpu_nodestats = &boot_nodestats;
1566 struct zone *zone = pgdat->node_zones + j;
1567 unsigned long size = zone->spanned_pages;
1570 * Initialize zone->managed_pages as 0; it will be reset
1573 zone_init_internals(zone, j, nid, zone->present_pages);
1579 init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1610 if (!pgdat->node_spanned_pages)
1613 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
1614 offset = pgdat->node_start_pfn - start;
1621 size = (end - start) * sizeof(struct page);
1623 pgdat->node_id, false);
1626 size, pgdat->node_id);
1627 pgdat->node_mem_map = map + offset;
1630 __func__, pgdat->node_id, (unsigned long)pgdat,
1631 (unsigned long)pgdat->node_mem_map);
1635 mem_map = NODE_DATA(0)->node_mem_map;
1636 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
1637 mem_map -= offset;
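/*
 * Illustrative sketch, not part of mm_init.c: the FLATMEM mem_map sizing
 * above with made-up numbers. Assumes MAX_ORDER_NR_PAGES = 1024 and a
 * 64-byte struct page.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long max_order_nr_pages = 1024;	/* assumed */
	const unsigned long page_struct_size = 64;	/* assumed */
	unsigned long node_start_pfn = 0x10300;		/* assumed */
	unsigned long node_end_pfn = 0x20000;		/* assumed */

	/* the map must cover whole max-order blocks at both ends */
	unsigned long start = node_start_pfn & ~(max_order_nr_pages - 1);
	unsigned long offset = node_start_pfn - start;
	unsigned long end = (node_end_pfn + max_order_nr_pages - 1) &
			    ~(max_order_nr_pages - 1);
	unsigned long size = (end - start) * page_struct_size;

	printf("map spans pfns %#lx-%#lx: %lu bytes, node offset %lu\n",
	       start, end, size, offset);
	return 0;
}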
1646 * get_pfn_range_for_nid - Return the start and end page frames for a node
1661 *start_pfn = -1UL;
1669 if (*start_pfn == -1UL)
1680 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
1684 pgdat->node_id = nid;
1685 pgdat->node_start_pfn = start_pfn;
1686 pgdat->per_cpu_nodestats = NULL;
1689 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
1691 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
1712 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
1713 struct zone *zone = &pgdat->node_zones[zone_type];
1716 node_set_state(pgdat->node_id, N_HIGH_MEMORY);
1718 node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
1747 * free_area_init - Initialise all pg_data_t and zone data
1776 zone = MAX_NR_ZONES - i - 1;
1799 pr_info(" %-8s ", zone_names[i]);
1804 pr_cont("[mem %#018Lx-%#018Lx]\n",
1808 << PAGE_SHIFT) - 1);
1821 * subsection-map relative to active online memory ranges to
1822 * enable future "sub-section" extensions of the memory map.
1826 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1828 ((u64)end_pfn << PAGE_SHIFT) - 1);
1829 subsection_map_init(start_pfn, end_pfn - start_pfn);
1848 * for a memory-less node because here it's not marked as N_MEMORY
1851 * memory-less node. The pgdat will get fully initialized by
1854 if (pgdat->node_present_pages) {
1868 * node_map_pfn_alignment - determine the maximum internode alignment
1875 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
1879 * This is used to test whether pfn -> nid mapping of the chosen memory
1901 * Start with a mask granular enough to pinpoint the
1902 * start pfn and tick off bits one-by-one until it becomes
1905 mask = ~((1 << __ffs(start)) - 1);
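/*
 * Illustrative sketch, not part of mm_init.c: the alignment mask above
 * with a made-up start pfn; __builtin_ctzl stands in for the kernel's
 * __ffs().
 */
#include <assert.h>

int main(void)
{
	unsigned long start = 0x2800;	/* assumed node start pfn */

	/* keep only the bits above the lowest set bit of start */
	unsigned long mask = ~((1UL << __builtin_ctzl(start)) - 1);

	/* lowest set bit is bit 11, so the node is 2048-pfn aligned */
	assert(mask == ~0x7ffUL);
	return 0;
}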
1929 /* Free a large naturally-aligned chunk if possible */
1966 unsigned long nr_pages = end_pfn - pfn;
1976 * This function is meant to pre-load the iterator for the zone init from
2055 deferred_free_pages(spfn, t - spfn);
2094 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2107 first_init_pfn = pgdat->first_deferred_pfn;
2115 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2116 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2117 pgdat->first_deferred_pfn = ULONG_MAX;
2122 * pre-grown prior to start of deferred page initialization.
2127 zone = pgdat->node_zones + pgdat->nr_zones - 1;
2137 .size = first_init_pfn - spfn,
2148 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone));
2151 pgdat->node_id, jiffies_to_msecs(jiffies - start));
2171 pg_data_t *pgdat = zone->zone_pgdat;
2172 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2187 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2195 pgdat->first_deferred_pfn = ULONG_MAX;
2222 pgdat->first_deferred_pfn = spfn;
2239 } while (++p, --i);
2248 page_zone(page)->cma_pages += pageblock_nr_pages;
2254 unsigned long block_start_pfn = zone->zone_start_pfn;
2271 zone->contiguous = true;
2293 * on-demand struct page initialization.
2327 * Because 32-bit systems cannot have large physical memory, where this scaling
2338 * - it is assumed that the hash table must contain an exact power-of-2
2340 * - limit is the number of hash buckets, not the total allocation size
2380 numentries >>= (scale - PAGE_SHIFT);
2382 numentries <<= (PAGE_SHIFT - scale);
2420 * If bucketsize is not a power-of-two, we may free
2427 } while (!table && size > PAGE_SIZE && --log2qty);
2433 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
2439 *_hash_mask = (1 << log2qty) - 1;
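/*
 * Illustrative sketch, not part of mm_init.c: the hash sizing arithmetic
 * above with made-up figures. Assumes PAGE_SHIFT = 12 and scale = 17,
 * i.e. one hash entry per 128K of memory, on a machine with 4G (2^20
 * pages).
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_shift = 12;	/* assumed */
	const unsigned long scale = 17;		/* assumed */
	unsigned long numentries = 1UL << 20;	/* pages of memory, assumed */

	/* scale pages of memory down (or up) to hash entries */
	if (scale > page_shift)
		numentries >>= (scale - page_shift);
	else
		numentries <<= (page_shift - scale);

	/* the table must hold an exact power-of-2 number of buckets */
	unsigned long log2qty = 0;
	while ((1UL << log2qty) < numentries)
		log2qty++;

	printf("%lu buckets, hash mask %#lx\n",
	       1UL << log2qty, (1UL << log2qty) - 1);
	return 0;
}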
2516 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2538 pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2559 /* Report memory auto-initialization states for this boot. */
2577 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2581 pr_info("mem auto-init: clearing system memory may take some time...\n");
2590 codesize = _etext - _stext;
2591 datasize = _edata - _sdata;
2592 rosize = __end_rodata - __start_rodata;
2593 bss_size = __bss_stop - __bss_start;
2594 init_data_size = __init_end - __init_begin;
2595 init_code_size = _einittext - _sinittext;
2607 size -= adj; \
2619 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2627 K(physpages - totalram_pages() - totalcma_pages),
2670 /* Should be run before the first non-init thread is created */