Lines Matching +full:r +full:- +full:tile
1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm_init.c - Memory initialisation verification and debugging
18 #include <linux/page-isolation.h>
78 zonelist = &pgdat->node_zonelists[listid]; in mminit_verify_zonelist()
79 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
86 zone->name); in mminit_verify_zonelist()
90 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); in mminit_verify_zonelist()
102 width = shift - NR_NON_PAGEFLAG_BITS; in mminit_verify_pageflags_layout()
128 "Node/Zone ID: %lu -> %lu\n", in mminit_verify_pageflags_layout()
132 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", in mminit_verify_pageflags_layout()
144 shift -= SECTIONS_WIDTH; in mminit_verify_pageflags_layout()
148 shift -= NODES_WIDTH; in mminit_verify_pageflags_layout()
152 shift -= ZONES_WIDTH; in mminit_verify_pageflags_layout()
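
The three subtractions above carve page->flags fields from the top bit down. A minimal standalone sketch of that arithmetic (the widths here are illustrative placeholders, not the kernel's config-dependent values):

#include <stdio.h>

/* Hypothetical widths; the real ones depend on CONFIG_* options. */
#define TEST_BITS_PER_LONG	64
#define TEST_SECTIONS_WIDTH	0
#define TEST_NODES_WIDTH	10
#define TEST_ZONES_WIDTH	3

int main(void)
{
	int shift = TEST_BITS_PER_LONG;

	/* Carve fields from the top of page->flags, high bits first. */
	shift -= TEST_SECTIONS_WIDTH;
	printf("sections start at bit %d\n", shift);
	shift -= TEST_NODES_WIDTH;
	printf("nodes start at bit %d\n", shift);
	shift -= TEST_ZONES_WIDTH;
	printf("zones start at bit %d\n", shift);
	return 0;
}

Each field's shift is just the running remainder, which is why the order of subtraction fixes the layout.
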
229 return -ENOMEM; in mm_sysfs_init()
258 return -EINVAL; in cmdline_parse_core()
320 unsigned long pages = end_pfn - start_pfn; in early_calculate_totalpages()
337 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { in find_usable_zone_for_movable()
346 VM_BUG_ON(zone_index == -1); in find_usable_zone_for_movable()
365 struct memblock_region *r; in find_zone_movable_pfns_for_nodes() local
375 for_each_mem_region(r) { in find_zone_movable_pfns_for_nodes()
376 if (!memblock_is_hotpluggable(r)) in find_zone_movable_pfns_for_nodes()
379 nid = memblock_get_region_node(r); in find_zone_movable_pfns_for_nodes()
381 usable_startpfn = memblock_region_memory_base_pfn(r); in find_zone_movable_pfns_for_nodes()
406 for_each_mem_region(r) { in find_zone_movable_pfns_for_nodes()
407 if (memblock_is_mirror(r)) in find_zone_movable_pfns_for_nodes()
410 nid = memblock_get_region_node(r); in find_zone_movable_pfns_for_nodes()
412 usable_startpfn = memblock_region_memory_base_pfn(r); in find_zone_movable_pfns_for_nodes()
453 * Round up so that ZONE_MOVABLE is at least as large as what in find_zone_movable_pfns_for_nodes()
459 corepages = totalpages - required_movablecore; in find_zone_movable_pfns_for_nodes()
507 - start_pfn; in find_zone_movable_pfns_for_nodes()
509 kernelcore_remaining -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
511 required_kernelcore -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
531 * start_pfn->end_pfn. Calculate size_pages as the in find_zone_movable_pfns_for_nodes()
534 size_pages = end_pfn - start_pfn; in find_zone_movable_pfns_for_nodes()
544 required_kernelcore -= min(required_kernelcore, in find_zone_movable_pfns_for_nodes()
546 kernelcore_remaining -= size_pages; in find_zone_movable_pfns_for_nodes()
558 usable_nodes--; in find_zone_movable_pfns_for_nodes()
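
The fragments above come from the loop that spreads required_kernelcore evenly over the nodes that still have usable memory, retrying until the request is satisfied or no node has room left. A simplified userspace model of that spread, with invented per-node page counts:

#include <stdio.h>

#define NR_NODES 3

int main(void)
{
	/* Hypothetical per-node pages usable for the kernel. */
	unsigned long usable[NR_NODES] = { 1000, 300, 50 };
	unsigned long required_kernelcore = 900;
	int usable_nodes = NR_NODES;

	while (required_kernelcore && usable_nodes) {
		/* Even share for each node that still has room. */
		unsigned long share = required_kernelcore / usable_nodes;

		if (!share)
			share = required_kernelcore;
		for (int nid = 0; nid < NR_NODES && required_kernelcore; nid++) {
			unsigned long got;

			if (!usable[nid])
				continue;
			got = usable[nid] < share ? usable[nid] : share;
			if (got > required_kernelcore)
				got = required_kernelcore;
			usable[nid] -= got;
			required_kernelcore -= got;
			if (!usable[nid])
				usable_nodes--;	/* node exhausted, redo spread */
		}
	}
	printf("unsatisfied kernelcore: %lu pages\n", required_kernelcore);
	return 0;
}
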
586 atomic_set(&page->_mapcount, -1); in __init_single_page()
590 INIT_LIST_HEAD(&page->lru); in __init_single_page()
621 if (state->last_start <= pfn && pfn < state->last_end) in __early_pfn_to_nid()
622 return state->last_nid; in __early_pfn_to_nid()
626 state->last_start = start_pfn; in __early_pfn_to_nid()
627 state->last_end = end_pfn; in __early_pfn_to_nid()
628 state->last_nid = nid; in __early_pfn_to_nid()
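
The last_start/last_end/last_nid assignments above implement a one-entry cache, so repeated early pfn-to-nid lookups that land in the same region skip the region scan. A minimal userspace model (the region table and lookup helper are invented for illustration):

#include <stdio.h>

struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

/* Hypothetical memory layout: [start_pfn, end_pfn) -> nid. */
static const struct { unsigned long start, end; int nid; } regions[] = {
	{ 0, 0x80000, 0 },
	{ 0x80000, 0x100000, 1 },
};

static int early_pfn_to_nid(unsigned long pfn,
			    struct mminit_pfnnid_cache *state)
{
	/* Fast path: most lookups hit the range found last time. */
	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	/* Slow path: scan the regions and refill the cache. */
	for (unsigned int i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		if (regions[i].start <= pfn && pfn < regions[i].end) {
			state->last_start = regions[i].start;
			state->last_end = regions[i].end;
			state->last_nid = regions[i].nid;
			return state->last_nid;
		}
	}
	return -1;	/* hole: no node spans this pfn */
}

int main(void)
{
	struct mminit_pfnnid_cache cache = { 0 };

	printf("pfn 0x1000 -> nid %d\n", early_pfn_to_nid(0x1000, &cache));
	printf("pfn 0x90000 -> nid %d\n", early_pfn_to_nid(0x90000, &cache));
	return 0;
}
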
679 struct zone *zone = &pgdat->node_zones[zid]; in __init_page_from_nid()
693 pgdat->first_deferred_pfn = ULONG_MAX; in pgdat_set_deferred_range()
699 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) in early_page_initialised()
717 /* Always populate low zones for address-constrained allocations */ in defer_init()
721 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) in defer_init()
739 (pfn & (PAGES_PER_SECTION - 1)) == 0) { in defer_init()
740 NODE_DATA(nid)->first_deferred_pfn = pfn; in defer_init()
803 static struct memblock_region *r; in overlap_memmap_init() local
806 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { in overlap_memmap_init()
807 for_each_mem_region(r) { in overlap_memmap_init()
808 if (*pfn < memblock_region_memory_end_pfn(r)) in overlap_memmap_init()
812 if (*pfn >= memblock_region_memory_base_pfn(r) && in overlap_memmap_init()
813 memblock_is_mirror(r)) { in overlap_memmap_init()
814 *pfn = memblock_region_memory_end_pfn(r); in overlap_memmap_init()
828 * - physical memory bank size is not necessarily the exact multiple of the
830 * - early reserved memory may not be listed in memblock.memory
831 - non-memory regions covered by the contiguous flatmem mapping
832 * - memory layouts defined with memmap= kernel parameter may not align
836 - PG_reserved is set
837 * - zone and node links point to zone and node that span the page if the
839 * - zone and node links point to adjacent zone/node if the hole falls on
853 pfn = pageblock_end_pfn(pfn) - 1; in init_unavailable_range()
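
Concretely, for the holes described in the comment above, every struct page in the gap still gets initialized: PG_reserved is set and the zone/node links are borrowed from the spanning (or adjacent) zone. A toy model of that behaviour, with invented types and helper names:

#include <stdio.h>

/* Toy struct page: just the fields the hole-init cares about. */
struct toy_page {
	int reserved;	/* stands in for PG_reserved */
	int zone;
	int nid;
};

static struct toy_page memmap[16];

/* Initialize pages in [start, end) that no memblock region covers. */
static void init_unavailable(unsigned long start, unsigned long end,
			     int zone, int nid)
{
	for (unsigned long pfn = start; pfn < end; pfn++) {
		memmap[pfn].reserved = 1;	/* PG_reserved is set */
		memmap[pfn].zone = zone;	/* link to the spanning zone */
		memmap[pfn].nid = nid;
	}
}

int main(void)
{
	/* Pretend pfns 4..7 are a hole inside zone 0 of node 0. */
	init_unavailable(4, 8, 0, 0);
	printf("pfn 5: reserved=%d zone=%d nid=%d\n",
	       memmap[5].reserved, memmap[5].zone, memmap[5].nid);
	return 0;
}
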
867 * Initially all pages are reserved - free ones are freed
869 * done. Non-atomic initialization, single-pass.
883 if (highest_memmap_pfn < end_pfn - 1) in memmap_init_range()
884 highest_memmap_pfn = end_pfn - 1; in memmap_init_range()
898 if (start_pfn == altmap->base_pfn) in memmap_init_range()
899 start_pfn += altmap->reserve; in memmap_init_range()
900 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_range()
906 * There can be holes in boot-time mem_map[]s handed to this in memmap_init_range()
947 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
948 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
957 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, in memmap_init_zone_range()
976 struct zone *zone = node->node_zones + j; in memmap_init()
1015 * We can use the non-atomic __set_bit operation for setting in __init_zone_device_page()
1021 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer in __init_zone_device_page()
1023 * ever freed or placed on a driver-private list. in __init_zone_device_page()
1025 page_folio(page)->pgmap = pgmap; in __init_zone_device_page()
1026 page->zone_device_data = NULL; in __init_zone_device_page()
1032 * the address space during boot when many long-lived in __init_zone_device_page()
1052 switch (pgmap->type) { in __init_zone_device_page()
1089 unsigned int order = pgmap->vmemmap_shift; in memmap_init_compound()
1096 prep_compound_tail(head, pfn - head_pfn); in memmap_init_compound()
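
With a non-zero vmemmap_shift, each aligned group of 2^order device pages is assembled into one compound page: the first pfn becomes the head and every later pfn is set up as a tail carrying its offset from the head, which is what the prep_compound_tail() call above records. A toy model with invented types:

#include <stdio.h>

struct toy_page {
	int is_head;
	unsigned long head_offset;	/* 0 for the head itself */
};

static struct toy_page memmap[64];

static void init_compound(unsigned long head_pfn, unsigned int order)
{
	unsigned long nr = 1UL << order;

	memmap[head_pfn].is_head = 1;
	for (unsigned long pfn = head_pfn + 1; pfn < head_pfn + nr; pfn++) {
		memmap[pfn].is_head = 0;
		/* matches prep_compound_tail(head, pfn - head_pfn) */
		memmap[pfn].head_offset = pfn - head_pfn;
	}
}

int main(void)
{
	init_compound(8, 3);	/* a 2^3 = 8 page compound at pfn 8 */
	printf("pfn 11 is tail %lu of the compound at pfn 8\n",
	       memmap[11].head_offset);
	return 0;
}
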
1115 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
1120 int nid = pgdat->node_id; in memmap_init_zone_device()
1131 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_zone_device()
1132 nr_pages = end_pfn - start_pfn; in memmap_init_zone_device()
1148 nr_pages, jiffies_to_msecs(jiffies - start)); in memmap_init_zone_device()
1196 unsigned long nr_absent = range_end_pfn - range_start_pfn; in __absent_pages_in_range()
1203 nr_absent -= end_pfn - start_pfn; in __absent_pages_in_range()
1209 * absent_pages_in_range - Return number of page frames in holes within a range
1242 struct memblock_region *r; in zone_absent_pages_in_node() local
1244 for_each_mem_region(r) { in zone_absent_pages_in_node()
1245 start_pfn = clamp(memblock_region_memory_base_pfn(r), in zone_absent_pages_in_node()
1247 end_pfn = clamp(memblock_region_memory_end_pfn(r), in zone_absent_pages_in_node()
1251 memblock_is_mirror(r)) in zone_absent_pages_in_node()
1252 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
1255 !memblock_is_mirror(r)) in zone_absent_pages_in_node()
1256 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
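
The clamp pattern above is the whole hole-accounting algorithm: assume the entire range is absent, then subtract each intersection with a present memory region. A standalone model (the region list is invented):

#include <stdio.h>

static unsigned long clampul(unsigned long v, unsigned long lo,
			     unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Hypothetical present memory: [0,100) and [150,200). */
	const unsigned long mem[][2] = { { 0, 100 }, { 150, 200 } };
	unsigned long range_start = 50, range_end = 180;
	unsigned long nr_absent = range_end - range_start;

	for (unsigned int i = 0; i < 2; i++) {
		unsigned long s = clampul(mem[i][0], range_start, range_end);
		unsigned long e = clampul(mem[i][1], range_start, range_end);

		nr_absent -= e - s;	/* subtract the overlapping part */
	}
	/* [100,150) is the hole: 50 pages */
	printf("absent pages in [%lu,%lu): %lu\n",
	       range_start, range_end, nr_absent);
	return 0;
}
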
1265 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1292 return *zone_end_pfn - *zone_start_pfn; in zone_spanned_pages_in_node()
1299 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { in reset_memoryless_node_totalpages()
1300 z->zone_start_pfn = 0; in reset_memoryless_node_totalpages()
1301 z->spanned_pages = 0; in reset_memoryless_node_totalpages()
1302 z->present_pages = 0; in reset_memoryless_node_totalpages()
1304 z->present_early_pages = 0; in reset_memoryless_node_totalpages()
1308 pgdat->node_spanned_pages = 0; in reset_memoryless_node_totalpages()
1309 pgdat->node_present_pages = 0; in reset_memoryless_node_totalpages()
1310 pr_debug("On node %d totalpages: 0\n", pgdat->node_id); in reset_memoryless_node_totalpages()
1327 nr_all_pages += end_pfn - start_pfn; in calc_nr_kernel_pages()
1332 nr_kernel_pages += end_pfn - start_pfn; in calc_nr_kernel_pages()
1345 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages()
1350 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
1355 absent = zone_absent_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
1359 real_size = spanned - absent; in calculate_node_totalpages()
1362 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
1364 zone->zone_start_pfn = 0; in calculate_node_totalpages()
1365 zone->spanned_pages = spanned; in calculate_node_totalpages()
1366 zone->present_pages = real_size; in calculate_node_totalpages()
1368 zone->present_early_pages = real_size; in calculate_node_totalpages()
1375 pgdat->node_spanned_pages = totalpages; in calculate_node_totalpages()
1376 pgdat->node_present_pages = realtotalpages; in calculate_node_totalpages()
1377 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); in calculate_node_totalpages()
1383 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; in pgdat_init_split_queue()
1385 spin_lock_init(&ds_queue->split_queue_lock); in pgdat_init_split_queue()
1386 INIT_LIST_HEAD(&ds_queue->split_queue); in pgdat_init_split_queue()
1387 ds_queue->split_queue_len = 0; in pgdat_init_split_queue()
1396 init_waitqueue_head(&pgdat->kcompactd_wait); in pgdat_init_kcompactd()
1412 init_waitqueue_head(&pgdat->kswapd_wait); in pgdat_init_internals()
1413 init_waitqueue_head(&pgdat->pfmemalloc_wait); in pgdat_init_internals()
1416 init_waitqueue_head(&pgdat->reclaim_wait[i]); in pgdat_init_internals()
1419 lruvec_init(&pgdat->__lruvec); in pgdat_init_internals()
1425 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
1427 zone->name = zone_names[idx]; in zone_init_internals()
1428 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
1429 spin_lock_init(&zone->lock); in zone_init_internals()
1438 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
1439 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
1443 INIT_LIST_HEAD(&zone->unaccepted_pages); in zone_init_free_lists()
1444 INIT_WORK(&zone->unaccepted_cleanup, unaccepted_cleanup_work); in zone_init_free_lists()
1452 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
1455 if (zone_idx > pgdat->nr_zones) in init_currently_empty_zone()
1456 pgdat->nr_zones = zone_idx; in init_currently_empty_zone()
1458 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
1461 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
1462 pgdat->node_id, in init_currently_empty_zone()
1467 zone->initialized = 1; in init_currently_empty_zone()
1472 * Calculate the size of the zone->pageblock_flags rounded to an unsigned long
1482 zonesize += zone_start_pfn & (pageblock_nr_pages - 1); in usemap_size()
1493 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, in setup_usemap()
1494 zone->spanned_pages); in setup_usemap()
1495 zone->pageblock_flags = NULL; in setup_usemap()
1497 zone->pageblock_flags = in setup_usemap()
1500 if (!zone->pageblock_flags) in setup_usemap()
1502 usemapsize, zone->name, zone_to_nid(zone)); in setup_usemap()
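
usemap_size() pads the zone out to whole pageblocks, allots NR_PAGEBLOCK_BITS per pageblock, and rounds the bit count up to whole longs before converting to bytes. A standalone version with illustrative constants (the kernel's pageblock_order is architecture- and config-dependent):

#include <stdio.h>

#define PAGEBLOCK_ORDER		9	/* illustrative */
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)
#define NR_PAGEBLOCK_BITS	4	/* bits stored per pageblock */
#define BITS_PER_LONG		64

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
	return (x + to - 1) / to * to;
}

static unsigned long usemap_size(unsigned long zone_start_pfn,
				 unsigned long zonesize)
{
	unsigned long usemapsize;

	/* Account for a zone start that is not pageblock aligned. */
	zonesize += zone_start_pfn & (PAGEBLOCK_NR_PAGES - 1);
	usemapsize = roundup_ul(zonesize, PAGEBLOCK_NR_PAGES);
	usemapsize >>= PAGEBLOCK_ORDER;		/* number of pageblocks */
	usemapsize *= NR_PAGEBLOCK_BITS;	/* bits needed */
	usemapsize = roundup_ul(usemapsize, BITS_PER_LONG);

	return usemapsize / 8;			/* bytes */
}

int main(void)
{
	printf("usemap for 1M pages: %lu bytes\n", usemap_size(0, 1UL << 20));
	return 0;
}
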
1534 * is unused as pageblock_order is set at compile-time. See
1535 * include/linux/pageblock-flags.h for the values of pageblock_order based on
1546 * - init pgdat internals
1547 * - init all zones belonging to this node
1554 int nid = pgdat->node_id; in free_area_init_core_hotplug()
1560 if (pgdat->per_cpu_nodestats == &boot_nodestats) in free_area_init_core_hotplug()
1561 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); in free_area_init_core_hotplug()
1568 pgdat->nr_zones = 0; in free_area_init_core_hotplug()
1569 pgdat->kswapd_order = 0; in free_area_init_core_hotplug()
1570 pgdat->kswapd_highest_zoneidx = 0; in free_area_init_core_hotplug()
1571 pgdat->node_start_pfn = 0; in free_area_init_core_hotplug()
1572 pgdat->node_present_pages = 0; in free_area_init_core_hotplug()
1577 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in free_area_init_core_hotplug()
1582 * When memory is hot-added, all the memory is in offline state. So in free_area_init_core_hotplug()
1587 struct zone *zone = pgdat->node_zones + z; in free_area_init_core_hotplug()
1589 zone->present_pages = 0; in free_area_init_core_hotplug()
1598 int nid = pgdat->node_id; in free_area_init_core()
1601 pgdat->per_cpu_nodestats = &boot_nodestats; in free_area_init_core()
1604 struct zone *zone = pgdat->node_zones + j; in free_area_init_core()
1605 unsigned long size = zone->spanned_pages; in free_area_init_core()
1608 * Initialize zone->managed_pages as 0, it will be reset in free_area_init_core()
1611 zone_init_internals(zone, j, nid, zone->present_pages); in free_area_init_core()
1617 init_currently_empty_zone(zone, zone->zone_start_pfn, size); in free_area_init_core()
1652 if (!pgdat->node_spanned_pages) in alloc_node_mem_map()
1655 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); in alloc_node_mem_map()
1656 offset = pgdat->node_start_pfn - start; in alloc_node_mem_map()
1663 size = (end - start) * sizeof(struct page); in alloc_node_mem_map()
1665 pgdat->node_id, false); in alloc_node_mem_map()
1668 size, pgdat->node_id); in alloc_node_mem_map()
1669 pgdat->node_mem_map = map + offset; in alloc_node_mem_map()
1672 __func__, pgdat->node_id, (unsigned long)pgdat, in alloc_node_mem_map()
1673 (unsigned long)pgdat->node_mem_map); in alloc_node_mem_map()
1678 mem_map = pgdat->node_mem_map; in alloc_node_mem_map()
1679 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) in alloc_node_mem_map()
1680 mem_map -= offset; in alloc_node_mem_map()
1682 max_mapnr = end - start; in alloc_node_mem_map()
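
The start/offset pair above exists because the node's mem_map allocation begins at node_start_pfn rounded down to a MAX_ORDER boundary, so node_mem_map must point offset entries into the allocated array. In numbers (the alignment value is illustrative):

#include <stdio.h>

#define MAX_ORDER_NR_PAGES	(1UL << 10)	/* illustrative alignment */

int main(void)
{
	unsigned long node_start_pfn = 0x12345;
	/* Round the map's first pfn down to a MAX_ORDER boundary. */
	unsigned long start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	unsigned long offset = node_start_pfn - start;

	printf("map covers pfn 0x%lx.., node_mem_map = map + %lu\n",
	       start, offset);
	return 0;
}
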
1689 * get_pfn_range_for_nid - Return the start and end page frames for a node
1704 *start_pfn = -1UL; in get_pfn_range_for_nid()
1712 if (*start_pfn == -1UL) in get_pfn_range_for_nid()
1723 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); in free_area_init_node()
1727 pgdat->node_id = nid; in free_area_init_node()
1728 pgdat->node_start_pfn = start_pfn; in free_area_init_node()
1729 pgdat->per_cpu_nodestats = NULL; in free_area_init_node()
1732 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, in free_area_init_node()
1734 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); in free_area_init_node()
1755 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { in check_for_memory()
1756 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory()
1759 node_set_state(pgdat->node_id, N_HIGH_MEMORY); in check_for_memory()
1761 node_set_state(pgdat->node_id, N_NORMAL_MEMORY); in check_for_memory()
1807 high_memory = phys_to_virt(highmem - 1) + 1; in set_high_memory()
1811 * free_area_init - Initialise all pg_data_t and zone data
1840 zone = MAX_NR_ZONES - i - 1; in free_area_init()
1863 pr_info(" %-8s ", zone_names[i]); in free_area_init()
1868 pr_cont("[mem %#018Lx-%#018Lx]\n", in free_area_init()
1872 << PAGE_SHIFT) - 1); in free_area_init()
1885 * subsection-map relative to active online memory ranges to in free_area_init()
1886 * enable future "sub-section" extensions of the memory map. in free_area_init()
1890 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, in free_area_init()
1892 ((u64)end_pfn << PAGE_SHIFT) - 1); in free_area_init()
1893 subsection_map_init(start_pfn, end_pfn - start_pfn); in free_area_init()
1912 * for memory-less node because here it's not marked as N_MEMORY in free_area_init()
1915 * memory-less node. The pgdat will get fully initialized by in free_area_init()
1918 if (pgdat->node_present_pages) { in free_area_init()
1937 * node_map_pfn_alignment - determine the maximum internode alignment
1944 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
1948 * This is used to test whether pfn -> nid mapping of the chosen memory
1970 * Start with a mask granular enough to pinpoint the in node_map_pfn_alignment()
1971 * start pfn and tick off bits one-by-one until it becomes in node_map_pfn_alignment()
1974 mask = ~((1 << __ffs(start)) - 1); in node_map_pfn_alignment()
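
For one internode boundary, the mask computation starts by isolating the lowest set bit of the boundary's start pfn, then widens one bit at a time while the previous node's end still fits under the coarser mask; ~mask + 1 converts the result to pages. A standalone example (pfn values invented, __builtin_ctzl standing in for the kernel's __ffs()):

#include <stdio.h>

/* Lowest set bit index, like the kernel's __ffs(). */
static unsigned int ffs_ul(unsigned long x)
{
	return (unsigned int)__builtin_ctzl(x);
}

int main(void)
{
	/* Hypothetical: node 0 ends at pfn 0x8000, node 1 starts there. */
	unsigned long last_end = 0x8000, start = 0x8000;
	unsigned long mask = ~((1UL << ffs_ul(start)) - 1);

	/* Widen until the mask can no longer separate the two nodes. */
	while (mask && last_end <= (start & (mask << 1)))
		mask <<= 1;

	/* ~mask + 1 converts the accumulated mask to pages. */
	printf("internode alignment: %lu pages\n", ~mask + 1);
	return 0;
}
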
1998 /* Free a large naturally-aligned chunk if possible */ in deferred_free_pages()
2035 unsigned long nr_pages = end_pfn - pfn; in deferred_init_pages()
2045 * This function is meant to pre-load the iterator for the zone init from
2124 deferred_free_pages(spfn, t - spfn); in deferred_init_maxorder()
2163 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in deferred_init_memmap()
2176 first_init_pfn = pgdat->first_deferred_pfn; in deferred_init_memmap()
2184 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); in deferred_init_memmap()
2185 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); in deferred_init_memmap()
2186 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_init_memmap()
2191 * pre-grown prior to start of deferred page initialization. in deferred_init_memmap()
2196 zone = pgdat->node_zones + pgdat->nr_zones - 1; in deferred_init_memmap()
2206 .size = first_init_pfn - spfn, in deferred_init_memmap()
2217 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
2220 pgdat->node_id, jiffies_to_msecs(jiffies - start)); in deferred_init_memmap()
2240 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2241 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; in deferred_grow_zone()
2256 if (first_deferred_pfn != pgdat->first_deferred_pfn) { in deferred_grow_zone()
2264 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_grow_zone()
2291 pgdat->first_deferred_pfn = spfn; in deferred_grow_zone()
2308 } while (++p, --i); in init_cma_reserved_pageblock()
2317 page_zone(page)->cma_pages += pageblock_nr_pages; in init_cma_reserved_pageblock()
2326 page_zone(page)->cma_pages += pageblock_nr_pages; in init_cma_pageblock()
2332 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
2349 zone->contiguous = true; in set_zone_contiguous()
2396 * on-demand struct page initialization. in page_alloc_init_late()
2430 * Because 32-bit systems cannot have large physical memory, where this scaling
2441 * - it is assumed that the hash table must contain an exact power-of-2
2443 * - limit is the number of hash buckets, not the total allocation size
2483 numentries >>= (scale - PAGE_SHIFT); in alloc_large_system_hash()
2485 numentries <<= (PAGE_SHIFT - scale); in alloc_large_system_hash()
2523 * If bucketsize is not a power of two, we may free in alloc_large_system_hash()
2530 } while (!table && size > PAGE_SIZE && --log2qty); in alloc_large_system_hash()
2536 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, in alloc_large_system_hash()
2542 *_hash_mask = (1 << log2qty) - 1; in alloc_large_system_hash()
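
The shifts above turn a memory amount into a bucket count: one entry per 2^scale bytes of memory, rounded to a power of two so that the hash mask is simply buckets minus one. A condensed model with illustrative parameters:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long nr_kernel_pages = 1UL << 20;	/* 4 GiB, illustrative */
	unsigned int scale = 17;	/* one bucket per 128 KiB */
	unsigned long numentries = nr_kernel_pages;
	unsigned int log2qty = 0;

	/* Scale entries by memory: one per 2^scale bytes. */
	if (scale > PAGE_SHIFT)
		numentries >>= (scale - PAGE_SHIFT);
	else
		numentries <<= (PAGE_SHIFT - scale);

	/* Round up to the next power of two, as the hash requires. */
	while ((1UL << log2qty) < numentries)
		log2qty++;

	printf("buckets: %lu, hash mask: 0x%lx\n",
	       1UL << log2qty, (1UL << log2qty) - 1);
	return 0;
}
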
2619 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " in mem_debugging_and_hardening_init()
2641 …pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running … in mem_debugging_and_hardening_init()
2662 /* Report memory auto-initialization states for this boot. */
2680 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", in report_meminit()
2684 pr_info("mem auto-init: clearing system memory may take some time...\n"); in report_meminit()
2693 codesize = _etext - _stext; in mem_init_print_info()
2694 datasize = _edata - _sdata; in mem_init_print_info()
2695 rosize = __end_rodata - __start_rodata; in mem_init_print_info()
2696 bss_size = __bss_stop - __bss_start; in mem_init_print_info()
2697 init_data_size = __init_end - __init_begin; in mem_init_print_info()
2698 init_code_size = _einittext - _sinittext; in mem_init_print_info()
2704 * please refer to arch/tile/kernel/vmlinux.lds.S. in mem_init_print_info()
2710 size -= adj; \ in mem_init_print_info()
2722 …(%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" in mem_init_print_info()
2730 K(physpages - totalram_pages() - totalcma_pages), in mem_init_print_info()
2785 /* Should be run before the first non-init thread is created */ in mm_core_init()