/linux/mm/

mm_init.c
    early_calculate_totalpages():
        317: unsigned long start_pfn, end_pfn;    /* local */
        320: for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
        321: unsigned long pages = end_pfn - start_pfn;
    find_zone_movable_pfns_for_nodes():
        479: unsigned long start_pfn, end_pfn;    /* local */
        497: for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
        500: start_pfn = max(start_pfn, zone_movable_pfn[nid]);
        501: if (start_pfn >= end_pfn)
        505: if (start_pfn < usable_startpfn) {
        508: - start_pfn;
        527: start_pfn = usable_startpfn;
    [all …]
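The mm_init.c hits above show the standard memblock walk: for_each_mem_pfn_range() hands back each registered memory range as a [start_pfn, end_pfn) pair plus its node. A minimal sketch of that pattern, assuming kernel context (count_memblock_pages() is an illustrative name, not a kernel symbol):

    #include <linux/memblock.h>

    /* Total the pages spanned by every memblock range, mirroring the
     * early_calculate_totalpages() loop at mm_init.c:320 above. */
    static unsigned long __init count_memblock_pages(void)
    {
            unsigned long start_pfn, end_pfn, total = 0;
            int i, nid;

            for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
                    total += end_pfn - start_pfn;

            return total;
    }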
memory_hotplug.c
    find_smallest_section_pfn():
        435: unsigned long start_pfn,    /* argument */
        438: for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
        439: if (unlikely(!pfn_to_online_page(start_pfn)))
        442: if (unlikely(pfn_to_nid(start_pfn) != nid))
        445: if (zone != page_zone(pfn_to_page(start_pfn)))
        448: return start_pfn;
    find_biggest_section_pfn():
        456: unsigned long start_pfn,    /* argument */
        463: for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
    shrink_zone_span():
        479: static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,    /* argument */
        485: if (zone->zone_start_pfn == start_pfn) {
    [all …]
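find_smallest_section_pfn() walks forward one subsection (PAGES_PER_SUBSECTION pfns) at a time and stops at the first pfn that is online, on the expected node, and inside the expected zone; find_biggest_section_pfn() is the mirror-image backward walk. A condensed sketch of the forward scan, assuming kernel context (the function name is illustrative):

    /* Return the first pfn in [start_pfn, end_pfn) usable as the zone's
     * new lower boundary, or 0 if the span holds no such pfn. */
    static unsigned long first_valid_subsection_pfn(int nid, struct zone *zone,
                                                    unsigned long start_pfn,
                                                    unsigned long end_pfn)
    {
            for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
                    if (!pfn_to_online_page(start_pfn))
                            continue;
                    if (pfn_to_nid(start_pfn) != nid)
                            continue;
                    if (zone != page_zone(pfn_to_page(start_pfn)))
                            continue;
                    return start_pfn;
            }
            return 0;
    }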
shuffle.c
    __shuffle_zone():
        83: unsigned long start_pfn = z->zone_start_pfn;    /* local */
        89: start_pfn = ALIGN(start_pfn, order_pages);
        90: for (i = start_pfn; i < end_pfn; i += order_pages) {
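__shuffle_zone() rounds the zone's first pfn up to an order-sized boundary before stepping through the zone in order-sized chunks. A plain-C illustration of that ALIGN() arithmetic (not kernel code; the values are arbitrary):

    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long start_pfn = 0x100005;
            unsigned long order_pages = 1024;    /* e.g. an order-10 block */

            printf("aligned start: %#lx\n", ALIGN(start_pfn, order_pages));
            /* prints 0x100400: iteration begins at the first whole block */
            return 0;
    }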
compaction.c
    skip_offline_sections():
        211: static unsigned long skip_offline_sections(unsigned long start_pfn)    /* argument */
        213: unsigned long start_nr = pfn_to_section_nr(start_pfn);
    skip_offline_sections_reverse():
        231: static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)    /* argument */
        233: unsigned long start_nr = pfn_to_section_nr(start_pfn);
    skip_offline_sections():
        246: static unsigned long skip_offline_sections(unsigned long start_pfn)    /* argument */
    skip_offline_sections_reverse():
        251: static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)    /* argument */
    isolate_freepages_block():
        557: unsigned long *start_pfn,    /* argument */
        567: unsigned long blockpfn = *start_pfn;
        660: trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
        664: *start_pfn = blockpfn;
    [all …]
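skip_offline_sections() converts between pfns and SPARSEMEM section numbers, which with a power-of-two section size is a plain shift in each direction. A plain-C illustration (PFN_SECTION_SHIFT = 15 is the x86-64 value and is an assumption here; other configs differ):

    #include <stdio.h>

    #define PFN_SECTION_SHIFT 15    /* section covers 1 << 15 pages */
    #define pfn_to_section_nr(pfn)  ((pfn) >> PFN_SECTION_SHIFT)
    #define section_nr_to_pfn(sec)  ((unsigned long)(sec) << PFN_SECTION_SHIFT)

    int main(void)
    {
            unsigned long pfn = 0x123456;
            unsigned long nr = pfn_to_section_nr(pfn);

            printf("pfn %#lx -> section %lu -> first pfn %#lx\n",
                   pfn, nr, section_nr_to_pfn(nr));
            return 0;
    }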
page_alloc.c
    page_outside_zone_boundaries():
        583: unsigned long sp, start_pfn;    /* local */
        587: start_pfn = zone->zone_start_pfn;
        595: start_pfn, start_pfn + sp);
    __pageblock_pfn_to_page():
        1658: struct page *__pageblock_pfn_to_page(unsigned long start_pfn,    /* argument */
        1670: start_page = pfn_to_online_page(start_pfn);
    __move_freepages_block():
        1940: static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,    /* argument */
        1948: VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
        1949: end_pfn = pageblock_end_pfn(start_pfn);
        1951: for (pfn = start_pfn; pfn < end_pfn;) {
    prep_move_freepages_block():
        1974: unsigned long *start_pfn,    /* argument */
    [all …]
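__move_freepages_block() insists on a pageblock-aligned start_pfn (the VM_WARN_ON at line 1948) and derives end_pfn from it. A plain-C illustration of that rounding (pageblock_nr_pages = 512 matches x86-64 with pageblock_order 9; the macros here are simplified equivalents of the kernel's):

    #include <assert.h>
    #include <stdio.h>

    #define pageblock_nr_pages       512UL
    #define pageblock_start_pfn(pfn) ((pfn) & ~(pageblock_nr_pages - 1))
    #define pageblock_end_pfn(pfn)   (pageblock_start_pfn(pfn) + pageblock_nr_pages)

    int main(void)
    {
            unsigned long start_pfn = 0x12345;

            start_pfn = pageblock_start_pfn(start_pfn);
            /* the same alignment invariant the VM_WARN_ON checks */
            assert(!(start_pfn & (pageblock_nr_pages - 1)));
            printf("block spans [%#lx, %#lx)\n",
                   start_pfn, pageblock_end_pfn(start_pfn));
            return 0;
    }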
util.c
    mmap_action_prepare():
        1329: remap_pfn_range_prepare(desc, action->remap.start_pfn);
        1332: io_remap_pfn_range_prepare(desc, action->remap.start_pfn,
    mmap_action_complete():
        1358: action->remap.start_pfn, action->remap.size,
        1363: action->remap.start_pfn, action->remap.size,
    page_range_contiguous():
        1453: const unsigned long start_pfn = page_to_pfn(page);    /* local */
        1454: const unsigned long end_pfn = start_pfn + nr_pages;
        1463: for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
        1465: if (unlikely(page + (pfn - start_pfn) != pfn_to_page(pfn)))
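page_range_contiguous() exploits the fact that struct pages are virtually contiguous within one SPARSEMEM section, so only the first pfn of each section the range crosses needs an explicit pfn_to_page() probe. A sketch of that check, assuming kernel context (the name and bool return convention are illustrative):

    /* True iff the memmap for [page, page + nr_pages) is one contiguous
     * virtual run; only section-boundary pfns can break contiguity. */
    static bool pages_are_contiguous(struct page *page, unsigned long nr_pages)
    {
            const unsigned long start_pfn = page_to_pfn(page);
            const unsigned long end_pfn = start_pfn + nr_pages;
            unsigned long pfn;

            for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION); pfn < end_pfn;
                 pfn += PAGES_PER_SECTION)
                    if (page + (pfn - start_pfn) != pfn_to_page(pfn))
                            return false;
            return true;
    }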
/linux/arch/x86/xen/

setup.c
    xen_del_extra_mem():
        83: static void __init xen_del_extra_mem(unsigned long start_pfn,    /* argument */
        90: start_r = xen_extra_mem[i].start_pfn;
        94: if (start_r == start_pfn) {
        96: xen_extra_mem[i].start_pfn += n_pfns;
        101: if (start_r + size_r == start_pfn + n_pfns) {
        107: if (start_pfn > start_r && start_pfn < start_r + size_r) {
        108: BUG_ON(start_pfn + n_pfns > start_r + size_r);
        109: xen_extra_mem[i].n_pfns = start_pfn - start_r;
        111: xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
        112: (start_pfn + n_pfns));
    [all …]
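xen_del_extra_mem() deletes a pfn range from a recorded region by distinguishing three cases: trim the front, trim the back, or split the middle (the middle case hands the tail back via xen_add_extra_mem()). A self-contained plain-C illustration of the same case analysis (the types and helper are illustrative, not the kernel's):

    #include <stdio.h>

    struct region { unsigned long start_pfn, n_pfns; };

    /* Delete [del, del + n) from *r; on a middle split, *tail gets the
     * surviving upper piece, otherwise tail->n_pfns stays 0. */
    static void del_range(struct region *r, unsigned long del, unsigned long n,
                          struct region *tail)
    {
            unsigned long end = r->start_pfn + r->n_pfns;

            tail->n_pfns = 0;
            if (del == r->start_pfn) {              /* front trim */
                    r->start_pfn += n;
                    r->n_pfns -= n;
            } else if (del + n == end) {            /* back trim */
                    r->n_pfns -= n;
            } else {                                /* middle split */
                    r->n_pfns = del - r->start_pfn;
                    tail->start_pfn = del + n;
                    tail->n_pfns = end - (del + n);
            }
    }

    int main(void)
    {
            struct region r = { .start_pfn = 100, .n_pfns = 50 }, tail;

            del_range(&r, 110, 10, &tail);
            printf("head [%lu,+%lu) tail [%lu,+%lu)\n",
                   r.start_pfn, r.n_pfns, tail.start_pfn, tail.n_pfns);
            /* prints: head [100,+10) tail [120,+30) */
            return 0;
    }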
enlighten.c
    xen_add_extra_mem():
        414: void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)    /* argument */
        425: xen_extra_mem[i].start_pfn = start_pfn;
        430: if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
        431: start_pfn) {
        439: memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
    arch_xen_unpopulated_init():
        462: pfn_to_page(xen_extra_mem[i].start_pfn + j);
/linux/include/trace/events/

page_isolation.h
    13: unsigned long start_pfn,
    17: TP_ARGS(start_pfn, end_pfn, fin_pfn),
    20: __field(unsigned long, start_pfn)
    26: __entry->start_pfn = start_pfn;
    32: __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
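The page_isolation.h hits trace the standard TRACE_EVENT anatomy: parameters declared in TP_PROTO/TP_ARGS, mirrored into __field() slots in TP_STRUCT__entry, copied in TP_fast_assign(), and rendered by TP_printk(). A skeleton of that shape, assuming kernel context and the usual trace-header boilerplate around it (the event name is illustrative):

    TRACE_EVENT(example_pfn_range,

            TP_PROTO(unsigned long start_pfn, unsigned long end_pfn),

            TP_ARGS(start_pfn, end_pfn),

            TP_STRUCT__entry(
                    __field(unsigned long, start_pfn)
                    __field(unsigned long, end_pfn)
            ),

            TP_fast_assign(
                    __entry->start_pfn = start_pfn;
                    __entry->end_pfn = end_pfn;
            ),

            TP_printk("start_pfn=0x%lx end_pfn=0x%lx",
                      __entry->start_pfn, __entry->end_pfn)
    );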
compaction.h
    17: unsigned long start_pfn,
    22: TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
    25: __field(unsigned long, start_pfn)
    32: __entry->start_pfn = start_pfn;
    39: __entry->start_pfn,
    48: unsigned long start_pfn,
    53: TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
    59: unsigned long start_pfn,
    64: TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
    70: unsigned long start_pfn,
    [all …]
/linux/arch/x86/mm/

init.c
    save_mr():
        328: unsigned long start_pfn, unsigned long end_pfn,    /* argument */
        331: if (start_pfn < end_pfn) {
        334: mr[nr_range].start = start_pfn<<PAGE_SHIFT;
    split_mem_range():
        406: unsigned long start_pfn, end_pfn, limit_pfn;    /* local */
        413: pfn = start_pfn = PFN_DOWN(start);
        430: if (start_pfn < end_pfn) {
        431: nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
        436: start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        445: if (start_pfn < end_pfn) {
        446: nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
    [all …]
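split_mem_range() carves a physical range into a head mapped with 4K pages up to the first PMD-aligned pfn, then PMD-sized pieces. A plain-C illustration of the boundary arithmetic (constants are x86 values; the printed split is only the first step of the kernel's full head/middle/tail carving):

    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define PMD_SIZE       (1UL << 21)    /* 2 MiB */
    #define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
    #define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long start = 0x123456;    /* physical address */
            unsigned long start_pfn = PFN_DOWN(start);
            unsigned long pmd_pfn = round_up(start_pfn, PFN_DOWN(PMD_SIZE));

            printf("4K head: pfns [%#lx, %#lx), 2M mapping starts at pfn %#lx\n",
                   start_pfn, pmd_pfn, pmd_pfn);
            return 0;
    }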
/linux/arch/powerpc/platforms/powernv/

memtrace.c
    memtrace_alloc_node():
        98: unsigned long pfn, start_pfn;    /* local */
        109: start_pfn = page_to_pfn(page);
        115: flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
        116: (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
        123: for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
        126: arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);
        128: return PFN_PHYS(start_pfn);
    memtrace_free():
        202: const unsigned long start_pfn = PHYS_PFN(start);    /* local */
        210: for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
        213: free_contig_range(start_pfn, nr_pages);
/linux/arch/sh/mm/

numa.c
    setup_bootmem_node():
        25: unsigned long start_pfn, end_pfn;    /* local */
        30: start_pfn = PFN_DOWN(start);
        38: __add_active_range(nid, start_pfn, end_pfn);
        48: NODE_DATA(nid)->node_start_pfn = start_pfn;
        49: NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
/linux/drivers/base/

memory.c
    memory_block_online():
        226: unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);    /* local */
        236: start_pfn, nr_pages);
        250: ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
        255: ret = online_pages(start_pfn + nr_vmemmap_pages,
        259: mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
        268: adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
    memory_block_offline():
        282: unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);    /* local */
        299: adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
        302: ret = offline_pages(start_pfn + nr_vmemmap_pages,
        307: adjust_present_page_count(pfn_to_page(start_pfn),
    [all …]
arch_numa.c
    setup_node_data():
        195: static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)    /* argument */
        197: if (start_pfn >= end_pfn)
        203: NODE_DATA(nid)->node_start_pfn = start_pfn;
        204: NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
    numa_register_nodes():
        217: unsigned long start_pfn, end_pfn;    /* local */
        219: get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
        220: setup_node_data(nid, start_pfn, end_pfn);
/linux/arch/s390/mm/

init.c
    s390_cma_mem_notifier():
        242: mem_data.start = arg->start_pfn << PAGE_SHIFT;
    arch_add_memory():
        264: unsigned long start_pfn = PFN_DOWN(start);    /* local */
        276: rc = __add_pages(nid, start_pfn, size_pages, params);
    arch_remove_memory():
        284: unsigned long start_pfn = start >> PAGE_SHIFT;    /* local */
        287: __remove_pages(start_pfn, nr_pages, altmap);
/linux/arch/loongarch/mm/

init.c
    arch_add_memory():
        83: unsigned long start_pfn = start >> PAGE_SHIFT;    /* local */
        87: ret = __add_pages(nid, start_pfn, nr_pages, params);
    arch_remove_memory():
        98: unsigned long start_pfn = start >> PAGE_SHIFT;    /* local */
        100: struct page *page = pfn_to_page(start_pfn);
        105: __remove_pages(start_pfn, nr_pages, altmap);
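The s390 and LoongArch hits share one shape: arch_add_memory() converts the physical extent to pfns and defers to the generic __add_pages(). A condensed sketch of that pattern, assuming kernel context, with the arch-specific setup and error handling of the originals omitted:

    int arch_add_memory(int nid, u64 start, u64 size,
                        struct mhp_params *params)
    {
            unsigned long start_pfn = start >> PAGE_SHIFT;
            unsigned long nr_pages = size >> PAGE_SHIFT;

            /* the generic hotplug core does the real work */
            return __add_pages(nid, start_pfn, nr_pages, params);
    }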
/linux/drivers/xen/

balloon.c
    xen_online_page():
        347: unsigned long start_pfn = page_to_pfn(page);    /* local */
        350: pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        353: p = pfn_to_page(start_pfn + i);
    balloon_add_regions():
        684: unsigned long start_pfn, pages;    /* local */
        693: start_pfn = xen_extra_mem[i].start_pfn;
        700: extra_pfn_end = min(max_pfn, start_pfn + pages);
        702: for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
        711: pages = extra_pfn_end - start_pfn;
/linux/arch/sh/kernel/

setup.c
    __add_active_range():
        193: void __init __add_active_range(unsigned int nid, unsigned long start_pfn,    /* argument */
        201: start = start_pfn << PAGE_SHIFT;
        211: start_pfn, end_pfn);
        235: memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
/linux/arch/x86/platform/efi/

efi_32.c
    efi_map_region():
        38: u64 start_pfn, end_pfn, end;    /* local */
        42: start_pfn = PFN_DOWN(md->phys_addr);
        47: if (pfn_range_is_mapped(start_pfn, end_pfn)) {
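efi_map_region() rounds the region's start down to a pfn with PFN_DOWN(); pairing that with PFN_UP() on the end is the general idiom (an assumption about the elided lines here) that guarantees a misaligned extent is fully covered. A plain-C illustration:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1ULL << PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long long phys = 0x1000800, size = 0x2000;

            /* start rounds down, end rounds up: the whole extent is covered */
            printf("pfn range [%#llx, %#llx)\n",
                   PFN_DOWN(phys), PFN_UP(phys + size));
            /* prints [0x1000, 0x1003): the misaligned extent spans 3 pages */
            return 0;
    }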
/linux/kernel/liveupdate/

kexec_handover.c
    kho_restore_pages():
        288: const unsigned long start_pfn = PHYS_PFN(phys);    /* local */
        289: const unsigned long end_pfn = start_pfn + nr_pages;
        290: unsigned long pfn = start_pfn;
        302: return pfn_to_page(start_pfn);
    kho_preserve_pages():
        835: const unsigned long start_pfn = page_to_pfn(page);    /* local */
        836: const unsigned long end_pfn = start_pfn + nr_pages;
        837: unsigned long pfn = start_pfn;
        841: if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
        860: __kho_unpreserve(track, start_pfn, failed_pfn);
    kho_unpreserve_pages():
        879: const unsigned long start_pfn = page_to_pfn(page);    /* local */
    [all …]
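kho_preserve_pages() walks a pfn range and, when one pfn fails, calls __kho_unpreserve() on exactly the prefix that had succeeded. A self-contained plain-C toy showing that rollback shape (all helpers are stand-ins, not KHO symbols):

    #include <stdio.h>

    /* Stand-in: pretend pfn 105 cannot be preserved. */
    static int preserve_one(unsigned long pfn)
    {
            return pfn == 105 ? -1 : 0;
    }

    static void unpreserve_range(unsigned long start_pfn, unsigned long end_pfn)
    {
            printf("rolling back [%lu, %lu)\n", start_pfn, end_pfn);
    }

    static int preserve_range(unsigned long start_pfn, unsigned long end_pfn)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                    if (preserve_one(pfn)) {
                            /* undo exactly the prefix that succeeded */
                            unpreserve_range(start_pfn, pfn);
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            return preserve_range(100, 110) ? 1 : 0;  /* rolls back [100, 105) */
    }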
/linux/kernel/dma/

direct.c
    dma_find_range():
        568: unsigned long start_pfn)    /* argument */
        575: if (start_pfn >= cpu_start_pfn &&
        576: start_pfn - cpu_start_pfn < PFN_DOWN(m->size))
    check_ram_in_range_map():
        588: static int check_ram_in_range_map(unsigned long start_pfn,    /* argument */
        591: unsigned long end_pfn = start_pfn + nr_pages;
        594: while (start_pfn < end_pfn) {
        597: bdr = dma_find_range(dev, start_pfn);
        601: start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
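check_ram_in_range_map() advances start_pfn through whichever bus-DMA range covers it and fails as soon as some pfn is covered by none. A self-contained plain-C model of that coverage walk (types are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct range { unsigned long start_pfn, nr_pfns; };

    /* True iff every pfn in [start_pfn, end_pfn) lies in some range. */
    static bool ranges_cover(const struct range *r, size_t n,
                             unsigned long start_pfn, unsigned long end_pfn)
    {
            while (start_pfn < end_pfn) {
                    size_t i;

                    for (i = 0; i < n; i++)
                            if (start_pfn >= r[i].start_pfn &&
                                start_pfn < r[i].start_pfn + r[i].nr_pfns)
                                    break;
                    if (i == n)
                            return false;   /* pfn backed by no range */
                    /* jump to the first pfn past the matching range */
                    start_pfn = r[i].start_pfn + r[i].nr_pfns;
            }
            return true;
    }

    int main(void)
    {
            struct range map[] = { { 0, 16 }, { 16, 16 } };

            printf("%d %d\n", ranges_cover(map, 2, 4, 30),
                   ranges_cover(map, 2, 4, 40));    /* prints: 1 0 */
            return 0;
    }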
/linux/arch/loongarch/kernel/

numa.c
    node_mem_init():
        145: unsigned long start_pfn, end_pfn;    /* local */
        152: get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
        154: node, start_pfn, end_pfn);
/linux/include/asm-generic/

memory_model.h
    35: #define for_each_valid_pfn(pfn, start_pfn, end_pfn) \    /* macro argument */
    36: for ((pfn) = max_t(unsigned long, (start_pfn), ARCH_PFN_OFFSET); \
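The max_t() clamp in for_each_valid_pfn() matters on architectures where RAM begins above pfn 0: iteration may never start below ARCH_PFN_OFFSET. A plain-C illustration (the offset value is arbitrary):

    #include <stdio.h>

    #define ARCH_PFN_OFFSET 0x80000UL
    #define max_t(t, a, b)  ((t)(a) > (t)(b) ? (t)(a) : (t)(b))

    int main(void)
    {
            unsigned long start_pfn = 0x100, pfn;

            pfn = max_t(unsigned long, start_pfn, ARCH_PFN_OFFSET);
            printf("iteration starts at %#lx\n", pfn);    /* 0x80000 */
            return 0;
    }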
/linux/drivers/virt/acrn/

mm.c
    acrn_vm_ram_map():
        179: unsigned long start_pfn, cur_pfn;    /* local */
        199: start_pfn = cur_pfn;
        218: if (cur_pfn != start_pfn + i) {
        232: PFN_PHYS(start_pfn), memmap->len,
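acrn_vm_ram_map() detects where a run of pinned pages stops being physically contiguous by testing cur_pfn != start_pfn + i. The same test in self-contained plain C:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* True iff the i-th pfn equals start_pfn + i for the whole run. */
    static bool pfns_contiguous(const unsigned long *pfns, size_t n)
    {
            unsigned long start_pfn = pfns[0];
            size_t i;

            for (i = 1; i < n; i++)
                    if (pfns[i] != start_pfn + i)
                            return false;
            return true;
    }

    int main(void)
    {
            unsigned long run[] = { 100, 101, 102, 104 };

            printf("%d\n", pfns_contiguous(run, 4));    /* 0: gap at 103 */
            return 0;
    }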