/linux/mm/
mm_init.c
  317  unsigned long start_pfn, end_pfn;  in early_calculate_totalpages() local
  320  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {  in early_calculate_totalpages()
  321  unsigned long pages = end_pfn - start_pfn;  in early_calculate_totalpages()
  479  unsigned long start_pfn, end_pfn;  in find_zone_movable_pfns_for_nodes() local
  497  for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {  in find_zone_movable_pfns_for_nodes()
  501  if (start_pfn >= end_pfn)  in find_zone_movable_pfns_for_nodes()
  507  kernel_pages = min(end_pfn, usable_startpfn)  in find_zone_movable_pfns_for_nodes()
  516  if (end_pfn <= usable_startpfn) {  in find_zone_movable_pfns_for_nodes()
  524  zone_movable_pfn[nid] = end_pfn;  in find_zone_movable_pfns_for_nodes()
  535  size_pages = end_pfn - start_pfn;  in find_zone_movable_pfns_for_nodes()
  [all …]
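Taken together, the early_calculate_totalpages() hits show one recurring shape: visit every (start_pfn, end_pfn) memory range and accumulate end_pfn - start_pfn. A minimal, compilable sketch of that accumulation follows; the struct and the plain array stand in for the kernel's memblock ranges and its for_each_mem_pfn_range() iterator, and total_pages() is a hypothetical name, not kernel API.

    #include <stddef.h>

    /* Illustrative stand-in for one memblock range; end_pfn is exclusive,
     * matching the `pages = end_pfn - start_pfn` hits above. */
    struct pfn_range {
        unsigned long start_pfn;
        unsigned long end_pfn;
    };

    /* Sum the page counts of all ranges, the shape of
     * early_calculate_totalpages(). */
    static unsigned long total_pages(const struct pfn_range *ranges, size_t n)
    {
        unsigned long total = 0;

        for (size_t i = 0; i < n; i++)
            total += ranges[i].end_pfn - ranges[i].start_pfn;
        return total;
    }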
page_idle.c
  124  unsigned long pfn, end_pfn;  in page_idle_bitmap_read() local
  134  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_read()
  135  if (end_pfn > max_pfn)  in page_idle_bitmap_read()
  136  end_pfn = max_pfn;  in page_idle_bitmap_read()
  138  for (; pfn < end_pfn; pfn++) {  in page_idle_bitmap_read()
  169  unsigned long pfn, end_pfn;  in page_idle_bitmap_write() local
  179  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_write()
  180  if (end_pfn > max_pfn)  in page_idle_bitmap_write()
  181  end_pfn = max_pfn;  in page_idle_bitmap_write()
  183  for (; pfn < end_pfn; pfn++) {  in page_idle_bitmap_write()
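Both page_idle_bitmap_read() and page_idle_bitmap_write() derive the same bound before their loops: a buffer of count bytes covers count * BITS_PER_BYTE pfns, clamped to max_pfn so the walk never runs past the last page frame. A sketch of just that clamp, with max_pfn passed as a parameter rather than read from the kernel global of the same name:

    #include <stddef.h>

    #define BITS_PER_BYTE 8

    /* End pfn for a `count`-byte idle bitmap window starting at `pfn`,
     * clamped exactly as lines 134-136 and 179-181 above do. */
    static unsigned long bitmap_end_pfn(unsigned long pfn, size_t count,
                                        unsigned long max_pfn)
    {
        unsigned long end_pfn = pfn + count * BITS_PER_BYTE;

        if (end_pfn > max_pfn)
            end_pfn = max_pfn;
        return end_pfn;
    }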
memory_hotplug.c
  392  const unsigned long end_pfn = pfn + nr_pages;  in __add_pages() local
  419  for (; pfn < end_pfn; pfn += cur_nr_pages) {  in __add_pages()
  421  cur_nr_pages = min(end_pfn - pfn,  in __add_pages()
  436  unsigned long end_pfn)  in find_smallest_section_pfn() argument
  438  for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {  in find_smallest_section_pfn()
  457  unsigned long end_pfn)  in find_biggest_section_pfn() argument
  462  pfn = end_pfn - 1;  in find_biggest_section_pfn()
  480  unsigned long end_pfn)  in shrink_zone_span() argument
  492  pfn = find_smallest_section_pfn(nid, zone, end_pfn,  in shrink_zone_span()
  501  } else if (zone_end_pfn(zone) == end_pfn) {  in shrink_zone_span()
  [all …]
compaction.c
  558  unsigned long end_pfn,  in isolate_freepages_block() argument
  577  for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {  in isolate_freepages_block()
  602  (blockpfn + (1UL << order) <= end_pfn)) {  in isolate_freepages_block()
  657  if (unlikely(blockpfn > end_pfn))  in isolate_freepages_block()
  658  blockpfn = end_pfn;  in isolate_freepages_block()
  671  if (strict && blockpfn < end_pfn)  in isolate_freepages_block()
  696  unsigned long start_pfn, unsigned long end_pfn)  in isolate_freepages_range() argument
  710  for (; pfn < end_pfn; pfn += isolated,  in isolate_freepages_range()
  726  block_end_pfn = min(block_end_pfn, end_pfn);  in isolate_freepages_range()
  750  if (pfn < end_pfn) {  in isolate_freepages_range()
  [all …]
shuffle.c
  84  unsigned long end_pfn = zone_end_pfn(z);  in __shuffle_zone() local
  90  for (i = start_pfn; i < end_pfn; i += order_pages) {  in __shuffle_zone()
memblock.c
  773  unsigned long start_pfn, end_pfn, mem_size_mb;  in memblock_validate_numa_coverage() local
  777  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {  in memblock_validate_numa_coverage()
  779  nr_pages += end_pfn - start_pfn;  in memblock_validate_numa_coverage()
  1974  unsigned long *start_pfn, unsigned long *end_pfn)  in memblock_search_pfn_nid() argument
  1983  *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);  in memblock_search_pfn_nid()
  2119  static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)  in free_memmap() argument
  2128  end_pg = pfn_to_page(end_pfn - 1) + 1;  in free_memmap()
  2229  unsigned long end_pfn = PFN_DOWN(end);  in __free_memory_core() local
  2231  if (!IS_ENABLED(CONFIG_HIGHMEM) && end_pfn > max_low_pfn)  in __free_memory_core()
  2232  end_pfn = max_low_pfn;  in __free_memory_core()
  [all …]
page_owner.c
  432  unsigned long end_pfn = zone_end_pfn(zone);  in pagetypeinfo_showmixedcount_print() local
  445  for (; pfn < end_pfn; ) {  in pagetypeinfo_showmixedcount_print()
  453  block_end_pfn = min(block_end_pfn, end_pfn);  in pagetypeinfo_showmixedcount_print()
  775  unsigned long end_pfn = zone_end_pfn(zone);  in init_pages_in_zone() local
  783  for (; pfn < end_pfn; ) {  in init_pages_in_zone()
  792  block_end_pfn = min(block_end_pfn, end_pfn);  in init_pages_in_zone()
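The repeated `block_end_pfn = min(block_end_pfn, end_pfn)` hits here and in compaction.c above are one idiom: walk a zone one pageblock at a time and clamp the final partial block to the zone end. A sketch of that walk; PAGEBLOCK_PAGES is a power-of-two stand-in for the kernel's pageblock_nr_pages, and the scan() callback for whatever per-block work the real callers do.

    #define PAGEBLOCK_PAGES 512UL  /* stand-in for pageblock_nr_pages (power of two) */

    /* Visit [pfn, end_pfn) one pageblock at a time, clamping the last,
     * possibly partial, block to end_pfn as the hits above do. */
    static void walk_pageblocks(unsigned long pfn, unsigned long end_pfn,
                                void (*scan)(unsigned long start, unsigned long end))
    {
        while (pfn < end_pfn) {
            /* First pfn of the next pageblock after pfn. */
            unsigned long block_end_pfn =
                (pfn + PAGEBLOCK_PAGES) & ~(PAGEBLOCK_PAGES - 1);

            if (block_end_pfn > end_pfn)
                block_end_pfn = end_pfn;
            scan(pfn, block_end_pfn);
            pfn = block_end_pfn;
        }
    }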
/linux/arch/x86/mm/
init.c
  328  unsigned long start_pfn, unsigned long end_pfn,  in save_mr() argument
  331  if (start_pfn < end_pfn) {  in save_mr()
  335  mr[nr_range].end = end_pfn<<PAGE_SHIFT;  in save_mr()
  406  unsigned long start_pfn, end_pfn, limit_pfn;  in split_mem_range() local
  422  end_pfn = PFN_DOWN(PMD_SIZE);  in split_mem_range()
  424  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
  426  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
  428  if (end_pfn > limit_pfn)  in split_mem_range()
  429  end_pfn = limit_pfn;  in split_mem_range()
  430  if (start_pfn < end_pfn) {  in split_mem_range()
  [all …]
numa.c
  128  unsigned long start_pfn, end_pfn;  in numa_register_nodes() local
  134  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in numa_register_nodes()
  135  if (start_pfn >= end_pfn)  in numa_register_nodes()
init_32.c
  256  unsigned long start_pfn, end_pfn;  in kernel_physical_mapping_init() local
  267  end_pfn = end >> PAGE_SHIFT;  in kernel_physical_mapping_init()
  296  if (pfn >= end_pfn)  in kernel_physical_mapping_init()
  304  for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;  in kernel_physical_mapping_init()
  344  for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;  in kernel_physical_mapping_init()
/linux/include/trace/events/
page_isolation.h
  14  unsigned long end_pfn,
  17  TP_ARGS(start_pfn, end_pfn, fin_pfn),
  21  __field(unsigned long, end_pfn)
  27  __entry->end_pfn = end_pfn;
  32  __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
  33  __entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
compaction.h
  18  unsigned long end_pfn,
  22  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
  26  __field(unsigned long, end_pfn)
  33  __entry->end_pfn = end_pfn;
  40  __entry->end_pfn,
  49  unsigned long end_pfn,
  53  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
  60  unsigned long end_pfn,
  64  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
  71  unsigned long end_pfn,
  [all …]
/linux/arch/sh/mm/
numa.c
  25  unsigned long start_pfn, end_pfn;  in setup_bootmem_node() local
  31  end_pfn = PFN_DOWN(end);  in setup_bootmem_node()
  38  __add_active_range(nid, start_pfn, end_pfn);  in setup_bootmem_node()
  49  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_bootmem_node()
/linux/drivers/base/
arch_numa.c
  195  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  in setup_node_data() argument
  197  if (start_pfn >= end_pfn)  in setup_node_data()
  204  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_node_data()
  217  unsigned long start_pfn, end_pfn;  in numa_register_nodes() local
  219  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in numa_register_nodes()
  220  setup_node_data(nid, start_pfn, end_pfn);  in numa_register_nodes()
/linux/arch/x86/xen/
setup.c
  215  unsigned long end_pfn)  in xen_set_identity_and_release_chunk() argument
  220  WARN_ON(start_pfn > end_pfn);  in xen_set_identity_and_release_chunk()
  223  end = min(end_pfn, ini_nr_pages);  in xen_set_identity_and_release_chunk()
  242  set_phys_range_identity(start_pfn, end_pfn);  in xen_set_identity_and_release_chunk()
  344  unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)  in xen_set_identity_and_remap_chunk() argument
  348  unsigned long n = end_pfn - start_pfn;  in xen_set_identity_and_remap_chunk()
  390  for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)  in xen_set_identity_and_remap_chunk()
  399  unsigned long start_pfn, unsigned long end_pfn,  in xen_count_remap_pages() argument
  405  return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;  in xen_count_remap_pages()
  409  unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,  in xen_foreach_remap_area() argument
  [all …]
/linux/arch/x86/platform/efi/
efi_32.c
  38  u64 start_pfn, end_pfn, end;  in efi_map_region() local
  45  end_pfn = PFN_UP(end);  in efi_map_region()
  47  if (pfn_range_is_mapped(start_pfn, end_pfn)) {  in efi_map_region()
/linux/arch/sh/kernel/
swsusp.c
  22  unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;  in pfn_is_nosave() local
  24  return (pfn >= begin_pfn) && (pfn < end_pfn);  in pfn_is_nosave()
setup.c
  194  unsigned long end_pfn)  in __add_active_range() argument
  202  end = end_pfn << PAGE_SHIFT;  in __add_active_range()
  211  start_pfn, end_pfn);  in __add_active_range()
  235  memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),  in __add_active_range()
/linux/kernel/liveupdate/
kexec_handover.c
  168  unsigned long end_pfn)  in __kho_unpreserve() argument
  172  while (pfn < end_pfn) {  in __kho_unpreserve()
  173  order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));  in __kho_unpreserve()
  289  const unsigned long end_pfn = start_pfn + nr_pages;  in kho_restore_pages() local
  292  while (pfn < end_pfn) {  in kho_restore_pages()
  294  min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));  in kho_restore_pages()
  836  const unsigned long end_pfn = start_pfn + nr_pages;  in kho_preserve_pages() local
  846  while (pfn < end_pfn) {  in kho_preserve_pages()
  848  min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));  in kho_preserve_pages()
  880  const unsigned long end_pfn = start_pfn + nr_pages;  in kho_unpreserve_pages() local
  [all …]
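All four kho_* hits walk a pfn range in the largest naturally aligned power-of-two chunks: each step's order is limited both by the alignment of pfn (count_trailing_zeros) and by the number of pages left (ilog2). A compilable sketch of that walk, using GCC/Clang builtins in place of the kernel helpers; the printf() stands in for the real per-chunk work.

    #include <stdio.h>

    /* Walk [pfn, end_pfn) in maximal aligned power-of-two chunks, the shape
     * of __kho_unpreserve() and kho_{preserve,restore,unpreserve}_pages()
     * above. Assumes pfn > 0 on entry: __builtin_ctzl(0) is undefined. */
    static void walk_chunks(unsigned long pfn, unsigned long end_pfn)
    {
        while (pfn < end_pfn) {
            unsigned int align = (unsigned int)__builtin_ctzl(pfn);
            /* ilog2(end_pfn - pfn): index of the highest set bit. */
            unsigned int left = (unsigned int)(8 * sizeof(unsigned long) - 1)
                                - (unsigned int)__builtin_clzl(end_pfn - pfn);
            unsigned int order = align < left ? align : left;

            printf("chunk: pfn %lu, order %u\n", pfn, order);
            pfn += 1UL << order;
        }
    }

    int main(void)
    {
        walk_chunks(3, 20);  /* emits chunks of 1, 4, 8 and 4 pages */
        return 0;
    }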
/linux/arch/loongarch/kernel/
numa.c
  145  unsigned long start_pfn, end_pfn;  in node_mem_init() local
  152  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);  in node_mem_init()
  154  node, start_pfn, end_pfn);  in node_mem_init()
/linux/include/asm-generic/
memory_model.h
  35  #define for_each_valid_pfn(pfn, start_pfn, end_pfn) \  argument
  37  (pfn) < min_t(unsigned long, (end_pfn), \
/linux/include/linux/
node.h
  123  unsigned long end_pfn);
  127  unsigned long end_pfn)  in register_memory_blocks_under_node_hotplug() argument
mmzone.h
  2094  void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
  2095  void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
  2199  static inline unsigned long first_valid_pfn(unsigned long pfn, unsigned long end_pfn)  in first_valid_pfn() argument
  2205  while (nr <= __highest_present_section_nr && pfn < end_pfn) {  in first_valid_pfn()
  2220  return end_pfn;  in first_valid_pfn()
  2223  static inline unsigned long next_valid_pfn(unsigned long pfn, unsigned long end_pfn)  in next_valid_pfn() argument
  2227  if (pfn >= end_pfn)  in next_valid_pfn()
  2228  return end_pfn;  in next_valid_pfn()
  2240  return first_valid_pfn(pfn, end_pfn);  in next_valid_pfn()
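The first_valid_pfn() hits encode the contract that for_each_valid_pfn() in memory_model.h above relies on: the helper returns the first valid pfn in [pfn, end_pfn), or end_pfn itself when there is none, so end_pfn doubles as the loop's termination sentinel. A deliberately simplified sketch of that contract, with a caller-supplied predicate standing in for the sparsemem section lookup the real helper performs:

    /* Return the first pfn in [pfn, end_pfn) accepted by valid(), or
     * end_pfn when none is: the "return end_pfn" convention visible at
     * lines 2220 and 2228 above. */
    static unsigned long first_valid(unsigned long pfn, unsigned long end_pfn,
                                     int (*valid)(unsigned long pfn))
    {
        for (; pfn < end_pfn; pfn++)
            if (valid(pfn))
                return pfn;
        return end_pfn;
    }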
/linux/arch/x86/include/asm/
mtrr.h
  59  extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
  101  static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)  in mtrr_trim_uncached_memory() argument
/linux/drivers/of/
kexec.c
  131  unsigned long start_pfn, end_pfn;  in ima_get_kexec_buffer() local
  152  end_pfn = PHYS_PFN(tmp_addr + tmp_size - 1);  in ima_get_kexec_buffer()
  153  if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {  in ima_get_kexec_buffer()