| /linux/tools/testing/nvdimm/ |
| pmem-dax.c |
    15  resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;  in __pmem_direct_access()
    17  if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,  in __pmem_direct_access()
    18  PFN_PHYS(nr_pages))))  in __pmem_direct_access()
|
| dax-dev.c |
    23  addr = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;  in dax_pgoff_to_phys()
    32  return PFN_PHYS(page_to_pfn(page));  in dax_pgoff_to_phys()
|
| /linux/include/asm-generic/ |
| memory_model.h |
    71  #define __pfn_to_phys(pfn)  PFN_PHYS(pfn)
    82  PFN_PHYS(__pfn); \
    85  #define page_to_phys(page)  PFN_PHYS(page_to_pfn(page))
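The memory_model.h hits show that page_to_phys() is just PFN_PHYS() applied to page_to_pfn(). A minimal userspace sketch of that relationship, assuming a flat mem_map[] and ignoring ARCH_PFN_OFFSET; the struct and array here are illustrative stand-ins, not the kernel's memory model:

```c
/* FLATMEM-style sketch: with a flat mem_map[] of struct page,
 * page_to_pfn() is array-index arithmetic and page_to_phys() is
 * PFN_PHYS() of that index. Types are stand-ins; ARCH_PFN_OFFSET
 * is assumed to be 0 for brevity. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)

struct page { int dummy; };       /* placeholder for struct page */
static struct page mem_map[256];  /* one entry per page frame */

#define page_to_pfn(page)  ((uint64_t)((page) - mem_map))
#define page_to_phys(page) PFN_PHYS(page_to_pfn(page))

int main(void)
{
	struct page *page = &mem_map[42];

	/* Entry 42 of mem_map describes the frame at physical 42 << 12. */
	printf("pfn %llu -> phys %#llx\n",
	       (unsigned long long)page_to_pfn(page),
	       (unsigned long long)page_to_phys(page));
	return 0;
}
```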
|
| /linux/arch/parisc/kernel/ |
| cache.c |
   107  #define pfn_va(pfn)  __va(PFN_PHYS(pfn))
   637  __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));  in flush_cache_page_if_present()
   654  __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));  in copy_user_highpage()
   663  __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));  in copy_to_user_page()
   671  __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));  in copy_from_user_page()
   773  __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));  in flush_cache_page()
   781  __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));  in flush_anon_page()
   793  __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));  in ptep_clear_flush_young()
   812  __flush_cache_page(vma, addr, PFN_PHYS(pfn));  in ptep_clear_flush()
|
| /linux/arch/loongarch/kernel/ |
| setup.c |
   417  dma_contiguous_reserve(PFN_PHYS(max_low_pfn));  in arch_mem_init()
   425  early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));  in arch_mem_init()
   562  start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);  in reserve_memblock_reserved_regions()
   563  end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);  in reserve_memblock_reserved_regions()
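The reserve_memblock_reserved_regions() hits above round a reserved byte range outward to page boundaries with PFN_DOWN()/PFN_UP() and then clamp it to the surrounding memory region. A hedged userspace sketch of just that interval arithmetic, assuming 4 KiB pages; the addresses and helper names are illustrative, not taken from the kernel:

```c
/* Sketch of the "round out to page boundaries, then clamp" pattern
 * seen in reserve_memblock_reserved_regions(); values are made up. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* A reserved byte range that is not page aligned ... */
	uint64_t r_start = 0x1234, r_end = 0x5678;
	/* ... inside a memory region [mem_start, mem_end] (inclusive end). */
	uint64_t mem_start = 0x0, mem_end = 0xffffff;

	/* Round the reservation outward to whole pages, then clamp. */
	uint64_t start = max_u64(PFN_PHYS(PFN_DOWN(r_start)), mem_start);
	uint64_t end   = min_u64(PFN_PHYS(PFN_UP(r_end)) - 1, mem_end);

	/* Expect start = 0x1000 (page containing 0x1234) and
	 * end = 0x5fff (last byte of the page containing 0x5678). */
	printf("reserve [%#llx, %#llx]\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
```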
|
| mem.c |
    51  memblock_set_current_limit(PFN_PHYS(max_low_pfn));  in memblock_init()
|
| /linux/arch/mips/mm/ |
| init.c |
   285  cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);  in maar_res_walk()
   286  cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;  in maar_res_walk()
   437  memblock_remove(PFN_PHYS(highstart_pfn), -1);  in highmem_init()
   444  if (!memblock_is_memory(PFN_PHYS(tmp)))  in highmem_init()
   483  void *addr = phys_to_virt(PFN_PHYS(pfn));  in free_init_pages()
|
| /linux/arch/x86/mm/ |
| numa.c |
   210  0LLU, PFN_PHYS(max_pfn) - 1);  in dummy_numa_init()
   213  numa_add_memblk(0, 0, PFN_PHYS(max_pfn));  in dummy_numa_init()
   454  return PFN_PHYS(MAX_DMA32_PFN);  in numa_emu_dma_end()
|
| /linux/arch/mips/kernel/ |
| setup.c |
   221  if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {  in finalize_initrd()
   661  memblock_set_current_limit(PFN_PHYS(max_low_pfn));  in arch_mem_init()
   680  dma_contiguous_reserve(PFN_PHYS(max_low_pfn));  in arch_mem_init()
   686  early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));  in arch_mem_init()
|
| /linux/arch/x86/xen/ |
| setup.c |
   116  memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));  in xen_del_extra_mem()
   636  phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);  in xen_e820_swap_entry_with_ram()
   774  start = PFN_PHYS(xen_start_info->first_p2m_pfn);  in xen_reserve_xen_mfnlist()
   775  size = PFN_PHYS(xen_start_info->nr_p2m_frames);  in xen_reserve_xen_mfnlist()
   804  mem_end = PFN_PHYS(ini_nr_pages);  in xen_memory_setup()
   909  chunk_size = min(size, PFN_PHYS(extra_pages));  in xen_memory_setup()
|
| enlighten.c |
   439  memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));  in xen_add_extra_mem()
|
| /linux/lib/ |
| devmem_is_allowed.c |
    23  if (iomem_is_exclusive(PFN_PHYS(pfn)))  in devmem_is_allowed()
|
| /linux/tools/include/linux/ |
| pfn.h |
     9  #define PFN_PHYS(x)  ((phys_addr_t)(x) << PAGE_SHIFT)  (macro definition)
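The hit above is the tools-side copy of the macro itself: PFN_PHYS() is simply a left shift of the page frame number by PAGE_SHIFT. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) and a stand-in phys_addr_t; the kernel takes PAGE_SHIFT from its arch headers:

```c
/* Userspace sketch of the pfn.h conversion, assuming 4 KiB pages. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;

/* PFN_PHYS(): page frame number -> physical byte address */
#define PFN_PHYS(x)  ((phys_addr_t)(x) << PAGE_SHIFT)
/* PFN_DOWN(): byte address -> page frame number, rounding down */
#define PFN_DOWN(x)  ((x) >> PAGE_SHIFT)

int main(void)
{
	uint64_t pfn = 0x12345;

	/* pfn 0x12345 with 4 KiB pages -> physical address 0x12345000 */
	printf("PFN_PHYS(%#llx) = %#llx\n",
	       (unsigned long long)pfn,
	       (unsigned long long)PFN_PHYS(pfn));

	/* Round trip: PFN_DOWN() undoes PFN_PHYS() for page-aligned addresses. */
	printf("PFN_DOWN(PFN_PHYS(pfn)) = %#llx\n",
	       (unsigned long long)PFN_DOWN(PFN_PHYS(pfn)));
	return 0;
}
```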
|
| /linux/samples/damon/ |
| mtier.c |
    67  range->start = PFN_PHYS(node_start_pfn(target_node));  in nid_to_phys()
    68  range->end = PFN_PHYS(node_end_pfn(target_node));  in nid_to_phys()
|
| /linux/mm/ |
| numa_emulation.c |
    41  return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));  in mem_hole_size()
   109  size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);  in split_nodes_interleave()
   210  return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);  in uniform_size()
   378  const u64 max_addr = PFN_PHYS(max_pfn);  in numa_emulation()
|
| /linux/drivers/media/pci/intel/ipu6/ |
| ipu6-mmu.c |
   447  ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),  in allocate_trash_buffer()
   458  mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);  in allocate_trash_buffer()
   464  ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),  in allocate_trash_buffer()
   465  PFN_PHYS(iova_size(iova)));  in allocate_trash_buffer()
   716  ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),  in ipu6_mmu_destroy()
   717  PFN_PHYS(iova_size(iova)));  in ipu6_mmu_destroy()
|
| /linux/arch/hexagon/kernel/ |
| dma.c |
    41  return dma_init_global_coherent(PFN_PHYS(max_low_pfn),  in hexagon_dma_init()
|
| /linux/arch/sh/kernel/ |
| setup.c |
   235  memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),  in __add_active_range()
|
| /linux/arch/arm64/include/asm/ |
| vmalloc.h |
    40  if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))  in arch_vmap_pte_range_map_size()
|
| /linux/arch/powerpc/include/asm/ |
| pte-walk.h |
    53  pa = PFN_PHYS(pte_pfn(*ptep));  in ppc_find_vmap_phys()
|
| /linux/arch/x86/include/asm/xen/ |
| page.h |
   233  return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);  in phys_to_machine()
   239  return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);  in machine_to_phys()
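The Xen page.h hits translate between guest-physical and machine addresses by splitting off the sub-page offset, converting the frame number (pfn to mfn or back), and recombining with PFN_PHYS(). A userspace sketch of that split/translate/recombine step; the lookup function below is a made-up stand-in, not Xen's real p2m machinery:

```c
/* Sketch of the offset-preserving frame translation pattern used by
 * phys_to_machine(); fake_pfn_to_mfn() stands in for the real p2m. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ULL << PAGE_SHIFT) - 1))
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

/* Pretend guest frame n maps to machine frame n + 16. */
static uint64_t fake_pfn_to_mfn(uint64_t pfn)
{
	return pfn + 16;
}

static uint64_t phys_to_machine_sketch(uint64_t paddr)
{
	uint64_t offset = paddr & ~PAGE_MASK;            /* sub-page offset survives */
	uint64_t mfn = fake_pfn_to_mfn(PFN_DOWN(paddr)); /* translate the frame only */

	return PFN_PHYS(mfn) | offset;
}

int main(void)
{
	uint64_t paddr = 0x12345abc;

	/* Frame 0x12345 -> 0x12355, offset 0xabc preserved: 0x12355abc. */
	printf("phys %#llx -> machine %#llx\n",
	       (unsigned long long)paddr,
	       (unsigned long long)phys_to_machine_sketch(paddr));
	return 0;
}
```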
|
| /linux/arch/alpha/kernel/ |
| setup.c |
   308  memblock_add(PFN_PHYS(cluster->start_pfn),  in setup_memory()
   315  memblock_reserve(PFN_PHYS(cluster->start_pfn),  in setup_memory()
   356  if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {  in setup_memory()
   357  if (!move_initrd(PFN_PHYS(max_low_pfn)))  in setup_memory()
   361  phys_to_virt(PFN_PHYS(max_low_pfn)));  in setup_memory()
|
| /linux/arch/mips/include/asm/mach-generic/ |
| spaces.h |
    26  # define PHYS_OFFSET  ((unsigned long)PFN_PHYS(ARCH_PFN_OFFSET))
|
| /linux/arch/powerpc/platforms/powernv/ |
| memtrace.c |
   126  arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);  in memtrace_alloc_node()
   128  return PFN_PHYS(start_pfn);  in memtrace_alloc_node()
|
| /linux/arch/arm64/kernel/ |
| setup.c |
   260  start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);  in reserve_memblock_reserved_regions()
   261  end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);  in reserve_memblock_reserved_regions()
|