
Searched refs:pfn (results 1–25 of 486), sorted by relevance


/linux/arch/x86/xen/
p2m.c
127 static inline unsigned p2m_top_index(unsigned long pfn) in p2m_top_index() argument
129 BUG_ON(pfn >= MAX_P2M_PFN); in p2m_top_index()
130 return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); in p2m_top_index()
133 static inline unsigned p2m_mid_index(unsigned long pfn) in p2m_mid_index() argument
135 return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; in p2m_mid_index()
170 static void p2m_init_identity(unsigned long *p2m, unsigned long pfn) in p2m_init_identity() argument
175 p2m[i] = IDENTITY_FRAME(pfn + i); in p2m_init_identity()
209 unsigned long pfn, mfn; in xen_build_mfn_list_list() local
232 for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN; in xen_build_mfn_list_list()
233 pfn += P2M_PER_PAGE) { in xen_build_mfn_list_list()
[all …]
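The two helpers above split a pfn across the top and mid levels of Xen's p2m tree by plain division and modulo. A minimal userspace sketch of the same split, assuming illustrative values P2M_PER_PAGE = P2M_MID_PER_PAGE = 512 (the kernel derives them from PAGE_SIZE) and a leaf index of pfn % P2M_PER_PAGE, which is not shown in the snippet and is an assumption here:

    #include <stdio.h>

    /* Illustrative constants; the kernel derives these from PAGE_SIZE. */
    #define P2M_PER_PAGE      512UL
    #define P2M_MID_PER_PAGE  512UL

    /* Mirrors p2m_top_index()/p2m_mid_index() from the snippet above. */
    static unsigned long top_index(unsigned long pfn)
    {
            return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
    }

    static unsigned long mid_index(unsigned long pfn)
    {
            return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
    }

    /* Assumed leaf index within one p2m page. */
    static unsigned long leaf_index(unsigned long pfn)
    {
            return pfn % P2M_PER_PAGE;
    }

    int main(void)
    {
            unsigned long pfn = 0x123456;

            printf("pfn %#lx -> top %lu, mid %lu, leaf %lu\n",
                   pfn, top_index(pfn), mid_index(pfn), leaf_index(pfn));
            return 0;
    }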
/linux/arch/x86/include/asm/xen/
page.h
56 extern int xen_alloc_p2m_entry(unsigned long pfn);
58 extern unsigned long get_phys_to_machine(unsigned long pfn);
59 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
60 extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
133 static inline unsigned long __pfn_to_mfn(unsigned long pfn) in __pfn_to_mfn() argument
137 if (pfn < xen_p2m_size) in __pfn_to_mfn()
138 mfn = xen_p2m_addr[pfn]; in __pfn_to_mfn()
139 else if (unlikely(pfn < xen_max_p2m_pfn)) in __pfn_to_mfn()
140 return get_phys_to_machine(pfn); in __pfn_to_mfn()
142 return IDENTITY_FRAME(pfn); in __pfn_to_mfn()
[all …]
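__pfn_to_mfn() tries the flat xen_p2m_addr[] array first, falls back to get_phys_to_machine() for pfns beyond the array, and otherwise treats the frame as identity-mapped. A standalone sketch of that lookup order, with a hypothetical table, a stubbed slow path, and an IDENTITY_BIT stand-in for IDENTITY_FRAME():

    #include <stdio.h>

    /* Hypothetical stand-ins for the Xen globals and IDENTITY_FRAME(). */
    #define P2M_SIZE      16UL
    #define MAX_P2M       32UL
    #define IDENTITY_BIT  (1UL << (8 * sizeof(unsigned long) - 2))

    static unsigned long p2m_addr[P2M_SIZE] = { 100, 101, 102 }; /* pfn -> mfn */

    static unsigned long slow_lookup(unsigned long pfn)
    {
            /* Placeholder for get_phys_to_machine(): walk the full tree. */
            return pfn; /* pretend identity, purely for the sketch */
    }

    static unsigned long pfn_to_mfn(unsigned long pfn)
    {
            if (pfn < P2M_SIZE)
                    return p2m_addr[pfn];       /* fast path: flat array */
            if (pfn < MAX_P2M)
                    return slow_lookup(pfn);    /* slow path: sparse tree */
            return pfn | IDENTITY_BIT;          /* identity-mapped frame */
    }

    int main(void)
    {
            printf("%lu %lu %#lx\n",
                   pfn_to_mfn(1), pfn_to_mfn(20), pfn_to_mfn(40));
            return 0;
    }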
/linux/tools/testing/scatterlist/
main.c
11 unsigned *pfn; member
28 #define pfn(...) (unsigned []){ __VA_ARGS__ } macro
42 printf(" %x", test->pfn[i]); in fail()
56 { -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 }, in main()
57 { 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 }, in main()
58 { 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 }, in main()
59 { 0, 1, pfn(0), NULL, 1, sgmax, 1 }, in main()
60 { 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 }, in main()
61 { 0, 2, pfn(1, 0), NULL, 2 * PAGE_SIZE, sgmax, 2 }, in main()
62 { 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 }, in main()
[all …]
/linux/arch/arm/xen/
p2m.c
23 unsigned long pfn; member
44 if (new->pfn == entry->pfn) in xen_add_phys_to_mach_entry()
47 if (new->pfn < entry->pfn) in xen_add_phys_to_mach_entry()
59 __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); in xen_add_phys_to_mach_entry()
64 unsigned long __pfn_to_mfn(unsigned long pfn) in __pfn_to_mfn() argument
74 if (entry->pfn <= pfn && in __pfn_to_mfn()
75 entry->pfn + entry->nr_pages > pfn) { in __pfn_to_mfn()
76 unsigned long mfn = entry->mfn + (pfn - entry->pfn); in __pfn_to_mfn()
80 if (pfn < entry->pfn) in __pfn_to_mfn()
150 bool __set_phys_to_machine_multi(unsigned long pfn, in __set_phys_to_machine_multi() argument
[all …]
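Each entry in this ARM p2m covers a run of nr_pages frames, so the lookup translates by offset within the matching range: mfn = entry->mfn + (pfn - entry->pfn). The kernel keeps the entries in an rbtree keyed by pfn; the sketch below uses a flat array purely for illustration, with hypothetical ranges:

    #include <stdio.h>

    struct p2m_entry {
            unsigned long pfn;       /* first guest frame in the range */
            unsigned long nr_pages;  /* length of the range */
            unsigned long mfn;       /* first machine frame it maps to */
    };

    /* Hypothetical ranges; the kernel stores these in an rbtree. */
    static struct p2m_entry entries[] = {
            { .pfn = 0x1000, .nr_pages = 16, .mfn = 0x8000 },
            { .pfn = 0x2000, .nr_pages =  4, .mfn = 0x9000 },
    };

    static unsigned long pfn_to_mfn(unsigned long pfn)
    {
            for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
                    struct p2m_entry *e = &entries[i];

                    if (e->pfn <= pfn && pfn < e->pfn + e->nr_pages)
                            return e->mfn + (pfn - e->pfn); /* offset in range */
            }
            return pfn; /* no match; the sketch just falls back to identity */
    }

    int main(void)
    {
            printf("%#lx %#lx\n", pfn_to_mfn(0x1003), pfn_to_mfn(0x3000));
            return 0;
    }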
/linux/include/trace/events/
ksm.h
132 TP_PROTO(unsigned long pfn, void *rmap_item, void *mm, int err),
134 TP_ARGS(pfn, rmap_item, mm, err),
137 __field(unsigned long, pfn)
144 __entry->pfn = pfn;
151 __entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
167 TP_PROTO(void *ksm_page, unsigned long pfn, void *rmap_item, void *mm, int err),
169 TP_ARGS(ksm_page, pfn, rmap_item, mm, err),
173 __field(unsigned long, pfn)
181 __entry->pfn = pfn;
189 __entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
[all …]
cma.h
13 TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
16 TP_ARGS(name, pfn, page, count),
20 __field(unsigned long, pfn)
27 __entry->pfn = pfn;
34 __entry->pfn,
72 TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
75 TP_ARGS(name, pfn, page, count, align, errorno),
79 __field(unsigned long, pfn)
88 __entry->pfn = pfn;
97 __entry->pfn,
[all …]
kmem.h
146 __field( unsigned long, pfn )
151 __entry->pfn = page_to_pfn(page);
156 pfn_to_page(__entry->pfn),
157 __entry->pfn,
168 __field( unsigned long, pfn )
172 __entry->pfn = page_to_pfn(page);
176 pfn_to_page(__entry->pfn),
177 __entry->pfn)
188 __field( unsigned long, pfn )
195 __entry->pfn = page ? page_to_pfn(page) : -1UL;
[all …]
/linux/mm/
memory-failure.c
83 void num_poisoned_pages_inc(unsigned long pfn) in num_poisoned_pages_inc() argument
86 memblk_nr_poison_inc(pfn); in num_poisoned_pages_inc()
89 void num_poisoned_pages_sub(unsigned long pfn, long i) in num_poisoned_pages_sub() argument
92 if (pfn != -1UL) in num_poisoned_pages_sub()
93 memblk_nr_poison_sub(pfn, i); in num_poisoned_pages_sub()
286 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) in kill_proc() argument
293 pfn, t->comm, task_pid_nr(t)); in kill_proc()
463 unsigned long pfn, int flags) in kill_procs() argument
471 pfn, tk->tsk->comm, task_pid_nr(tk->tsk)); in kill_procs()
482 else if (kill_proc(tk, pfn, flags) < 0) in kill_procs()
[all …]
memory_hotplug.c
319 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages) in check_pfn_span() argument
336 if (!IS_ALIGNED(pfn | nr_pages, min_align)) in check_pfn_span()
346 struct page *pfn_to_online_page(unsigned long pfn) in pfn_to_online_page() argument
348 unsigned long nr = pfn_to_section_nr(pfn); in pfn_to_online_page()
363 if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn)) in pfn_to_online_page()
366 if (!pfn_section_valid(ms, pfn)) in pfn_to_online_page()
370 return pfn_to_page(pfn); in pfn_to_online_page()
378 pgmap = get_dev_pagemap(pfn); in pfn_to_online_page()
385 return pfn_to_page(pfn); in pfn_to_online_page()
389 int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
[all …]
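check_pfn_span() validates both the start pfn and the length with a single test: IS_ALIGNED(pfn | nr_pages, min_align) holds only if each value is individually aligned, because OR-ing them preserves any low bit set in either operand. A small sketch of that trick, with an assumed min_align (the kernel picks it from the sparsemem configuration):

    #include <stdio.h>
    #include <stdbool.h>

    #define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

    /* Assumed alignment for the sketch. */
    #define MIN_ALIGN 2048UL

    static bool span_ok(unsigned long pfn, unsigned long nr_pages)
    {
            /* One check covers both values: a stray low bit in either
             * operand survives the OR and fails the alignment test. */
            return IS_ALIGNED(pfn | nr_pages, MIN_ALIGN);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   span_ok(0x10000, 0x800),   /* both aligned    -> 1 */
                   span_ok(0x10001, 0x800),   /* pfn unaligned   -> 0 */
                   span_ok(0x10000, 0x801));  /* count unaligned -> 0 */
            return 0;
    }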
page_idle.c
34 static struct folio *page_idle_get_folio(unsigned long pfn) in page_idle_get_folio() argument
36 struct page *page = pfn_to_online_page(pfn); in page_idle_get_folio()
124 unsigned long pfn, end_pfn; in page_idle_bitmap_read() local
130 pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_read()
131 if (pfn >= max_pfn) in page_idle_bitmap_read()
134 end_pfn = pfn + count * BITS_PER_BYTE; in page_idle_bitmap_read()
138 for (; pfn < end_pfn; pfn++) { in page_idle_bitmap_read()
139 bit = pfn % BITMAP_CHUNK_BITS; in page_idle_bitmap_read()
142 folio = page_idle_get_folio(pfn); in page_idle_bitmap_read()
169 unsigned long pfn, end_pfn; in page_idle_bitmap_write() local
[all …]
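The idle bitmap exposes one bit per pfn, so a byte offset into the file maps to pfn = pos * BITS_PER_BYTE, and each pfn selects bit pfn % BITMAP_CHUNK_BITS within its chunk. A sketch of just that offset arithmetic, with an illustrative chunk size:

    #include <stdio.h>

    #define BITS_PER_BYTE      8UL
    #define BITMAP_CHUNK_BITS  64UL   /* illustrative: one 64-bit word per chunk */

    int main(void)
    {
            unsigned long pos = 3;                    /* byte offset into the file */
            unsigned long pfn = pos * BITS_PER_BYTE;  /* first pfn covered: 24 */
            unsigned long end_pfn = pfn + 2 * BITS_PER_BYTE; /* two bytes' worth */

            for (; pfn < end_pfn; pfn++) {
                    unsigned long chunk = pfn / BITMAP_CHUNK_BITS;
                    unsigned long bit = pfn % BITMAP_CHUNK_BITS;

                    printf("pfn %lu -> chunk %lu, bit %lu\n", pfn, chunk, bit);
            }
            return 0;
    }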
page_owner.c
431 unsigned long pfn, block_end_pfn; in pagetypeinfo_showmixedcount_print() local
438 pfn = zone->zone_start_pfn; in pagetypeinfo_showmixedcount_print()
445 for (; pfn < end_pfn; ) { in pagetypeinfo_showmixedcount_print()
446 page = pfn_to_online_page(pfn); in pagetypeinfo_showmixedcount_print()
448 pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES); in pagetypeinfo_showmixedcount_print()
452 block_end_pfn = pageblock_end_pfn(pfn); in pagetypeinfo_showmixedcount_print()
457 for (; pfn < block_end_pfn; pfn++) { in pagetypeinfo_showmixedcount_print()
459 page = pfn_to_page(pfn); in pagetypeinfo_showmixedcount_print()
469 pfn += (1UL << freepage_order) - 1; in pagetypeinfo_showmixedcount_print()
491 pfn = block_end_pfn; in pagetypeinfo_showmixedcount_print()
[all …]
mm_init.c
581 void __meminit __init_single_page(struct page *page, unsigned long pfn, in __init_single_page() argument
585 set_page_links(page, zone, nid, pfn); in __init_single_page()
595 set_page_address(page, __va(pfn << PAGE_SHIFT)); in __init_single_page()
616 static int __meminit __early_pfn_to_nid(unsigned long pfn, in __early_pfn_to_nid() argument
622 if (state->last_start <= pfn && pfn < state->last_end) in __early_pfn_to_nid()
625 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); in __early_pfn_to_nid()
635 int __meminit early_pfn_to_nid(unsigned long pfn) in early_pfn_to_nid() argument
641 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); in early_pfn_to_nid()
672 void __meminit __init_page_from_nid(unsigned long pfn, int nid) in __init_page_from_nid() argument
682 if (zone_spans_pfn(zone, pfn)) in __init_page_from_nid()
[all …]
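__early_pfn_to_nid() memoizes the last region it resolved, so repeated queries for neighbouring pfns skip the memblock search entirely. A userspace sketch of that cache pattern, with a hypothetical region table standing in for memblock:

    #include <stdio.h>

    struct region { unsigned long start, end; int nid; };

    /* Hypothetical memory map standing in for memblock. */
    static const struct region regions[] = {
            { 0x00000, 0x40000, 0 },
            { 0x40000, 0x80000, 1 },
    };

    struct nid_cache { unsigned long last_start, last_end; int last_nid; };

    static int pfn_to_nid(unsigned long pfn, struct nid_cache *c)
    {
            if (c->last_start <= pfn && pfn < c->last_end)
                    return c->last_nid;                    /* cache hit */

            for (unsigned i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
                    if (regions[i].start <= pfn && pfn < regions[i].end) {
                            c->last_start = regions[i].start;  /* refill cache */
                            c->last_end   = regions[i].end;
                            c->last_nid   = regions[i].nid;
                            return c->last_nid;
                    }
            }
            return -1; /* not covered by any region */
    }

    int main(void)
    {
            struct nid_cache cache = { 0, 0, -1 };

            printf("%d %d %d\n", pfn_to_nid(0x100, &cache),
                   pfn_to_nid(0x101, &cache), pfn_to_nid(0x50000, &cache));
            return 0;
    }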
/linux/scripts/gdb/linux/
page_owner.py
56 pfn = int(argv[1])
57 self.read_page_owner_by_addr(self.p_ops.pfn_to_page(pfn))
80 pfn = self.p_ops.page_to_pfn(page)
81 section = self.p_ops.pfn_to_section(pfn)
85 return self.get_entry(page_ext, pfn)
100 pfn = self.p_ops.page_to_pfn(page)
102 if pfn < self.min_pfn or pfn > self.max_pfn or (not self.p_ops.pfn_valid(pfn)):
106 page = self.p_ops.pfn_to_page(pfn)
130 gdb.write("PFN: %d, Flags: 0x%x\n" % (pfn, page['flags']))
145 pfn = self.min_pfn
[all …]
mm.py
138 def pfn_to_section_nr(self, pfn): argument
139 return pfn >> self.PFN_SECTION_SHIFT
144 def __pfn_to_section(self, pfn): argument
145 return self.__nr_to_section(self.pfn_to_section_nr(pfn))
147 def pfn_to_section(self, pfn): argument
148 return self.__pfn_to_section(pfn)
150 def subsection_map_index(self, pfn): argument
151 return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION
153 def pfn_section_valid(self, ms, pfn): argument
155 idx = self.subsection_map_index(pfn)
[all …]
/linux/include/asm-generic/
memory_model.h
18 #define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) argument
26 static inline int pfn_valid(unsigned long pfn) in pfn_valid() argument
30 return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr; in pfn_valid()
35 #define for_each_valid_pfn(pfn, start_pfn, end_pfn) \ argument
36 for ((pfn) = max_t(unsigned long, (start_pfn), ARCH_PFN_OFFSET); \
37 (pfn) < min_t(unsigned long, (end_pfn), \
39 (pfn)++)
46 #define __pfn_to_page(pfn) (vmemmap + (pfn)) argument
60 #define __pfn_to_page(pfn) \ argument
61 ({ unsigned long __pfn = (pfn); \
[all …]
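Under FLATMEM the pfn/page conversion is plain array arithmetic on mem_map, offset by ARCH_PFN_OFFSET; the vmemmap variant drops even the offset. A minimal sketch of the flat model with a mock page array and illustrative constants:

    #include <stdio.h>
    #include <stdbool.h>

    struct page { unsigned long flags; };   /* mock struct page */

    #define ARCH_PFN_OFFSET  0x100UL        /* illustrative: RAM starts at pfn 0x100 */
    #define MAX_MAPNR        16UL

    static struct page mem_map[MAX_MAPNR];  /* one struct page per managed pfn */

    static struct page *pfn_to_page(unsigned long pfn)
    {
            return mem_map + (pfn - ARCH_PFN_OFFSET);
    }

    static unsigned long page_to_pfn(struct page *page)
    {
            return (unsigned long)(page - mem_map) + ARCH_PFN_OFFSET;
    }

    static bool pfn_valid(unsigned long pfn)
    {
            return pfn >= ARCH_PFN_OFFSET && (pfn - ARCH_PFN_OFFSET) < MAX_MAPNR;
    }

    int main(void)
    {
            unsigned long pfn = 0x105;

            printf("valid=%d round-trip pfn=%#lx\n",
                   pfn_valid(pfn), page_to_pfn(pfn_to_page(pfn)));
            return 0;
    }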
/linux/arch/x86/hyperv/
ivm.c
486 u64 pfn; member
497 u64 pfn; in hv_list_enc_add() local
501 pfn = pfn_list[i]; in hv_list_enc_add()
506 if ((ent->pfn <= pfn) && (ent->pfn + ent->count - 1 >= pfn)) in hv_list_enc_add()
519 if (ent->pfn + ent->count == pfn) { in hv_list_enc_add()
523 } else if (pfn + 1 == ent->pfn) { in hv_list_enc_add()
525 ent->pfn--; in hv_list_enc_add()
537 ent->pfn = pfn; in hv_list_enc_add()
555 u64 pfn; in hv_list_enc_remove() local
559 pfn = pfn_list[i]; in hv_list_enc_remove()
[all …]
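hv_list_enc_add() keeps (pfn, count) ranges and extends an existing range when the new pfn is adjacent on either side, only allocating a new list node otherwise. A sketch of the same coalescing decision on a single range, with hypothetical types and values:

    #include <stdio.h>
    #include <stdbool.h>

    struct pfn_range { unsigned long long pfn; unsigned long long count; };

    /* Returns true if the pfn was absorbed into the range, false if the
     * caller must start a new range (the kernel allocates a list node). */
    static bool absorb_pfn(struct pfn_range *ent, unsigned long long pfn)
    {
            if (ent->pfn <= pfn && pfn <= ent->pfn + ent->count - 1)
                    return true;                  /* already covered */
            if (ent->pfn + ent->count == pfn) {
                    ent->count++;                 /* extend at the end */
                    return true;
            }
            if (pfn + 1 == ent->pfn) {
                    ent->pfn--;                   /* extend at the front */
                    ent->count++;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            struct pfn_range r = { .pfn = 100, .count = 4 }; /* covers 100..103 */

            printf("%d %d %d\n", absorb_pfn(&r, 104),   /* append   -> 1 */
                   absorb_pfn(&r, 99),                  /* prepend  -> 1 */
                   absorb_pfn(&r, 200));                /* disjoint -> 0 */
            printf("range now %llu..%llu\n", r.pfn, r.pfn + r.count - 1);
            return 0;
    }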
/linux/arch/arm/mach-omap2/
io.c
68 .pfn = __phys_to_pfn(L3_24XX_PHYS),
74 .pfn = __phys_to_pfn(L4_24XX_PHYS),
84 .pfn = __phys_to_pfn(DSP_MEM_2420_PHYS),
90 .pfn = __phys_to_pfn(DSP_IPI_2420_PHYS),
96 .pfn = __phys_to_pfn(DSP_MMU_2420_PHYS),
108 .pfn = __phys_to_pfn(L4_WK_243X_PHYS),
114 .pfn = __phys_to_pfn(OMAP243X_GPMC_PHYS),
120 .pfn = __phys_to_pfn(OMAP243X_SDRC_PHYS),
126 .pfn = __phys_to_pfn(OMAP243X_SMS_PHYS),
138 .pfn = __phys_to_pfn(L3_34XX_PHYS),
[all …]
/linux/include/linux/
pageblock-flags.h
76 #define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages) argument
77 #define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages) argument
78 #define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages) argument
79 #define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages) argument
85 unsigned long pfn);
86 bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
88 void set_pfnblock_bit(const struct page *page, unsigned long pfn,
90 void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
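These helpers are all power-of-two alignment arithmetic on pageblock_nr_pages. A sketch of the same macros in userspace, assuming an illustrative pageblock size of 512 pages; note that pageblock_end_pfn() aligns pfn + 1 up, so an already-aligned pfn still yields the end of its own block rather than itself:

    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES  512UL   /* illustrative; really 1 << pageblock_order */

    #define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))
    #define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1))
    #define IS_ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)

    #define pageblock_align(pfn)      ALIGN((pfn), PAGEBLOCK_NR_PAGES)
    #define pageblock_aligned(pfn)    IS_ALIGNED((pfn), PAGEBLOCK_NR_PAGES)
    #define pageblock_start_pfn(pfn)  ALIGN_DOWN((pfn), PAGEBLOCK_NR_PAGES)
    #define pageblock_end_pfn(pfn)    ALIGN((pfn) + 1, PAGEBLOCK_NR_PAGES)

    int main(void)
    {
            unsigned long pfn = 1000;

            printf("pfn %lu: block %lu..%lu, aligned=%d, align-up=%lu\n",
                   pfn, pageblock_start_pfn(pfn), pageblock_end_pfn(pfn),
                   pageblock_aligned(pfn), pageblock_align(pfn));
            return 0;
    }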
mmzone.h
36 #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES) argument
102 # define is_migrate_cma_folio(folio, pfn) \ argument
103 (get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
107 # define is_migrate_cma_folio(folio, pfn) false argument
1122 static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) in zone_spans_pfn() argument
1124 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); in zone_spans_pfn()
1838 #define pfn_to_nid(pfn) (0) argument
1862 static inline unsigned long pfn_to_section_nr(unsigned long pfn) in pfn_to_section_nr() argument
1864 return pfn >> PFN_SECTION_SHIFT; in pfn_to_section_nr()
1871 #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) argument
[all …]
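Sections are fixed power-of-two spans of pfns, so pfn_to_section_nr() is a single shift, and zone_spans_pfn() is a half-open range test against the zone bounds. A sketch with an assumed PFN_SECTION_SHIFT and a hypothetical zone:

    #include <stdio.h>
    #include <stdbool.h>

    /* Assumed: 1 << 15 = 32768 pages per section (128 MiB of 4 KiB pages). */
    #define PFN_SECTION_SHIFT  15

    struct zone { unsigned long zone_start_pfn, spanned_pages; };

    static unsigned long pfn_to_section_nr(unsigned long pfn)
    {
            return pfn >> PFN_SECTION_SHIFT;
    }

    static unsigned long zone_end_pfn(const struct zone *z)
    {
            return z->zone_start_pfn + z->spanned_pages;
    }

    static bool zone_spans_pfn(const struct zone *z, unsigned long pfn)
    {
            return z->zone_start_pfn <= pfn && pfn < zone_end_pfn(z);
    }

    int main(void)
    {
            struct zone dma32 = { .zone_start_pfn = 0x1000, .spanned_pages = 0xff000 };

            printf("section %lu, spans=%d\n",
                   pfn_to_section_nr(0x12345), zone_spans_pfn(&dma32, 0x12345));
            return 0;
    }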
/linux/arch/x86/virt/svm/
sev.c
672 static struct rmpentry_raw *get_raw_rmpentry(u64 pfn) in get_raw_rmpentry() argument
680 paddr = pfn << PAGE_SHIFT; in get_raw_rmpentry()
701 static int get_rmpentry(u64 pfn, struct rmpentry *e) in get_rmpentry() argument
711 : "a" (pfn << PAGE_SHIFT), "c" (e) in get_rmpentry()
717 e_raw = get_raw_rmpentry(pfn); in get_rmpentry()
737 static int __snp_lookup_rmpentry(u64 pfn, struct rmpentry *e, int *level) in __snp_lookup_rmpentry() argument
745 ret = get_rmpentry(pfn, e); in __snp_lookup_rmpentry()
754 ret = get_rmpentry(pfn & PFN_PMD_MASK, &e_large); in __snp_lookup_rmpentry()
763 int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) in snp_lookup_rmpentry() argument
768 ret = __snp_lookup_rmpentry(pfn, &e, level); in snp_lookup_rmpentry()
[all …]
/linux/arch/riscv/include/asm/
pgalloc.h
21 unsigned long pfn = virt_to_pfn(pte); in pmd_populate_kernel() local
23 set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); in pmd_populate_kernel()
29 unsigned long pfn = virt_to_pfn(page_address(pte)); in pmd_populate() local
31 set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); in pmd_populate()
37 unsigned long pfn = virt_to_pfn(pmd); in pud_populate() local
39 set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); in pud_populate()
45 unsigned long pfn = virt_to_pfn(pud); in p4d_populate() local
47 set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); in p4d_populate()
55 unsigned long pfn = virt_to_pfn(pud); in p4d_populate_safe() local
58 __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); in p4d_populate_safe()
[all …]
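Each populate helper here encodes the physical frame of the next-level table as (pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE. A sketch of that encode/decode step; the constants below are assumptions modelled on the RISC-V Sv39/Sv48 PTE layout, not values copied from the arch headers:

    #include <stdio.h>

    /* Assumed values for illustration only. */
    #define _PAGE_PFN_SHIFT  10
    #define _PAGE_TABLE      0x1ULL   /* valid bit, no R/W/X: points to next level */

    static unsigned long long make_table_entry(unsigned long long pfn)
    {
            return (pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE;
    }

    static unsigned long long entry_to_pfn(unsigned long long entry)
    {
            return entry >> _PAGE_PFN_SHIFT;
    }

    int main(void)
    {
            unsigned long long pfn = 0x80212;
            unsigned long long pmd = make_table_entry(pfn);

            printf("entry %#llx -> pfn %#llx\n", pmd, entry_to_pfn(pmd));
            return 0;
    }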
/linux/include/xen/arm/
page.h
15 #define phys_to_machine_mapping_valid(pfn) (1) argument
43 unsigned long __pfn_to_mfn(unsigned long pfn);
47 static inline unsigned long pfn_to_gfn(unsigned long pfn) in pfn_to_gfn() argument
49 return pfn; in pfn_to_gfn()
58 static inline unsigned long pfn_to_bfn(unsigned long pfn) in pfn_to_bfn() argument
63 mfn = __pfn_to_mfn(pfn); in pfn_to_bfn()
68 return pfn; in pfn_to_bfn()
103 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
104 bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
107 static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) in set_phys_to_machine() argument
[all …]
/linux/arch/arm/mm/
flush.c
38 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) in flush_pfn_alias() argument
43 set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL)); in flush_pfn_alias()
52 static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) in flush_icache_alias() argument
58 set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL)); in flush_icache_alias()
98 void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsi… in flush_cache_pages() argument
101 vivt_flush_cache_pages(vma, user_addr, pfn, nr); in flush_cache_pages()
106 flush_pfn_alias(pfn, user_addr); in flush_cache_pages()
115 #define flush_pfn_alias(pfn,vaddr) do { } while (0) argument
116 #define flush_icache_alias(pfn,vaddr,len) do { } while (0) argument
255 unsigned long start, offset, pfn; in __flush_dcache_aliases() local
[all …]
fault-armv.c
37 unsigned long pfn, pte_t *ptep) in do_adjust_pte() argument
52 flush_cache_page(vma, address, pfn); in do_adjust_pte()
53 outer_flush_range((pfn << PAGE_SHIFT), in do_adjust_pte()
54 (pfn << PAGE_SHIFT) + PAGE_SIZE); in do_adjust_pte()
65 unsigned long pfn, bool need_lock) in adjust_pte() argument
114 ret = do_adjust_pte(vma, address, pfn, pte); in adjust_pte()
125 unsigned long addr, pte_t *ptep, unsigned long pfn) in make_coherent() argument
167 aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock); in make_coherent()
171 do_adjust_pte(vma, addr, pfn, ptep); in make_coherent()
190 unsigned long pfn = pte_pfn(*ptep); in update_mmu_cache_range() local
[all …]
/linux/arch/arm64/kvm/hyp/nvhe/
mem_protect.c
106 unsigned long nr_pages, pfn; in prepare_s2_pool() local
109 pfn = hyp_virt_to_pfn(pgt_pool_base); in prepare_s2_pool()
111 ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0); in prepare_s2_pool()
376 static bool pfn_range_is_valid(u64 pfn, u64 nr_pages) in pfn_range_is_valid() argument
380 return pfn < limit && ((limit - pfn) >= nr_pages); in pfn_range_is_valid()
728 int __pkvm_host_share_hyp(u64 pfn) in __pkvm_host_share_hyp() argument
730 u64 phys = hyp_pfn_to_phys(pfn); in __pkvm_host_share_hyp()
754 int __pkvm_host_unshare_hyp(u64 pfn) in __pkvm_host_unshare_hyp() argument
756 u64 phys = hyp_pfn_to_phys(pfn); in __pkvm_host_unshare_hyp()
785 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages) in __pkvm_host_donate_hyp() argument
[all …]
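pfn_range_is_valid() checks the range as pfn < limit && (limit - pfn) >= nr_pages rather than pfn + nr_pages <= limit, so a huge nr_pages cannot wrap around and slip past the bound. A sketch contrasting the two forms with a hypothetical limit:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool range_valid_safe(uint64_t pfn, uint64_t nr_pages, uint64_t limit)
    {
            /* Subtraction cannot overflow once pfn < limit is known. */
            return pfn < limit && (limit - pfn) >= nr_pages;
    }

    static bool range_valid_naive(uint64_t pfn, uint64_t nr_pages, uint64_t limit)
    {
            /* pfn + nr_pages can wrap to a small value and pass the test. */
            return pfn + nr_pages <= limit;
    }

    int main(void)
    {
            uint64_t limit = 1u << 20;        /* hypothetical end-of-memory pfn */
            uint64_t huge  = UINT64_MAX - 10; /* absurdly large nr_pages */

            printf("safe: %d  naive: %d\n",
                   range_valid_safe(100, huge, limit),
                   range_valid_naive(100, huge, limit));
            return 0;
    }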
