Lines Matching defs:pfn
59 * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
60 * non-identity pfn. To protect ourselves against that, we elect to set (and get) the
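
The fragment at lines 59-60 above gives the rationale for tagging identity entries rather than storing the bare PFN: without a tag, a coincidental pfn_to_mfn(pfn) == pfn would be indistinguishable from a real identity mapping. A minimal user-space sketch of that tagging, assuming IDENTITY_FRAME_BIT and IDENTITY_FRAME mirror their definitions in asm/xen/page.h (the bit value here is an assumption, not quoted from the listing):

    #include <stdio.h>
    #include <stdbool.h>

    /* Assumed to mirror asm/xen/page.h; not copied verbatim. */
    #define BITS_PER_LONG      (8 * sizeof(unsigned long))
    #define IDENTITY_FRAME_BIT (1UL << (BITS_PER_LONG - 2))
    #define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)

    /* An entry is "identity" only if the tag bit is set, even when mfn == pfn. */
    static bool is_identity_entry(unsigned long entry)
    {
            return entry & IDENTITY_FRAME_BIT;
    }

    int main(void)
    {
            unsigned long pfn = 0x1234;

            /* Coincidental pfn == mfn: still distinguishable from identity. */
            unsigned long plain = pfn;
            unsigned long tagged = IDENTITY_FRAME(pfn);

            printf("plain:  identity=%d\n", is_identity_entry(plain));
            printf("tagged: identity=%d, mfn=0x%lx\n",
                   is_identity_entry(tagged), tagged & ~IDENTITY_FRAME_BIT);
            return 0;
    }
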
127 static inline unsigned p2m_top_index(unsigned long pfn)
129 BUG_ON(pfn >= MAX_P2M_PFN);
130 return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
133 static inline unsigned p2m_mid_index(unsigned long pfn)
135 return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
170 static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
175 p2m[i] = IDENTITY_FRAME(pfn + i);
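
Lines 127-175 above split a pfn into the indices of a three-level tree and pre-fill a leaf page with identity entries. A small standalone sketch of that decomposition and fill, assuming P2M_PER_PAGE and P2M_MID_PER_PAGE are both 512 (PAGE_SIZE / sizeof(unsigned long) on x86-64); p2m_index() is the obvious third helper and is an assumption here, as it does not appear in the listing:

    #include <stdio.h>

    /* Assumed values: PAGE_SIZE / sizeof(unsigned long) on x86-64. */
    #define P2M_PER_PAGE       512UL
    #define P2M_MID_PER_PAGE   512UL
    #define IDENTITY_FRAME_BIT (1UL << (8 * sizeof(unsigned long) - 2))
    #define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)

    static unsigned p2m_top_index(unsigned long pfn)
    {
            return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
    }

    static unsigned p2m_mid_index(unsigned long pfn)
    {
            return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
    }

    static unsigned p2m_index(unsigned long pfn)
    {
            return pfn % P2M_PER_PAGE;
    }

    /* Fill one leaf page with tagged identity entries, as line 175 does. */
    static void leaf_init_identity(unsigned long *leaf, unsigned long base_pfn)
    {
            for (unsigned long i = 0; i < P2M_PER_PAGE; i++)
                    leaf[i] = IDENTITY_FRAME(base_pfn + i);
    }

    int main(void)
    {
            unsigned long pfn = 0x123456;
            static unsigned long leaf[P2M_PER_PAGE];

            printf("pfn 0x%lx -> top %u, mid %u, idx %u\n",
                   pfn, p2m_top_index(pfn), p2m_mid_index(pfn), p2m_index(pfn));

            leaf_init_identity(leaf, pfn & ~(P2M_PER_PAGE - 1));
            printf("leaf[idx] = 0x%lx\n", leaf[p2m_index(pfn)]);
            return 0;
    }
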
209 unsigned long pfn, mfn;
232 for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
233 pfn += P2M_PER_PAGE) {
234 topidx = p2m_top_index(pfn);
235 mididx = p2m_mid_index(pfn);
238 ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
252 pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
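
Lines 209-252 above walk the p2m one leaf page (P2M_PER_PAGE pfns) at a time and, when a whole mid level turns out to be missing, jump ahead by (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE so the loop increment lands on the next mid boundary. A sketch of just that stride pattern; mid_is_missing() is a made-up stand-in for the lookup_address()-based check in the source:

    #include <stdbool.h>
    #include <stdio.h>

    #define P2M_PER_PAGE     512UL
    #define P2M_MID_PER_PAGE 512UL

    /* Made-up stand-in for the lookup_address()-based check in the source. */
    static bool mid_is_missing(unsigned long pfn)
    {
            return pfn >= (1UL << 18);   /* pretend this region is unpopulated */
    }

    int main(void)
    {
            unsigned long max_pfn = 1UL << 20;
            unsigned long visited = 0, skipped = 0;

            /* One leaf page per iteration; the source derives topidx/mididx here. */
            for (unsigned long pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
                    if (mid_is_missing(pfn)) {
                            /* Skip the rest of this mid page in one stride. */
                            pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
                            skipped++;
                            continue;
                    }
                    visited++;
            }
            printf("leaf pages visited: %lu, mid pages skipped: %lu\n",
                   visited, skipped);
            return 0;
    }
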
287 unsigned long pfn;
292 for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
293 xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;
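
Lines 287-293 above invalidate every entry between the domain's populated page count and the end of the supplied p2m list. A tiny sketch of that tail invalidation, assuming INVALID_P2M_ENTRY is ~0UL (an assumption mirroring asm/xen/page.h):

    #include <stdio.h>

    #define INVALID_P2M_ENTRY (~0UL)

    /* Mark [nr_pages, p2m_size) invalid so later lookups see "missing". */
    static void invalidate_tail(unsigned long *p2m, unsigned long nr_pages,
                                unsigned long p2m_size)
    {
            for (unsigned long pfn = nr_pages; pfn < p2m_size; pfn++)
                    p2m[pfn] = INVALID_P2M_ENTRY;
    }

    int main(void)
    {
            unsigned long p2m[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

            invalidate_tail(p2m, 4, 8);
            for (int i = 0; i < 8; i++)
                    printf("p2m[%d] = 0x%lx\n", i, p2m[i]);
            return 0;
    }
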
303 static int xen_p2m_elem_type(unsigned long pfn)
307 if (pfn >= xen_p2m_size)
310 mfn = xen_p2m_addr[pfn];
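
Lines 303-310 above classify an entry by its value: beyond the stored list, invalid, identity-tagged, or an ordinary translation. A sketch of that classification against a small array; the enum here is local to the sketch and its ordering is not taken from the source:

    #include <stdio.h>

    #define INVALID_P2M_ENTRY  (~0UL)
    #define IDENTITY_FRAME_BIT (1UL << (8 * sizeof(unsigned long) - 2))

    enum p2m_type { P2M_TYPE_EXTRA, P2M_TYPE_MISSING, P2M_TYPE_IDENTITY, P2M_TYPE_PFN };

    static enum p2m_type elem_type(const unsigned long *p2m, unsigned long size,
                                   unsigned long pfn)
    {
            if (pfn >= size)
                    return P2M_TYPE_EXTRA;        /* beyond the stored list */
            if (p2m[pfn] == INVALID_P2M_ENTRY)
                    return P2M_TYPE_MISSING;      /* no backing frame */
            if (p2m[pfn] & IDENTITY_FRAME_BIT)
                    return P2M_TYPE_IDENTITY;     /* pfn == mfn, tagged */
            return P2M_TYPE_PFN;                  /* normal translation */
    }

    int main(void)
    {
            unsigned long p2m[3] = { 0x1000, INVALID_P2M_ENTRY,
                                     2 | IDENTITY_FRAME_BIT };

            for (unsigned long pfn = 0; pfn < 4; pfn++)
                    printf("pfn %lu -> type %d\n", pfn, elem_type(p2m, 3, pfn));
            return 0;
    }
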
324 unsigned long pfn;
346 for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
357 chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
360 type = xen_p2m_elem_type(pfn);
364 if (xen_p2m_elem_type(pfn + i) != type)
373 copy_page(mfns, xen_p2m_addr + pfn);
374 ptep = populate_extra_pte((unsigned long)(p2m + pfn));
384 ptep = populate_extra_pte((unsigned long)(p2m + pfn));
395 (unsigned long)(p2m + pfn) + i * PMD_SIZE);
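
Lines 324-395 above rebuild the p2m in chunks: the candidate chunk is a full mid page only when the pfn sits on a mid-page boundary, and it is shrunk back to one leaf page unless every entry in it has the same type. A sketch of just that chunk-size decision; elem_type() here is a made-up classifier standing in for xen_p2m_elem_type():

    #include <stdio.h>

    #define P2M_PER_PAGE     512UL
    #define P2M_MID_PER_PAGE 512UL

    /* Made-up classifier; stands in for xen_p2m_elem_type(). */
    static int elem_type(unsigned long pfn)
    {
            return pfn < (1UL << 18) ? 0 /* "pfn" */ : 1 /* "missing" */;
    }

    int main(void)
    {
            unsigned long max_pfn = 1UL << 19;
            unsigned long chunk = P2M_PER_PAGE;

            for (unsigned long pfn = 0; pfn < max_pfn; pfn += chunk) {
                    /* Big chunk only at a mid-page boundary. */
                    chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
                            P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

                    /* Keep the big chunk only if the whole run shares one type. */
                    int type = elem_type(pfn);
                    unsigned long i;
                    for (i = 1; i < chunk; i++)
                            if (elem_type(pfn + i) != type)
                                    break;
                    if (i < chunk)
                            chunk = P2M_PER_PAGE;

                    printf("pfn 0x%lx: chunk %lu, type %d\n", pfn, chunk, type);
            }
            return 0;
    }
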
425 unsigned long get_phys_to_machine(unsigned long pfn)
430 if (unlikely(pfn >= xen_p2m_size)) {
431 if (pfn < xen_max_p2m_pfn)
432 return xen_chk_extra_mem(pfn);
434 return IDENTITY_FRAME(pfn);
437 ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
446 return IDENTITY_FRAME(pfn);
448 return xen_p2m_addr[pfn];
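
Lines 425-448 above show the read path of get_phys_to_machine(): a pfn beyond the stored list falls back to xen_chk_extra_mem() or an identity frame, and a leaf still backed by the shared identity page returns IDENTITY_FRAME(pfn) rather than whatever placeholder is stored there. A simplified user-space model of those three outcomes, replacing the lookup_address() page-table probe with a per-leaf pointer comparison:

    #include <stdio.h>

    #define P2M_PER_PAGE       512UL
    #define IDENTITY_FRAME_BIT (1UL << (8 * sizeof(unsigned long) - 2))
    #define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)

    /* One shared leaf standing in for the kernel's shared identity page. */
    static unsigned long identity_leaf[P2M_PER_PAGE];

    struct p2m_model {
            unsigned long **leaves;   /* leaf pages, possibly the shared one */
            unsigned long nr_leaves;
    };

    static unsigned long model_get(struct p2m_model *m, unsigned long pfn)
    {
            unsigned long leaf = pfn / P2M_PER_PAGE;

            if (leaf >= m->nr_leaves)               /* beyond the stored list */
                    return IDENTITY_FRAME(pfn);

            if (m->leaves[leaf] == identity_leaf)   /* shared identity leaf */
                    return IDENTITY_FRAME(pfn);

            return m->leaves[leaf][pfn % P2M_PER_PAGE];
    }

    int main(void)
    {
            static unsigned long real_leaf[P2M_PER_PAGE] = { [0] = 0x4242 };
            unsigned long *leaves[2] = { real_leaf, identity_leaf };
            struct p2m_model m = { leaves, 2 };

            printf("pfn 0    -> 0x%lx\n", model_get(&m, 0));
            printf("pfn 600  -> 0x%lx\n", model_get(&m, 600));
            printf("pfn 5000 -> 0x%lx\n", model_get(&m, 5000));
            return 0;
    }
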
515 * Fully allocate the p2m structure for a given pfn. We need to check
521 int xen_alloc_p2m_entry(unsigned long pfn)
528 unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
542 if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
543 topidx = p2m_top_index(pfn);
587 p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
599 mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
610 if (pfn >= xen_p2m_last_pfn) {
611 xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
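
Lines 515-611 above show xen_alloc_p2m_entry() giving a pfn private backing on demand: if the covering leaf is still one of the shared read-only pages, a fresh page is allocated, pre-filled (as identity when the old leaf was the identity page, otherwise as invalid), and installed, and xen_p2m_last_pfn is rounded up to the next leaf boundary. A copy-on-first-use sketch of that idea, with malloc() standing in for the kernel allocator and a plain pointer array standing in for the page tables:

    #include <stdio.h>
    #include <stdlib.h>

    #define P2M_PER_PAGE       512UL
    #define INVALID_P2M_ENTRY  (~0UL)
    #define IDENTITY_FRAME_BIT (1UL << (8 * sizeof(unsigned long) - 2))
    #define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)
    #define ALIGN_UP(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    static unsigned long identity_leaf[P2M_PER_PAGE];  /* shared, read-only */
    static unsigned long missing_leaf[P2M_PER_PAGE];   /* shared, read-only */

    static unsigned long *leaves[4] = { missing_leaf, identity_leaf,
                                        missing_leaf, missing_leaf };
    static unsigned long last_pfn;

    /* Give the leaf covering pfn private, writable storage if it is shared. */
    static int alloc_p2m_entry(unsigned long pfn)
    {
            unsigned long idx = pfn / P2M_PER_PAGE;
            unsigned long *old = leaves[idx];

            if (old == identity_leaf || old == missing_leaf) {
                    unsigned long *fresh = malloc(P2M_PER_PAGE * sizeof(*fresh));
                    if (!fresh)
                            return -1;
                    for (unsigned long i = 0; i < P2M_PER_PAGE; i++)
                            fresh[i] = (old == identity_leaf) ?
                                    IDENTITY_FRAME((pfn & ~(P2M_PER_PAGE - 1)) + i) :
                                    INVALID_P2M_ENTRY;
                    leaves[idx] = fresh;
            }
            /* Bump the high-water mark to the next leaf boundary. */
            if (pfn >= last_pfn)
                    last_pfn = ALIGN_UP(pfn + 1, P2M_PER_PAGE);
            return 0;
    }

    int main(void)
    {
            if (alloc_p2m_entry(700) < 0)              /* leaf 1 was the identity page */
                    return 1;
            leaves[1][700 % P2M_PER_PAGE] = 0x9999;    /* now safe to write */
            printf("entry 700 = 0x%lx, last_pfn = %lu\n",
                   leaves[1][700 % P2M_PER_PAGE], last_pfn);
            return 0;
    }
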
622 unsigned long pfn;
633 for (pfn = pfn_s; pfn < pfn_e; pfn++)
634 xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);
636 return pfn - pfn_s;
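
Lines 622-636 above tag a whole pfn range as identity-mapped and return the number of entries written. A sketch of the clamped range write, reusing the same IDENTITY_FRAME tagging assumption as above:

    #include <stdio.h>

    #define IDENTITY_FRAME_BIT (1UL << (8 * sizeof(unsigned long) - 2))
    #define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)

    /* Tag [pfn_s, pfn_e) as identity, clamped to the array size; return count. */
    static unsigned long set_range_identity(unsigned long *p2m, unsigned long size,
                                            unsigned long pfn_s, unsigned long pfn_e)
    {
            unsigned long pfn;

            if (pfn_s >= size || pfn_s > pfn_e)
                    return 0;
            if (pfn_e > size)
                    pfn_e = size;

            for (pfn = pfn_s; pfn < pfn_e; pfn++)
                    p2m[pfn] = IDENTITY_FRAME(pfn);

            return pfn - pfn_s;
    }

    int main(void)
    {
            unsigned long p2m[16] = { 0 };
            unsigned long done = set_range_identity(p2m, 16, 4, 100);

            printf("tagged %lu entries; p2m[4] = 0x%lx\n", done, p2m[4]);
            return 0;
    }
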
639 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
645 if (unlikely(pfn >= xen_p2m_size))
652 if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
655 ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
662 return mfn == IDENTITY_FRAME(pfn);
667 bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
669 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
672 ret = xen_alloc_p2m_entry(pfn);
676 return __set_phys_to_machine(pfn, mfn);
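
Lines 639-676 above are the write path: the fast path stores straight into the linear array, a store that faults on a shared read-only leaf only counts as success when the value written matches what that leaf already implies (identity or invalid), and set_phys_to_machine() reacts to failure by allocating a private leaf via xen_alloc_p2m_entry() and retrying once. A sketch of that decision, modeling the faulting store with a per-leaf writable flag; set_entry() and try_set_entry() are names of my own, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define INVALID_P2M_ENTRY  (~0UL)
    #define IDENTITY_FRAME_BIT (1UL << (8 * sizeof(unsigned long) - 2))
    #define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)

    struct leaf {
            unsigned long entry;
            bool writable;     /* false models the shared read-only leaves */
            bool identity;     /* which shared leaf it is, if read-only */
    };

    /* Fast path: succeed on a plain store, or when the value is already implied. */
    static bool try_set_entry(struct leaf *l, unsigned long pfn, unsigned long mfn)
    {
            if (l->writable) {
                    l->entry = mfn;
                    return true;
            }
            return l->identity ? (mfn == IDENTITY_FRAME(pfn))
                               : (mfn == INVALID_P2M_ENTRY);
    }

    /* Slow path: allocate (here: just flip the flag) and retry once. */
    static bool set_entry(struct leaf *l, unsigned long pfn, unsigned long mfn)
    {
            if (try_set_entry(l, pfn, mfn))
                    return true;
            l->writable = true;              /* stands in for xen_alloc_p2m_entry() */
            return try_set_entry(l, pfn, mfn);
    }

    int main(void)
    {
            struct leaf l = { .entry = 0, .writable = false, .identity = true };

            printf("write identity value: %d\n",
                   set_entry(&l, 7, IDENTITY_FRAME(7)));   /* ok without alloc */
            printf("write real mfn:       %d\n",
                   set_entry(&l, 7, 0x1234));              /* forces the alloc */
            printf("entry now 0x%lx, writable=%d\n", l.entry, l.writable);
            return 0;
    }
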
700 unsigned long mfn, pfn;
716 pfn = page_to_pfn(pages[i]);
718 WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
720 if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
777 unsigned long pfn = page_to_pfn(pages[i]);
780 set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
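
Lines 700-780 above pair grant mapping with p2m bookkeeping: the target page must currently be ballooned out (its entry invalid), the installed entry is tagged as FOREIGN_FRAME so the mfn is recognizably not the domain's own, and the unmap path puts the entry back to invalid. A sketch of that lifecycle, assuming FOREIGN_FRAME_BIT is the top bit of an entry (an assumption mirroring asm/xen/page.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define INVALID_P2M_ENTRY (~0UL)
    #define FOREIGN_FRAME_BIT (1UL << (8 * sizeof(unsigned long) - 1))
    #define FOREIGN_FRAME(m)  ((m) | FOREIGN_FRAME_BIT)

    /* Map a foreign mfn at pfn; the slot must be ballooned out (invalid). */
    static bool map_foreign(unsigned long *p2m, unsigned long pfn, unsigned long mfn)
    {
            if (p2m[pfn] != INVALID_P2M_ENTRY) {
                    fprintf(stderr, "pfn %lu must be ballooned\n", pfn);
                    return false;
            }
            p2m[pfn] = FOREIGN_FRAME(mfn);
            return true;
    }

    /* Unmap: drop the translation again. */
    static void unmap_foreign(unsigned long *p2m, unsigned long pfn)
    {
            p2m[pfn] = INVALID_P2M_ENTRY;
    }

    int main(void)
    {
            unsigned long p2m[4] = { 0x10, INVALID_P2M_ENTRY, 0x12, 0x13 };

            map_foreign(p2m, 0, 0xabc);   /* rejected: not ballooned */
            map_foreign(p2m, 1, 0xabc);   /* accepted and tagged */
            printf("p2m[1] = 0x%lx\n", p2m[1]);
            unmap_foreign(p2m, 1);
            printf("p2m[1] = 0x%lx\n", p2m[1]);
            return 0;
    }
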
810 unsigned long pfn, mfn, end_pfn;
814 pfn = PFN_DOWN(remap->paddr);
816 while (pfn < end_pfn) {
817 if (!set_phys_to_machine(pfn, mfn))
818 panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
819 pfn, mfn);
821 pfn++;
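
Lines 810-821 above walk a remap descriptor and install one translation per page, panicking on failure because the memory layout has already been committed at that point. A sketch of the per-page loop; the descriptor fields used here (paddr, npages, mfn) are assumptions based on the fragment, and the direct store below cannot fail the way set_phys_to_machine() can:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    struct remap_chunk {
            unsigned long paddr;   /* start of the physical range */
            unsigned long npages;  /* how many pages to remap */
            unsigned long mfn;     /* first machine frame backing the range */
    };

    /* Install pfn -> mfn for every page of the chunk. */
    static void apply_remap(unsigned long *p2m, const struct remap_chunk *rc)
    {
            unsigned long pfn = PFN_DOWN(rc->paddr);
            unsigned long end_pfn = pfn + rc->npages;
            unsigned long mfn = rc->mfn;

            while (pfn < end_pfn) {
                    p2m[pfn] = mfn;   /* stands in for set_phys_to_machine() */
                    pfn++;
                    mfn++;
            }
    }

    int main(void)
    {
            unsigned long p2m[16] = { 0 };
            struct remap_chunk rc = { .paddr = 0x4000, .npages = 3, .mfn = 0x100 };

            apply_remap(p2m, &rc);
            for (unsigned long pfn = 4; pfn < 7; pfn++)
                    printf("pfn %lu -> mfn 0x%lx\n", pfn, p2m[pfn]);
            return 0;
    }
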
894 [P2M_TYPE_PFN] = "pfn",
896 unsigned long pfn, first_pfn;
902 for (pfn = 0; pfn < xen_p2m_size; pfn++) {
903 type = xen_p2m_elem_type(pfn);
905 seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
908 first_pfn = pfn;
911 seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
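
Lines 894-911 above print the p2m as runs: whenever the type changes, the range accumulated since first_pfn is emitted with its label, and one final line flushes the last run. A sketch of that run-length grouping over a toy array, with elem_type() again a made-up classifier:

    #include <stdio.h>

    static const char *type_name[] = { "identity", "missing", "pfn" };

    /* Made-up classifier over a toy array: 0/1/2 as indexed above. */
    static int elem_type(const int *types, unsigned long pfn)
    {
            return types[pfn];
    }

    int main(void)
    {
            int types[] = { 2, 2, 2, 1, 1, 0, 0, 0, 0, 2 };
            unsigned long size = sizeof(types) / sizeof(types[0]);
            unsigned long first_pfn = 0;
            int prev = elem_type(types, 0);

            for (unsigned long pfn = 1; pfn < size; pfn++) {
                    int t = elem_type(types, pfn);
                    if (t != prev) {
                            printf(" [0x%lx->0x%lx] %s\n", first_pfn, pfn,
                                   type_name[prev]);
                            first_pfn = pfn;
                            prev = t;
                    }
            }
            /* Flush the final run, like the last seq_printf() in the source. */
            printf(" [0x%lx->0x%lx] %s\n", first_pfn, size, type_name[prev]);
            return 0;
    }
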