
Searched refs:pfns (Results 1 – 21 of 21) sorted by relevance

/linux/drivers/iommu/iommufd/
pages.c
277 unsigned long *pfns; member
290 batch->pfns[0] = 0; in batch_clear()
308 batch->pfns[0] = batch->pfns[batch->end - 1] + in batch_clear_carry()
321 batch->pfns[0] += skip_pfns; in batch_skip_carry()
329 const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns); in __batch_init()
332 batch->pfns = temp_kmalloc(&size, backup, backup_len); in __batch_init()
333 if (!batch->pfns) in __batch_init()
338 batch->npfns = (u32 *)(batch->pfns + batch->array_size); in __batch_init()
356 if (batch->pfns != backup) in batch_destroy()
357 kfree(batch->pfns); in batch_destroy()
[all …]
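
The pages.c hits above show iommufd's batch structure: one temp_kmalloc() buffer is carved into two parallel arrays, an array of starting pfns followed by a u32 array of run lengths, and batch_clear_carry() rolls the tail of one batch into the head of the next. A minimal sketch of that single-allocation layout (userspace C with simplified names; the backup-buffer fallback is omitted):

    #include <stdint.h>
    #include <stdlib.h>

    /* Parallel arrays carved from one buffer: pfns[i] is the first pfn
     * of run i, npfns[i] says how many contiguous pfns follow it. */
    struct pfn_batch {
        unsigned long *pfns;
        uint32_t *npfns;
        size_t array_size;
        size_t end;
    };

    static int batch_init(struct pfn_batch *batch, size_t nelems)
    {
        /* one logical element costs a pfn slot plus a count slot */
        const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns);
        void *mem = malloc(nelems * elmsz);

        if (!mem)
            return -1;
        batch->pfns = mem;
        /* the count array lives immediately after the pfn array */
        batch->npfns = (uint32_t *)(batch->pfns + nelems);
        batch->array_size = nelems;
        batch->end = 0;
        return 0;
    }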
/linux/drivers/xen/
xlate_mmu.c
193 xen_pfn_t *pfns; member
201 info->pfns[info->idx++] = gfn; in setup_balloon_gfn()
218 xen_pfn_t *pfns; in xen_xlate_map_ballooned_pages() local
230 pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL); in xen_xlate_map_ballooned_pages()
231 if (!pfns) { in xen_xlate_map_ballooned_pages()
240 kfree(pfns); in xen_xlate_map_ballooned_pages()
244 data.pfns = pfns; in xen_xlate_map_ballooned_pages()
254 kfree(pfns); in xen_xlate_map_ballooned_pages()
259 *gfns = pfns; in xen_xlate_map_ballooned_pages()
privcmd.c
741 xen_pfn_t *pfns = NULL; in privcmd_ioctl_mmap_resource() local
776 pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN); in privcmd_ioctl_mmap_resource()
777 if (!pfns) { in privcmd_ioctl_mmap_resource()
797 pfns[i] = pfn + (i % XEN_PFN_PER_PAGE); in privcmd_ioctl_mmap_resource()
804 set_xen_guest_handle(xdata.frame_list, pfns); in privcmd_ioctl_mmap_resource()
819 int num, *errs = (int *)pfns; in privcmd_ioctl_mmap_resource()
821 BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns)); in privcmd_ioctl_mmap_resource()
824 pfns, kdata.num, errs, in privcmd_ioctl_mmap_resource()
843 kfree(pfns); in privcmd_ioctl_mmap_resource()
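
Once the frame list has been handed over, privcmd reuses the same pfns allocation as an array of per-frame error codes; the BUILD_BUG_ON makes the cast provably safe because an int slot never outgrows a xen_pfn_t slot. The trick in isolation (a sketch with generic types, not the privcmd code itself):

    #include <stdint.h>

    /* Compile-time assertion in the spirit of the kernel's BUILD_BUG_ON(). */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    typedef uint64_t xen_pfn_t;

    static int *reuse_pfns_as_errs(xen_pfn_t *pfns)
    {
        /* Safe only because sizeof(int) <= sizeof(xen_pfn_t): the int
         * array can never run past the end of the original buffer. */
        BUILD_BUG_ON(sizeof(int) > sizeof(xen_pfn_t));
        return (int *)pfns;
    }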
/linux/mm/
hmm.c
777 unsigned long *pfns = map->pfn_list; in hmm_dma_map_pfn() local
778 struct page *page = hmm_pfn_to_page(pfns[idx]); in hmm_dma_map_pfn()
779 phys_addr_t paddr = hmm_pfn_to_phys(pfns[idx]); in hmm_dma_map_pfn()
785 if ((pfns[idx] & HMM_PFN_DMA_MAPPED) && in hmm_dma_map_pfn()
786 !(pfns[idx] & HMM_PFN_P2PDMA_BUS)) { in hmm_dma_map_pfn()
815 pfns[idx] |= HMM_PFN_P2PDMA; in hmm_dma_map_pfn()
818 pfns[idx] |= HMM_PFN_P2PDMA_BUS | HMM_PFN_DMA_MAPPED; in hmm_dma_map_pfn()
851 pfns[idx] |= HMM_PFN_DMA_MAPPED; in hmm_dma_map_pfn()
854 pfns[idx] &= ~HMM_PFN_P2PDMA; in hmm_dma_map_pfn()
873 unsigned long *pfns = map->pfn_list; in hmm_dma_unmap_pfn() local
[all …]
vmalloc.c
3563 unsigned long *pfns; member
3571 unsigned long pfn = data->pfns[data->idx]; in vmap_pfn_apply()
3593 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) in vmap_pfn() argument
3595 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; in vmap_pfn()
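
vmap_pfn() maps an array of pfns, typically pfns with no struct page behind them such as device BARs, into one contiguous kernel virtual range; note the pgprot_nx() in the excerpt, which forces the mapping non-executable. A kernel-style sketch of a caller (the noncached protection is an assumption about hypothetical device memory, not part of the API):

    #include <linux/vmalloc.h>
    #include <linux/pgtable.h>

    /* Map 'count' device pfns and return a kernel vaddr, or NULL.
     * The mapping is released later with vunmap(). */
    static void *map_device_pfns(unsigned long *pfns, unsigned int count)
    {
        return vmap_pfn(pfns, count, pgprot_noncached(PAGE_KERNEL));
    }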
/linux/tools/mm/
thpmaps
229 def thp_parse(vma, kpageflags, ranges, indexes, vfns, pfns, anons, heads):
399 pfns = pmes & PM_PFN_MASK
400 pfns = pfns[present]
404 pfn_vec = cont_ranges_all([pfns], [pfns])[0]
411 pfns = pfns[thps]
416 ranges = cont_ranges_all([vfns, pfns], [indexes, vfns, pfns])
418 thpstats = thp_parse(vma, kpageflags, ranges, indexes, vfns, pfns, anons, heads)
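
The thpmaps lines above decode /proc/<pid>/pagemap entries in bulk: mask out the pfn field, keep only present pages, then look for contiguous runs. The same per-entry decoding in C, following the documented pagemap format (bit 63 = present, bits 0-54 = pfn; reading the pfn field needs CAP_SYS_ADMIN):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    #define PM_PFN_MASK  ((1ULL << 55) - 1)   /* bits 0-54: pfn */
    #define PM_PRESENT   (1ULL << 63)         /* page present in RAM */

    /* Return the pfn backing 'vaddr' in the current process, or 0. */
    static uint64_t vaddr_to_pfn(void *vaddr)
    {
        long psz = sysconf(_SC_PAGESIZE);
        off_t off = ((uintptr_t)vaddr / psz) * sizeof(uint64_t);
        uint64_t pme = 0;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
            return 0;
        if (pread(fd, &pme, sizeof(pme), off) != sizeof(pme))
            pme = 0;
        close(fd);
        return (pme & PM_PRESENT) ? (pme & PM_PFN_MASK) : 0;
    }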
/linux/drivers/hv/
mshv_regions.c
413 unsigned long *pfns; in mshv_region_range_fault() local
417 pfns = kmalloc_array(page_count, sizeof(*pfns), GFP_KERNEL); in mshv_region_range_fault()
418 if (!pfns) in mshv_region_range_fault()
421 range.hmm_pfns = pfns; in mshv_region_range_fault()
433 region->pages[page_offset + i] = hmm_pfn_to_page(pfns[i]); in mshv_region_range_fault()
440 kfree(pfns); in mshv_region_range_fault()
mshv_root_hv_call.c
513 input->data[i].pfns = page_to_pfn(pages[i]); in hv_call_set_vp_state()
/linux/drivers/virtio/
virtio_balloon.c
108 __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; member
188 sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns); in tell_host()
227 __virtio32 pfns[], struct page *page) in set_page_pfns() argument
238 pfns[i] = cpu_to_virtio32(vb->vdev, in set_page_pfns()
250 num = min(num, ARRAY_SIZE(vb->pfns)); in fill_balloon()
275 set_page_pfns(vb, vb->pfns + vb->num_pfns, page); in fill_balloon()
314 num = min(num, ARRAY_SIZE(vb->pfns)); in leak_balloon()
324 set_page_pfns(vb, vb->pfns + vb->num_pfns, page); in leak_balloon()
863 set_page_pfns(vb, vb->pfns, newpage); in virtballoon_migratepage()
868 set_page_pfns(vb, vb->pfns, page); in virtballoon_migratepage()
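
The balloon protocol always counts 4 KiB frames (VIRTIO_BALLOON_PFN_SHIFT is fixed at 12 by the virtio spec), so set_page_pfns() expands one kernel page into PAGE_SIZE/4K consecutive protocol pfns before queuing them; on a 4 KiB-page build that is a single entry. A sketch of the conversion with the endianness handling reduced to a comment:

    #include <stdint.h>

    #define PAGE_SHIFT                 12  /* assuming 4 KiB kernel pages */
    #define VIRTIO_BALLOON_PFN_SHIFT   12  /* protocol frames are always 4 KiB */
    #define PFNS_PER_PAGE  (1U << (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT))

    /* Expand one kernel pfn into consecutive balloon-protocol frame numbers. */
    static void set_page_pfns(uint32_t *pfns, uint64_t kernel_pfn)
    {
        uint64_t first = kernel_pfn << (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);

        for (unsigned int i = 0; i < PFNS_PER_PAGE; i++)
            pfns[i] = (uint32_t)(first + i); /* driver wraps this in cpu_to_virtio32() */
    }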
/linux/drivers/gpu/drm/nouveau/
nouveau_svm.h
34 void nouveau_pfns_free(u64 *pfns);
36 unsigned long addr, u64 *pfns, unsigned long npages,
nouveau_dmem.c
785 struct nouveau_dmem_dma_info *dma_info, u64 *pfns) in nouveau_dmem_migrate_chunk() argument
795 args->src[i], dma_info + nr_dma, pfns + i); in nouveau_dmem_migrate_chunk()
812 nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i, order); in nouveau_dmem_migrate_chunk()
838 u64 *pfns; in nouveau_dmem_migrate_vma() local
862 pfns = nouveau_pfns_alloc(max); in nouveau_dmem_migrate_vma()
863 if (!pfns) in nouveau_dmem_migrate_vma()
878 pfns); in nouveau_dmem_migrate_vma()
884 nouveau_pfns_free(pfns); in nouveau_dmem_migrate_vma()
nouveau_svm.c
893 nouveau_pfns_to_args(void *pfns) in nouveau_pfns_to_args() argument
895 return container_of(pfns, struct nouveau_pfnmap_args, p.phys); in nouveau_pfns_to_args()
915 nouveau_pfns_free(u64 *pfns) in nouveau_pfns_free() argument
917 struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns); in nouveau_pfns_free()
924 unsigned long addr, u64 *pfns, unsigned long npages, in nouveau_pfns_map() argument
927 struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns); in nouveau_pfns_map()
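
nouveau hands callers a pointer to the pfn array embedded inside struct nouveau_pfnmap_args and later recovers the surrounding struct with container_of(), so one allocation serves as both the scratch array and the ioctl argument. The pattern in isolation (field names here are illustrative, not nouveau's):

    #include <stddef.h>
    #include <stdint.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pfnmap_args {
        uint32_t version;
        uint32_t npages;
        uint64_t phys[];        /* callers only ever see this array */
    };

    /* Recover the owning args struct from the array pointer we handed out. */
    static struct pfnmap_args *pfns_to_args(uint64_t *pfns)
    {
        return container_of(pfns, struct pfnmap_args, phys);
    }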
/linux/arch/powerpc/kvm/
book3s_hv_uvmem.c
230 unsigned long *pfns; member
255 p->pfns = vcalloc(slot->npages, sizeof(*p->pfns)); in kvmppc_uvmem_slot_init()
256 if (!p->pfns) { in kvmppc_uvmem_slot_init()
280 vfree(p->pfns); in kvmppc_uvmem_slot_free()
299 p->pfns[index] = uvmem_pfn | flag; in kvmppc_mark_gfn()
301 p->pfns[index] = flag; in kvmppc_mark_gfn()
342 if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) { in kvmppc_gfn_is_uvmem_pfn()
344 *uvmem_pfn = p->pfns[index] & in kvmppc_gfn_is_uvmem_pfn()
382 if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) { in kvmppc_next_nontransitioned_gfn()
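
book3s_hv_uvmem keeps one unsigned long per guest frame: the low bits optionally hold a device pfn and the high bits hold state flags, with KVMPPC_GFN_UVMEM_PFN saying whether the pfn part is valid. A sketch of that encoding (the flag values below are made up; the real KVMPPC_GFN_* constants and mask differ):

    #include <stdbool.h>

    #define GFN_UVMEM_PFN   (1UL << 63)     /* low bits then hold a device pfn */
    #define GFN_FLAG_MASK   GFN_UVMEM_PFN

    static void mark_gfn(unsigned long *pfns, unsigned long index,
                         unsigned long uvmem_pfn, unsigned long flag)
    {
        if (flag & GFN_UVMEM_PFN)
            pfns[index] = uvmem_pfn | flag;
        else
            pfns[index] = flag;
    }

    static bool gfn_is_uvmem_pfn(unsigned long *pfns, unsigned long index,
                                 unsigned long *uvmem_pfn)
    {
        if (!(pfns[index] & GFN_UVMEM_PFN))
            return false;
        *uvmem_pfn = pfns[index] & ~GFN_FLAG_MASK;
        return true;
    }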
/linux/lib/
test_hmm.c
212 unsigned long *pfns = range->hmm_pfns; in dmirror_do_fault() local
217 pfn++, pfns++) { in dmirror_do_fault()
225 WARN_ON(*pfns & HMM_PFN_ERROR); in dmirror_do_fault()
226 WARN_ON(!(*pfns & HMM_PFN_VALID)); in dmirror_do_fault()
228 page = hmm_pfn_to_page(*pfns); in dmirror_do_fault()
232 if (*pfns & HMM_PFN_WRITE) in dmirror_do_fault()
335 unsigned long pfns[32]; in dmirror_fault() local
338 .hmm_pfns = pfns, in dmirror_fault()
352 range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); in dmirror_fault()
1330 unsigned long pfns[32]; in dmirror_snapshot() local
[all …]
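
test_hmm walks a range in 32-pfn chunks: fill struct hmm_range, call hmm_range_fault() under the mmap read lock, then translate each valid entry with hmm_pfn_to_page(). A condensed kernel-style sketch of that calling convention (mmu-notifier registration and the retry on a stale notifier_seq are elided; a real caller must loop on -EBUSY):

    #include <linux/hmm.h>
    #include <linux/mm.h>

    static int fault_range(struct mmu_interval_notifier *notifier,
                           unsigned long addr, unsigned long end, bool write)
    {
        unsigned long pfns[32];
        struct hmm_range range = {
            .notifier = notifier,
            .hmm_pfns = pfns,
            .default_flags = HMM_PFN_REQ_FAULT |
                             (write ? HMM_PFN_REQ_WRITE : 0),
        };
        int ret;

        while (addr < end) {
            range.start = addr;
            range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
            range.notifier_seq = mmu_interval_read_begin(notifier);

            mmap_read_lock(notifier->mm);
            ret = hmm_range_fault(&range);
            mmap_read_unlock(notifier->mm);
            if (ret)
                return ret;     /* -EBUSY would mean "retry", elided here */

            /* every valid pfns[i] now resolves via hmm_pfn_to_page() */
            addr = range.end;
        }
        return 0;
    }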
/linux/Documentation/translations/zh_CN/mm/
memory-model.rst
115 performs enough memory hotplug for the given range of pfns to enable :c:func:`pfn_to_page`,
/linux/include/xen/
xen-ops.h
188 int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
/linux/include/linux/
vmalloc.h
219 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
/linux/arch/s390/mm/
gmap.c
2321 unsigned long pfns[GATHER_GET_PAGES]; member
2333 p->pfns[p->count] = phys_to_pfn(pte_val(pte)); in s390_gather_pages()
2349 void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns) in s390_uv_destroy_pfns() argument
2355 folio = pfn_folio(pfns[i]); in s390_uv_destroy_pfns()
2391 s390_uv_destroy_pfns(state.count, state.pfns); in __s390_uv_destroy_range()
/linux/include/hyperv/
hvhdk.h
738 u64 pfns; member
/linux/Documentation/mm/
memory-model.rst
147 :c:func:`get_user_pages` service for the given range of pfns. Since the
/linux/Documentation/virt/kvm/x86/
mmu.rst
55 spte shadow pte (referring to pfns)