/linux/drivers/iommu/iommufd/pages.c
    269: unsigned long *pfns;  (member)
    280: batch->pfns[0] = 0;  (in batch_clear())
    298: batch->pfns[0] = batch->pfns[batch->end - 1] +  (in batch_clear_carry())
    311: batch->pfns[0] += skip_pfns;  (in batch_skip_carry())
    319: const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns);  (in __batch_init())
    322: batch->pfns = temp_kmalloc(&size, backup, backup_len);  (in __batch_init())
    323: if (!batch->pfns)  (in __batch_init())
    328: batch->npfns = (u32 *)(batch->pfns + batch->array_size);  (in __batch_init())
    346: if (batch->pfns != backup)  (in batch_destroy())
    347: kfree(batch->pfns);  (in batch_destroy())
    [all …]
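
The __batch_init() hits above describe a single allocation split into two parallel arrays: a u64 pfns[] run followed by a u32 npfns[] run-length array. A minimal sketch of that layout, with iommufd's temp_kmalloc() backup-buffer fallback replaced by a plain kmalloc_array(); field names follow the hits, the function name is ours:

    struct pfn_batch {
        unsigned long *pfns;     /* physical frame numbers */
        u32 *npfns;              /* run length of each pfns[] entry */
        unsigned int array_size;
        unsigned int end;
    };

    static int batch_init_sketch(struct pfn_batch *batch, size_t size)
    {
        /* one element = a pfn slot plus its parallel u32 count slot */
        const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns);

        batch->array_size = size / elmsz;
        batch->pfns = kmalloc_array(batch->array_size, elmsz, GFP_KERNEL);
        if (!batch->pfns)
            return -ENOMEM;
        /* npfns[] lives immediately after the last pfns[] slot */
        batch->npfns = (u32 *)(batch->pfns + batch->array_size);
        batch->end = 0;
        return 0;
    }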

/linux/drivers/xen/xlate_mmu.c
    193: xen_pfn_t *pfns;  (member)
    201: info->pfns[info->idx++] = gfn;  (in setup_balloon_gfn())
    218: xen_pfn_t *pfns;  (in xen_xlate_map_ballooned_pages(), local)
    230: pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);  (in xen_xlate_map_ballooned_pages())
    231: if (!pfns) {  (in xen_xlate_map_ballooned_pages())
    240: kfree(pfns);  (in xen_xlate_map_ballooned_pages())
    244: data.pfns = pfns;  (in xen_xlate_map_ballooned_pages())
    254: kfree(pfns);  (in xen_xlate_map_ballooned_pages())
    259: *gfns = pfns;  (in xen_xlate_map_ballooned_pages())
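
The hits trace a common ownership pattern: the array is kcalloc'd, freed on every failure path, and only surrendered to the caller through the out-parameter on success. A compressed sketch; fill_ballooned_frames() is a hypothetical stand-in for the balloon/hypercall work, everything else is the shape the hits show:

    static int map_ballooned_sketch(xen_pfn_t **gfns, unsigned int nr_frames)
    {
        xen_pfn_t *pfns;
        int rc;

        pfns = kcalloc(nr_frames, sizeof(pfns[0]), GFP_KERNEL);
        if (!pfns)
            return -ENOMEM;

        rc = fill_ballooned_frames(pfns, nr_frames); /* hypothetical helper */
        if (rc) {
            kfree(pfns);
            return rc;
        }

        *gfns = pfns;   /* success: the caller now owns the array */
        return 0;
    }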

/linux/drivers/xen/privcmd.c
    741: xen_pfn_t *pfns = NULL;  (in privcmd_ioctl_mmap_resource(), local)
    776: pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);  (in privcmd_ioctl_mmap_resource())
    777: if (!pfns) {  (in privcmd_ioctl_mmap_resource())
    798: pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);  (in privcmd_ioctl_mmap_resource())
    805: set_xen_guest_handle(xdata.frame_list, pfns);  (in privcmd_ioctl_mmap_resource())
    821: int num, *errs = (int *)pfns;  (in privcmd_ioctl_mmap_resource())
    823: BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));  (in privcmd_ioctl_mmap_resource())
    826: pfns, kdata.num, errs,  (in privcmd_ioctl_mmap_resource())
    845: kfree(pfns);  (in privcmd_ioctl_mmap_resource())
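
The `int num, *errs = (int *)pfns;` hit is a small allocation-reuse trick: once the acquire-resource hypercall has consumed the frame list, the same buffer doubles as the per-frame error array for the mapping step, guarded by a compile-time size check. Isolated as a sketch (the wrapper function is ours):

    /*
     * Reuse the consumed frame list as the mmap error array; the
     * BUILD_BUG_ON proves an int slot fits inside a xen_pfn_t slot,
     * so no second allocation is needed.
     */
    static int *pfns_as_errs(xen_pfn_t *pfns)
    {
        BUILD_BUG_ON(sizeof(int) > sizeof(*pfns));
        return (int *)pfns;
    }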

/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
    177: unsigned long *pfns;  (in amdgpu_hmm_range_get_pages(), local)
    184: pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);  (in amdgpu_hmm_range_get_pages())
    185: if (unlikely(!pfns)) {  (in amdgpu_hmm_range_get_pages())
    194: hmm_range->hmm_pfns = pfns;  (in amdgpu_hmm_range_get_pages())
    223: hmm_range->hmm_pfns = pfns;  (in amdgpu_hmm_range_get_pages())
    231: pages[i] = hmm_pfn_to_page(pfns[i]);  (in amdgpu_hmm_range_get_pages())
    238: kvfree(pfns);  (in amdgpu_hmm_range_get_pages())
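
amdgpu here, xe and amdxdna further down, and lib/test_hmm.c all follow the same hmm_range_fault() recipe: allocate a throwaway pfns[] array, point hmm_range.hmm_pfns at it, fault, then decode each entry with hmm_pfn_to_page(). A minimal sketch of that shared shape, under the assumption of a caller-supplied mmu_interval_notifier; the -EBUSY retry loop and driver locking are elided:

    static int hmm_get_pages_sketch(struct mmu_interval_notifier *notifier,
                                    unsigned long start, unsigned long npages,
                                    struct page **pages)
    {
        unsigned long *pfns;
        struct hmm_range range = {
            .notifier = notifier,
            .start = start,
            .end = start + (npages << PAGE_SHIFT),
            .default_flags = HMM_PFN_REQ_FAULT,
        };
        unsigned long i;
        int ret;

        pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
        if (unlikely(!pfns))
            return -ENOMEM;
        range.hmm_pfns = pfns;

        range.notifier_seq = mmu_interval_read_begin(notifier);
        mmap_read_lock(notifier->mm);
        ret = hmm_range_fault(&range);
        mmap_read_unlock(notifier->mm);

        if (!ret)
            for (i = 0; i < npages; i++)
                pages[i] = hmm_pfn_to_page(pfns[i]);

        kvfree(pfns);
        return ret;  /* -EBUSY asks the caller to re-read the seq and retry */
    }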

/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
    163: uint64_t pa_addr, uint64_t *pfns, int len);

/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
    467: uint64_t pa_addr, uint64_t *pfns, int len)  (in amdgpu_umc_lookup_bad_pages_in_a_row(), argument)
    487: pfns[i] = err_data.err_addr[i].retired_page;  (in amdgpu_umc_lookup_bad_pages_in_a_row())

/linux/drivers/gpu/drm/xe/xe_hmm.c
    238: unsigned long *pfns;  (in xe_hmm_userptr_populate_range(), local)
    273: pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);  (in xe_hmm_userptr_populate_range())
    274: if (unlikely(!pfns))  (in xe_hmm_userptr_populate_range())
    285: hmm_range.hmm_pfns = pfns;  (in xe_hmm_userptr_populate_range())
    335: kvfree(pfns);  (in xe_hmm_userptr_populate_range())
    343: kvfree(pfns);  (in xe_hmm_userptr_populate_range())

/linux/tools/mm/thpmaps
    229: def thp_parse(vma, kpageflags, ranges, indexes, vfns, pfns, anons, heads):
    399: pfns = pmes & PM_PFN_MASK
    400: pfns = pfns[present]
    404: pfn_vec = cont_ranges_all([pfns], [pfns])[0]
    411: pfns = pfns[thps]
    416: ranges = cont_ranges_all([vfns, pfns], [indexes, vfns, pfns])
    418: thpstats = thp_parse(vma, kpageflags, ranges, indexes, vfns, pfns, anons, heads)
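
The thpmaps tool does the PM_PFN_MASK extraction above vectorised in numpy; the underlying ABI is the 64-bit /proc/<pid>/pagemap entry, which carries the PFN in bits 0-54 and the present flag in bit 63 (Documentation/admin-guide/mm/pagemap.rst). The same decode in plain userspace C, as a sketch:

    #include <stdint.h>

    #define PM_PFN_MASK ((1ULL << 55) - 1)   /* bits 0-54 */
    #define PM_PRESENT  (1ULL << 63)

    /*
     * Decode one pagemap entry (entries are 8 bytes, indexed by
     * vaddr / page_size). Returns the backing pfn, or 0 if the page
     * is not present; reading real pfns needs CAP_SYS_ADMIN.
     */
    static uint64_t pagemap_entry_to_pfn(uint64_t pme)
    {
        return (pme & PM_PRESENT) ? (pme & PM_PFN_MASK) : 0;
    }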

/linux/drivers/dma-buf/udmabuf.c
    112: unsigned long *pfns;  (in vmap_udmabuf(), local)
    122: pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);  (in vmap_udmabuf())
    123: if (!pfns)  (in vmap_udmabuf())
    130: pfns[pg] = pfn;  (in vmap_udmabuf())
    133: vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);  (in vmap_udmabuf())
    134: kvfree(pfns);  (in vmap_udmabuf())
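
These hits show the calling convention for vmap_pfn() (defined in mm/vmalloc.c, near the end of this list): the pfns[] array is input-only and may be freed as soon as the call returns. A sketch of the pattern, using page_to_pfn() in place of udmabuf's folio bookkeeping:

    static void *vmap_pages_sketch(struct page **pages, unsigned int count)
    {
        unsigned long *pfns;
        void *vaddr;
        unsigned int pg;

        pfns = kvmalloc_array(count, sizeof(*pfns), GFP_KERNEL);
        if (!pfns)
            return NULL;

        for (pg = 0; pg < count; pg++)
            pfns[pg] = page_to_pfn(pages[pg]);

        vaddr = vmap_pfn(pfns, count, PAGE_KERNEL);
        kvfree(pfns);  /* ptes already installed; array no longer needed */
        return vaddr;  /* undo later with vunmap() */
    }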

/linux/drivers/virtio/virtio_balloon.c
    106: /* The array of pfns we tell the Host about. */
    108: __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];  (member)
    188: sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);  (in tell_host())
    227: __virtio32 pfns[], struct page *page)  (in set_page_pfns(), argument)
    234: * Set balloon pfns pointing at this page.  (in set_page_pfns())
    238: pfns[i] = cpu_to_virtio32(vb->vdev,  (in set_page_pfns())
    250: num = min(num, ARRAY_SIZE(vb->pfns));  (in fill_balloon())
    275: set_page_pfns(vb, vb->pfns + vb->num_pfns, page);  (in fill_balloon())
    314: num = min(num, ARRAY_SIZE(vb->pfns));  (in leak_balloon())
    [all …]
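
set_page_pfns(), only partially visible above, explains why the array is __virtio32: the balloon protocol always speaks 4KiB frames in the device's endianness, so one kernel page expands to several consecutive entries. A reconstruction of its core loop as implied by the hits; page_to_balloon_pfn() is the driver's pfn-rescaling helper, and details may differ from the current source:

    static void set_page_pfns_sketch(struct virtio_balloon *vb,
                                     __virtio32 pfns[], struct page *page)
    {
        unsigned int i;

        /* one kernel page = PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT entries */
        for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
            pfns[i] = cpu_to_virtio32(vb->vdev,
                                      page_to_balloon_pfn(page) + i);
    }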
/linux/drivers/gpu/drm/nouveau/nouveau_svm.h
    34: void nouveau_pfns_free(u64 *pfns);
    36: unsigned long addr, u64 *pfns, unsigned long npages);

/linux/drivers/gpu/drm/nouveau/nouveau_dmem.c
    665: dma_addr_t *dma_addrs, u64 *pfns)  (in nouveau_dmem_migrate_chunk(), argument)
    672: args->src[i], dma_addrs + nr_dma, pfns + i);  (in nouveau_dmem_migrate_chunk())
    681: nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);  (in nouveau_dmem_migrate_chunk())
    707: u64 *pfns;  (in nouveau_dmem_migrate_vma(), local)
    724: pfns = nouveau_pfns_alloc(max);  (in nouveau_dmem_migrate_vma())
    725: if (!pfns)  (in nouveau_dmem_migrate_vma())
    740: pfns);  (in nouveau_dmem_migrate_vma())
    746: nouveau_pfns_free(pfns);  (in nouveau_dmem_migrate_vma())

/linux/drivers/gpu/drm/nouveau/nouveau_svm.c
    897: nouveau_pfns_to_args(void *pfns)  (argument)
    899: return container_of(pfns, struct nouveau_pfnmap_args, p.phys);  (in nouveau_pfns_to_args())
    919: nouveau_pfns_free(u64 *pfns)  (argument)
    921: struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);  (in nouveau_pfns_free())
    928: unsigned long addr, u64 *pfns, unsigned long npages)  (in nouveau_pfns_map(), argument)
    930: struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);  (in nouveau_pfns_map())
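
nouveau_pfns_to_args() is a container_of() idiom worth noting: the alloc helper only ever hands out a pointer to the array embedded at the tail of the ioctl argument struct, and map/free recover the whole struct from that pointer. A generic sketch; the struct is a hypothetical stand-in, not nouveau's real nouveau_pfnmap_args layout:

    struct pfnmap_args_sketch {
        u32 npages;
        u64 phys[];   /* what the alloc helper hands out to callers */
    };

    /* void * on purpose: mirrors nouveau_pfns_to_args(void *pfns) */
    static struct pfnmap_args_sketch *pfns_to_args_sketch(void *pfns)
    {
        return container_of(pfns, struct pfnmap_args_sketch, phys);
    }

    static void pfns_free_sketch(u64 *pfns)
    {
        kfree(pfns_to_args_sketch(pfns));
    }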

/linux/drivers/accel/amdxdna/amdxdna_gem.c
    124: kvfree(abo->mem.pfns);  (in amdxdna_hmm_unregister())
    125: abo->mem.pfns = NULL;  (in amdxdna_hmm_unregister())
    138: if (abo->mem.pfns)  (in amdxdna_hmm_register())
    142: abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),  (in amdxdna_hmm_register())
    144: if (!abo->mem.pfns)  (in amdxdna_hmm_register())
    154: kvfree(abo->mem.pfns);  (in amdxdna_hmm_register())

/linux/drivers/accel/amdxdna/amdxdna_gem.h
    17: unsigned long *pfns;  (member)

/linux/drivers/accel/amdxdna/aie2_ctx.c
    763: range.hmm_pfns = abo->mem.pfns;  (in aie2_populate_range())

/linux/lib/test_hmm.c
    209: unsigned long *pfns = range->hmm_pfns;  (in dmirror_do_fault(), local)
    214: pfn++, pfns++) {  (in dmirror_do_fault())
    222: WARN_ON(*pfns & HMM_PFN_ERROR);  (in dmirror_do_fault())
    223: WARN_ON(!(*pfns & HMM_PFN_VALID));  (in dmirror_do_fault())
    225: page = hmm_pfn_to_page(*pfns);  (in dmirror_do_fault())
    229: if (*pfns & HMM_PFN_WRITE)  (in dmirror_do_fault())
    332: unsigned long pfns[64];  (in dmirror_fault(), local)
    335: .hmm_pfns = pfns,  (in dmirror_fault())
    349: range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);  (in dmirror_fault())
    1169: unsigned long pfns[64];  (in dmirror_snapshot(), local)
    [all …]
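
dmirror_do_fault() shows how hmm_range_fault() output is meant to be consumed: each "pfn" is really a pfn plus HMM_PFN_* status bits or'd into the entry (include/linux/hmm.h), and hmm_pfn_to_page() strips the flags. A minimal decoder in the same spirit; the function name is ours:

    /*
     * Returns the faulted page for one hmm_pfns[] entry, or NULL if the
     * entry is unusable; *writable reports the HMM_PFN_WRITE bit.
     */
    static struct page *hmm_entry_to_page(unsigned long entry, bool *writable)
    {
        if ((entry & HMM_PFN_ERROR) || !(entry & HMM_PFN_VALID))
            return NULL;
        *writable = entry & HMM_PFN_WRITE;
        return hmm_pfn_to_page(entry);
    }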

/linux/Documentation/translations/zh_CN/mm/memory-model.rst
    115: performs enough memory hotplug for the given range of pfns to enable :c:func:`pfn_to_page`,  (zh_CN hit, translated)

/linux/arch/s390/include/asm/gmap.h
    139: void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);

/linux/include/xen/xen-ops.h
    189: int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,

/linux/arch/s390/mm/gmap.c
    2513: unsigned long pfns[GATHER_GET_PAGES];  (member)
    2525: p->pfns[p->count] = phys_to_pfn(pte_val(pte));  (in s390_gather_pages())
    2541: void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)  (argument)
    2547: folio = pfn_folio(pfns[i]);  (in s390_uv_destroy_pfns())
    2583: s390_uv_destroy_pfns(state.count, state.pfns);  (in __s390_uv_destroy_range())
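
The gmap.c hits sketch a classic bounded gather: the page-table walk banks pfns into a fixed array and flushes the batch through s390_uv_destroy_pfns() whenever it fills. The shape, generically; the GATHER_GET_PAGES value and the flush callback wiring are stand-ins, not s390's actual code:

    #define GATHER_GET_PAGES 32   /* hypothetical batch size */

    struct pfn_gather {
        unsigned long pfns[GATHER_GET_PAGES];
        unsigned long count;
    };

    static void gather_pfn(struct pfn_gather *p, unsigned long pfn,
                           void (*flush)(unsigned long, unsigned long *))
    {
        p->pfns[p->count++] = pfn;
        if (p->count == GATHER_GET_PAGES) {
            flush(p->count, p->pfns);   /* e.g. s390_uv_destroy_pfns */
            p->count = 0;
        }
    }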

/linux/Documentation/mm/hmm.rst
    199: /* Use pfns array content to update device page table,
    216: for each entry in the pfns array.
    232: range->pfns[index_of_write] = HMM_PFN_REQ_WRITE;

/linux/Documentation/mm/memory-model.rst
    147: :c:func:`get_user_pages` service for the given range of pfns. Since the

/linux/mm/vmalloc.c
    3480: unsigned long *pfns;  (member)
    3488: unsigned long pfn = data->pfns[data->idx];  (in vmap_pfn_apply())
    3510: void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)  (in vmap_pfn())
    3512: struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };  (in vmap_pfn())
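
These vmap_pfn() hits close the loop on the udmabuf entry above: the array is consumed synchronously by a per-pte callback (driven by apply_to_page_range()) that walks data->idx forward, which is why callers may free it the moment the call returns. Roughly, with area allocation and pfn validation elided:

    struct vmap_pfn_data {       /* fields as implied by the hits */
        unsigned long *pfns;
        pgprot_t prot;
        unsigned int idx;
    };

    static int vmap_pfn_apply_sketch(pte_t *pte, unsigned long addr,
                                     void *private)
    {
        struct vmap_pfn_data *data = private;
        unsigned long pfn = data->pfns[data->idx++];

        /* install one special pte for the next pfn in the array */
        set_pte_at(&init_mm, addr, pte,
                   pte_mkspecial(pfn_pte(pfn, data->prot)));
        return 0;
    }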

/linux/Documentation/virt/kvm/x86/mmu.rst
    55: spte shadow pte (referring to pfns)