| /linux/tools/testing/selftests/vfio/lib/ |
| H A D | iommu.c |
|  152  static int __vfio_iommu_unmap(int fd, u64 iova, u64 size, u32 flags, u64 *unmapped)  in __vfio_iommu_unmap() argument
|  164  if (unmapped)  in __vfio_iommu_unmap()
|  165  *unmapped = args.size;  in __vfio_iommu_unmap()
|  171  u64 *unmapped)  in vfio_iommu_unmap() argument
|  174  region->size, 0, unmapped);  in vfio_iommu_unmap()
|  177  static int __iommufd_unmap(int fd, u64 iova, u64 length, u32 ioas_id, u64 *unmapped)  in __iommufd_unmap() argument
|  189  if (unmapped)  in __iommufd_unmap()
|  190  *unmapped = args.length;  in __iommufd_unmap()
|  196  u64 *unmapped)  in iommufd_unmap() argument
|  199  iommu->ioas_id, unmapped);  in iommufd_unmap()
| [all …]
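These selftest helpers return the kernel-reported unmap size through their final pointer argument: the VFIO type1 path copies it out of args.size, the iommufd path out of args.length. For illustration, a minimal userspace sketch of the same round trip against the VFIO type1 UAPI; container_fd is assumed to be an already-set-up VFIO container, and error handling is pared down.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch only: returns the byte count the kernel reports as unmapped,
 * mirroring how __vfio_iommu_unmap() above reads back args.size,
 * or -1 if the ioctl fails. */
static int64_t unmap_dma(int container_fd, uint64_t iova, uint64_t size)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.flags = 0,
		.iova  = iova,
		.size  = size,
	};

	if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap))
		return -1;

	/* The kernel writes the actually-unmapped size back into .size. */
	return unmap.size;
}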
|
| /linux/tools/testing/selftests/vfio/ |
| H A D | vfio_dma_mapping_test.c |
|  141  u64 unmapped;  in TEST_F() local
|  197  rc = __iommu_unmap(self->iommu, &region, &unmapped);  in TEST_F()
|  199  ASSERT_EQ(unmapped, region.size);  in TEST_F()
|  267  u64 unmapped;  in TEST_F() local
|  273  rc = __iommu_unmap(self->iommu, region, &unmapped);  in TEST_F()
|  275  ASSERT_EQ(unmapped, region->size);  in TEST_F()
|  281  u64 unmapped;  in TEST_F() local
|  287  rc = __iommu_unmap_all(self->iommu, &unmapped);  in TEST_F()
|  289  ASSERT_EQ(unmapped, region->size);  in TEST_F()
|
| /linux/include/trace/events/ |
| H A D | huge_memory.h |
|   57  int referenced, int none_or_zero, int status, int unmapped),
|   59  TP_ARGS(mm, folio, referenced, none_or_zero, status, unmapped),
|   67  __field(int, unmapped)
|   76  __entry->unmapped = unmapped;
|   85  __entry->unmapped)
|
| /linux/tools/testing/selftests/vfio/lib/include/libvfio/ |
| H A D | iommu.h |
|   45  int __iommu_unmap(struct iommu *iommu, struct dma_region *region, u64 *unmapped);
|   52  int __iommu_unmap_all(struct iommu *iommu, u64 *unmapped);
|
| /linux/drivers/staging/media/ipu3/ |
| H A D | ipu3-mmu.c |
|  383  size_t unmapped_page, unmapped = 0;  in imgu_mmu_unmap() local
|  402  while (unmapped < size) {  in imgu_mmu_unmap()
|  411  unmapped += unmapped_page;  in imgu_mmu_unmap()
|  416  return unmapped;  in imgu_mmu_unmap()
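imgu_mmu_unmap() steps through the range one page-table block at a time, adding each successful chunk to the running unmapped count and stopping at the first failure; the msm_iommu.c and riscv iommu.c entries below follow the same shape. A generic sketch of that accumulate-and-bail-early loop, with unmap_block as a hypothetical stand-in for the driver's per-block unmap primitive (returning bytes unmapped, or 0 on failure):

#include <stddef.h>

/* Sketch only: returns the number of bytes actually unmapped, which may
 * be less than 'size' if a block in the middle could not be unmapped.
 * Callers such as imgu_mmu_unmap() report that partial total upward. */
static size_t unmap_range(unsigned long iova, size_t size,
			  size_t (*unmap_block)(unsigned long iova))
{
	size_t unmapped = 0;

	while (unmapped < size) {
		size_t chunk = unmap_block(iova);

		if (!chunk)
			break;		/* partial unmap: stop and report the total */

		iova += chunk;
		unmapped += chunk;
	}

	return unmapped;
}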
|
| /linux/drivers/vfio/ |
| H A D | vfio_iommu_type1.c |
| 1099  size_t unmapped = 0;  in unmap_unpin_fast() local
| 1103  unmapped = iommu_unmap_fast(domain->domain, iova, len,  in unmap_unpin_fast()
| 1106  if (!unmapped) {  in unmap_unpin_fast()
| 1111  entry->len = unmapped;  in unmap_unpin_fast()
| 1122  if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {  in unmap_unpin_fast()
| 1128  return unmapped;  in unmap_unpin_fast()
| 1136  size_t unmapped = iommu_unmap(domain->domain, iova, len);  in unmap_unpin_slow() local
| 1138  if (unmapped) {  in unmap_unpin_slow()
| 1141  unmapped >> PAGE_SHIFT,  in unmap_unpin_slow()
| 1145  return unmapped;  in unmap_unpin_slow()
| [all …]
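unmap_unpin_fast() uses the deferred-flush variant of the IOMMU unmap API: iommu_unmap_fast() only records the ranges to invalidate in an IOTLB gather, and the sync is issued later, once enough ranges have accumulated (VFIO_IOMMU_TLB_SYNC_MAX above) or the loop finishes; when the fast unmap fails it falls back to the unmap_unpin_slow() path also listed here. A condensed kernel-side sketch of the deferred-flush pattern, syncing immediately instead of batching, assuming a valid domain and range:

#include <linux/iommu.h>

/* Sketch only: unmap a range while deferring the IOTLB flush, then sync
 * once.  The type1 fast path above additionally batches several ranges
 * per sync before issuing the flush. */
static size_t unmap_deferred_flush(struct iommu_domain *domain,
				   unsigned long iova, size_t len)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);
	unmapped = iommu_unmap_fast(domain, iova, len, &gather);
	iommu_iotlb_sync(domain, &gather);	/* flush the gathered ranges */

	return unmapped;
}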
|
| /linux/drivers/gpu/drm/msm/ |
| H A D | msm_iommu.c |
|  110  ssize_t unmapped;  in msm_iommu_pagetable_unmap() local
|  114  unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);  in msm_iommu_pagetable_unmap()
|  115  if (unmapped <= 0) {  in msm_iommu_pagetable_unmap()
|  122  unmapped = PAGE_SIZE;  in msm_iommu_pagetable_unmap()
|  125  iova += unmapped;  in msm_iommu_pagetable_unmap()
|  126  size -= unmapped;  in msm_iommu_pagetable_unmap()
|
| /linux/drivers/iommu/ |
| H A D | virtio-iommu.c |
|  364  size_t unmapped = 0;  in viommu_del_mappings() local
|  384  unmapped += mapping->iova.last - mapping->iova.start + 1;  in viommu_del_mappings()
|  391  return unmapped;  in viommu_del_mappings()
|  891  size_t unmapped;  in viommu_unmap_pages() local
|  896  unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);  in viommu_unmap_pages()
|  897  if (unmapped < size)  in viommu_unmap_pages()
|  902  return unmapped;  in viommu_unmap_pages()
|  908  .virt_end = cpu_to_le64(iova + unmapped - 1),  in viommu_unmap_pages()
|  912  return ret ? 0 : unmapped;  in viommu_unmap_pages()
|
| H A D | dma-iommu.c |
|  820  size_t unmapped;  in __iommu_dma_unmap() local
|  827  unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);  in __iommu_dma_unmap()
|  828  WARN_ON(unmapped != size);  in __iommu_dma_unmap()
| 2032  size_t unmapped;  in __iommu_dma_iova_unlink() local
| 2044  unmapped = iommu_unmap_fast(domain, addr, size, &iotlb_gather);  in __iommu_dma_iova_unlink()
| 2045  WARN_ON(unmapped != size);  in __iommu_dma_iova_unlink()
|
| /linux/mm/ |
| H A D | migrate_device.c |
|  255  unsigned long addr = start, unmapped = 0;  in migrate_vma_collect_pmd() local
|  322  if (unmapped)  in migrate_vma_collect_pmd()
|  365  if (unmapped)  in migrate_vma_collect_pmd()
|  473  unmapped++;  in migrate_vma_collect_pmd()
|  485  if (unmapped)  in migrate_vma_collect_pmd()
|  574  unsigned long unmapped = 0;  in migrate_device_unmap() local
|  585  unmapped++;  in migrate_device_unmap()
|  629  unmapped++;  in migrate_device_unmap()
|  651  return unmapped;  in migrate_device_unmap()
|
| H A D | khugepaged.c |
| 1092  int referenced, int unmapped,  in collapse_huge_page() argument
| 1132  if (unmapped) {  in collapse_huge_page()
| 1260  int node = NUMA_NO_NODE, unmapped = 0;  in hpage_collapse_scan_pmd() local
| 1292  ++unmapped;  in hpage_collapse_scan_pmd()
| 1294  unmapped <= khugepaged_max_ptes_swap) {  in hpage_collapse_scan_pmd()
| 1399  (unmapped && referenced < HPAGE_PMD_NR / 2))) {  in hpage_collapse_scan_pmd()
| 1408  unmapped, cc);  in hpage_collapse_scan_pmd()
| 1414  none_or_zero, result, unmapped);  in hpage_collapse_scan_pmd()
|
| /linux/drivers/iommu/iommufd/ |
| H A D | vfio_compat.c |
|  209  unsigned long unmapped = 0;  in iommufd_vfio_unmap_dma() local
|  228  rc = iopt_unmap_all(&ioas->iopt, &unmapped);  in iommufd_vfio_unmap_dma()
|  245  &unmapped);  in iommufd_vfio_unmap_dma()
|  247  unmap.size = unmapped;  in iommufd_vfio_unmap_dma()
|
| H A D | ioas.c |
|  344  unsigned long unmapped = 0;  in iommufd_ioas_unmap() local
|  352  rc = iopt_unmap_all(&ioas->iopt, &unmapped);  in iommufd_ioas_unmap()
|  361  &unmapped);  in iommufd_ioas_unmap()
|  364  if (!unmapped) {  in iommufd_ioas_unmap()
|  370  cmd->length = unmapped;  in iommufd_ioas_unmap()
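iommufd_ioas_unmap() writes the number of bytes actually unmapped back into cmd->length, and the iopt_unmap_all() branch above serves a caller asking for everything (iova 0 with length U64_MAX, per the iommufd UAPI). A hedged userspace sketch of the corresponding IOMMU_IOAS_UNMAP call; iommufd and ioas_id are assumed to be a valid /dev/iommu file descriptor and IOAS id:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Sketch only: unmap [iova, iova + length) from an IOAS and return the
 * byte count the kernel reports as unmapped (written back into .length
 * by iommufd_ioas_unmap()), or -1 if the ioctl fails. */
static int64_t ioas_unmap(int iommufd, uint32_t ioas_id,
			  uint64_t iova, uint64_t length)
{
	struct iommu_ioas_unmap cmd = {
		.size    = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova    = iova,
		.length  = length,
	};

	if (ioctl(iommufd, IOMMU_IOAS_UNMAP, &cmd))
		return -1;

	return cmd.length;
}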
|
| H A D | iommufd_private.h |
|  120  unsigned long length, unsigned long *unmapped);
|  121  int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);
|
| /linux/Documentation/features/vm/TLB/ |
| H A D | arch-support.txt | 4 # description: arch supports deferral of TLB flush until multiple pages are unmapped
|
| /linux/drivers/iommu/riscv/ |
| H A D | iommu.c |
| 1254  size_t unmapped = 0;  in riscv_iommu_unmap_pages() local
| 1257  while (unmapped < size) {  in riscv_iommu_unmap_pages()
| 1260  return unmapped;  in riscv_iommu_unmap_pages()
| 1264  return unmapped;  in riscv_iommu_unmap_pages()
| 1274  unmapped += pte_size;  in riscv_iommu_unmap_pages()
| 1277  return unmapped;  in riscv_iommu_unmap_pages()
|
| /linux/Documentation/userspace-api/ |
| H A D | tee.rst | 16 any longer it should be unmapped with munmap() to allow the reuse of
|
| /linux/Documentation/networking/device_drivers/ethernet/marvell/ |
| H A D | octeontx2.rst |
|  173  - Error due to operation of unmapped PF.
|  187  - Error due to unmapped slot.
|  237  - Receive packet on an unmapped PF.
|  249  - Error due to unmapped slot.
|  291  Rx on unmapped PF_FUNC
|
| /linux/arch/arm64/kvm/hyp/ |
| H A D | pgtable.c |
|  461  u64 *unmapped = ctx->arg;  in hyp_unmap_walker() local
|  483  *unmapped += granule;  in hyp_unmap_walker()
|  498  u64 unmapped = 0;  in kvm_pgtable_hyp_unmap() local
|  501  .arg = &unmapped,  in kvm_pgtable_hyp_unmap()
|  509  return unmapped;  in kvm_pgtable_hyp_unmap()
|
| /linux/Documentation/ABI/testing/ |
| H A D | sysfs-class-rnbd-server | 32 When the device is unmapped by that client, the directory will be removed.
|
| H A D | sysfs-block-rnbd | 9 is using the device. When "force" is used, the device is also unmapped
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| H A D | amdgpu_umsch_mm.h | 112 uint32_t unmapped; member
|
| /linux/Documentation/userspace-api/media/v4l/ |
| H A D | vidioc-reqbufs.rst |
|   67  orphaned and will be freed when they are unmapped or when the exported DMABUF
|  149  when they are unmapped or when the exported DMABUF fds are closed.
|
| /linux/drivers/dax/ |
| H A D | Kconfig | 56 instance is created to access that unmapped-by-default address range.
|
| /linux/Documentation/mm/damon/ |
| H A D | design.rst |
|   96  mapped to the physical memory and accessed. Thus, tracking the unmapped
|  100  cases. That said, too huge unmapped areas inside the monitoring target should
|  105  gaps between the three regions are the two biggest unmapped areas in the given
|  106  address space. The two biggest unmapped areas would be the gap between the
|  268  virtual memory could be dynamically mapped and unmapped. Physical memory could
|  696  - unmapped
|  697  - Applied to pages that unmapped.
|