/linux/drivers/iommu/iommufd/
iova_bitmap.c
     36  unsigned long iova;  [member]
    115  unsigned long iova;  [member]
    131  unsigned long iova)  [in iova_bitmap_offset_to_index()]
    133  return (iova >> bitmap->mapped.pgshift) /  [in iova_bitmap_offset_to_index()]
    155  return bitmap->iova + iova_bitmap_index_to_offset(bitmap, skip);  [in iova_bitmap_mapped_iova()]
    202  mapped->iova = iova_bitmap_mapped_iova(bitmap);  [in iova_bitmap_get()]
    243  struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,  [in iova_bitmap_alloc(), argument]
    259  bitmap->iova = iova;  [in iova_bitmap_alloc()]
    261  mapped->iova = iova;  [in iova_bitmap_alloc()]
    320  unsigned long max_iova = bitmap->iova + bitmap->length - 1;  [in iova_bitmap_mapped_length()]
    [all …]
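The helpers above convert between IOVAs and positions in a dirty-tracking bitmap: shift the IOVA by the page size to get a page number, then split that into a word index and a bit offset. A minimal userspace sketch of that arithmetic, with a 4 KiB page shift and 64-bit bitmap words as assumptions (the kernel code takes both from `bitmap->mapped`):

```c
#include <assert.h>
#include <stdint.h>

#define PGSHIFT       12     /* assumed 4 KiB pages */
#define BITS_PER_WORD 64     /* assumed 64-bit bitmap elements */

/* Word index holding the dirty bit for @iova, relative to @base. */
static unsigned long iova_to_word_index(uint64_t base, uint64_t iova)
{
	return ((iova - base) >> PGSHIFT) / BITS_PER_WORD;
}

/* Bit position of @iova inside its bitmap word. */
static unsigned int iova_to_bit(uint64_t base, uint64_t iova)
{
	return ((iova - base) >> PGSHIFT) % BITS_PER_WORD;
}

int main(void)
{
	/* Pages 0 and 63 share word 0; page 64 starts word 1. */
	assert(iova_to_word_index(0, 63ull << PGSHIFT) == 0);
	assert(iova_to_word_index(0, 64ull << PGSHIFT) == 1);
	assert(iova_to_bit(0, 65ull << PGSHIFT) == 1);
	return 0;
}
```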
/linux/include/trace/events/
iommu.h
     81  TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
     83  TP_ARGS(iova, paddr, size),
     86  __field(u64, iova)
     92  __entry->iova = iova;
     97  TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu",
     98  __entry->iova, __entry->iova + __entry->size, __entry->paddr,
    105  TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
    107  TP_ARGS(iova, size, unmapped_size),
    [all …]
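The map event records the start IOVA and prints the mapped range as `iova` through `iova + size`. A quick way to preview the rendered line outside the kernel, reusing the TP_printk() format string above (the values are made up for illustration):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t iova = 0x100000, paddr = 0x80000000;
	size_t size = 4096;

	/* Same format string as the TP_printk() above. */
	printf("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu\n",
	       (unsigned long long)iova,
	       (unsigned long long)(iova + size),
	       (unsigned long long)paddr, size);
	return 0;
}
```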
/linux/drivers/iommu/
io-pgtable-arm-selftests.c
     26  static void dummy_tlb_flush(unsigned long iova, size_t size,  [in dummy_tlb_flush(), argument]
     34  unsigned long iova, size_t granule,  [in dummy_tlb_add_page(), argument]
     37  dummy_tlb_flush(iova, granule, granule, cookie);  [in dummy_tlb_add_page()]
     59  unsigned long iova;  [in arm_lpae_run_tests(), local]
     87  iova = 0;  [in arm_lpae_run_tests()]
     91  if (ops->map_pages(ops, iova, iova, size, 1,  [in arm_lpae_run_tests()]
     98  if (!ops->map_pages(ops, iova, iova + size, size, 1,  [in arm_lpae_run_tests()]
    103  if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))  [in arm_lpae_run_tests()]
    106  iova += SZ_1G;  [in arm_lpae_run_tests()]
    110  iova = 0;  [in arm_lpae_run_tests()]
    [all …]
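The selftest pattern above: map one page so that paddr equals iova, verify that mapping the same IOVA again fails, and check that a translation at an offset inside the page (`iova + 42`) resolves to the same offset in the physical page. A toy userspace model of those three checks, using a flat array as a one-level 4 KiB page table (table shape and sizes are illustrative assumptions, not the LPAE layout):

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define NR_PTES    1024              /* toy table: 4 MiB of IOVA space */

static uint64_t pt[NR_PTES];         /* pte = physical page base | valid bit */

static int toy_map(uint64_t iova, uint64_t paddr)
{
	uint64_t idx = iova >> PAGE_SHIFT;

	if (idx >= NR_PTES || (pt[idx] & 1))    /* refuse double-map */
		return -1;
	pt[idx] = paddr | 1;
	return 0;
}

static uint64_t toy_iova_to_phys(uint64_t iova)
{
	uint64_t pte = pt[iova >> PAGE_SHIFT];

	assert(pte & 1);                        /* must be mapped */
	return (pte & ~1ull) + (iova & (PAGE_SIZE - 1));
}

int main(void)
{
	uint64_t iova = 0;

	/* Identity-map one page, as the selftest does, ... */
	assert(toy_map(iova, iova) == 0);
	/* ... a second map at the same IOVA must fail, ... */
	assert(toy_map(iova, iova + PAGE_SIZE) != 0);
	/* ... and an offset inside the page must translate through. */
	assert(toy_iova_to_phys(iova + 42) == iova + 42);
	return 0;
}
```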
sun50i-iommu.c
    166  static u32 sun50i_iova_get_dte_index(dma_addr_t iova)  [in sun50i_iova_get_dte_index(), argument]
    168  return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);  [in sun50i_iova_get_dte_index()]
    171  static u32 sun50i_iova_get_pte_index(dma_addr_t iova)  [in sun50i_iova_get_pte_index(), argument]
    173  return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);  [in sun50i_iova_get_pte_index()]
    176  static u32 sun50i_iova_get_page_offset(dma_addr_t iova)  [in sun50i_iova_get_page_offset(), argument]
    178  return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);  [in sun50i_iova_get_page_offset()]
    302  unsigned long iova)  [in sun50i_iommu_zap_iova(), argument]
    307  iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);  [in sun50i_iommu_zap_iova()]
    319  unsigned long iova)  [in sun50i_iommu_zap_ptw_cache(), argument]
    324  iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);  [in sun50i_iommu_zap_ptw_cache()]
    [all …]
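sun50i splits an IOVA into a directory index, a page-table index, and a page offset with FIELD_GET() over fixed masks. A self-contained rendering of the same idiom; the mask values below are assumptions chosen for a two-level, 4 KiB-page layout, not necessarily the driver's real SUN50I_* masks, and field_get() is a portable stand-in for the kernel macro:

```c
#include <assert.h>
#include <stdint.h>

/* Assumed layout: iova[31:20] = DTE, iova[19:12] = PTE, iova[11:0] = offset. */
#define IOVA_DTE_MASK  0xfff00000u
#define IOVA_PTE_MASK  0x000ff000u
#define IOVA_PAGE_MASK 0x00000fffu

/* Portable stand-in for the kernel's FIELD_GET(): shift by the mask's LSB. */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t iova = 0x12345678;

	assert(field_get(IOVA_DTE_MASK, iova) == 0x123);
	assert(field_get(IOVA_PTE_MASK, iova) == 0x45);
	assert(field_get(IOVA_PAGE_MASK, iova) == 0x678);
	return 0;
}
```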
exynos-iommu.c
    101  #define section_offs(iova) (iova & (SECT_SIZE - 1))  [argument]
    103  #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))  [argument]
    105  #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))  [argument]
    110  static u32 lv1ent_offset(sysmmu_iova_t iova)  [in lv1ent_offset(), argument]
    112  return iova >> SECT_ORDER;  [in lv1ent_offset()]
    115  static u32 lv2ent_offset(sysmmu_iova_t iova)  [in lv2ent_offset(), argument]
    117  return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);  [in lv2ent_offset()]
    181  static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)  [in section_entry(), argument]
    183  return pgtable + lv1ent_offset(iova);  [in section_entry()]
    186  static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)  [in page_entry(), argument]
    [all …]
tegra-smmu.c
    169  static unsigned int iova_pd_index(unsigned long iova)  [in iova_pd_index(), argument]
    171  return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);  [in iova_pd_index()]
    174  static unsigned int iova_pt_index(unsigned long iova)  [in iova_pt_index(), argument]
    176  return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);  [in iova_pt_index()]
    236  unsigned long iova)  [in smmu_flush_tlb_section(), argument]
    245  value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);  [in smmu_flush_tlb_section()]
    251  unsigned long iova)  [in smmu_flush_tlb_group(), argument]
    260  value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);  [in smmu_flush_tlb_group()]
    559  static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,  [in tegra_smmu_set_pde(), argument]
    562  unsigned int pd_index = iova_pd_index(iova);  [in tegra_smmu_set_pde()]
    [all …]
virtio-iommu.c
     59  struct interval_tree_node iova;  [member]
    330  static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,  [in viommu_add_mapping(), argument]
    341  mapping->iova.start = iova;  [in viommu_add_mapping()]
    342  mapping->iova.last = end;  [in viommu_add_mapping()]
    346  interval_tree_insert(&mapping->iova, &vdomain->mappings);  [in viommu_add_mapping()]
    362  u64 iova, u64 end)  [in viommu_del_mappings(), argument]
    370  next = interval_tree_iter_first(&vdomain->mappings, iova, end);  [in viommu_del_mappings()]
    373  mapping = container_of(node, struct viommu_mapping, iova);  [in viommu_del_mappings()]
    374  next = interval_tree_iter_next(node, iova, end);  [in viommu_del_mappings()]
    377  if (mapping->iova.start < iova)  [in viommu_del_mappings()]
    [all …]
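virtio-iommu tracks mappings in an interval tree keyed by [iova.start, iova.last], and viommu_del_mappings() walks the nodes overlapping the requested range, refusing to remove a mapping that begins before the range (that would split it). A small sketch of that bookkeeping over a plain array instead of the kernel interval tree; the fixed-size store and function names are illustrative only:

```c
#include <assert.h>
#include <stdint.h>

struct mapping { uint64_t start, last; int used; };

static struct mapping store[16];     /* toy stand-in for the interval tree */

static int add_mapping(uint64_t iova, uint64_t end)
{
	for (int i = 0; i < 16; i++) {
		if (!store[i].used) {
			store[i] = (struct mapping){ iova, end, 1 };
			return 0;
		}
	}
	return -1;
}

/* Remove mappings overlapping [iova, end]; as in viommu_del_mappings(),
 * only drop a mapping that begins inside the range. Returns bytes unmapped. */
static uint64_t del_mappings(uint64_t iova, uint64_t end)
{
	uint64_t unmapped = 0;

	for (int i = 0; i < 16; i++) {
		struct mapping *m = &store[i];

		if (!m->used || m->last < iova || m->start > end)
			continue;               /* no overlap */
		if (m->start < iova)
			continue;               /* would split the mapping */
		unmapped += m->last - m->start + 1;
		m->used = 0;
	}
	return unmapped;
}

int main(void)
{
	assert(add_mapping(0x1000, 0x1fff) == 0);
	assert(add_mapping(0x2000, 0x3fff) == 0);
	/* The range covers only the second mapping entirely. */
	assert(del_mappings(0x2000, 0x3fff) == 0x2000);
	return 0;
}
```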
rockchip-iommu.c
    322  static u32 rk_iova_dte_index(dma_addr_t iova)  [in rk_iova_dte_index(), argument]
    324  return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;  [in rk_iova_dte_index()]
    327  static u32 rk_iova_pte_index(dma_addr_t iova)  [in rk_iova_pte_index(), argument]
    329  return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;  [in rk_iova_pte_index()]
    332  static u32 rk_iova_page_offset(dma_addr_t iova)  [in rk_iova_page_offset(), argument]
    334  return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;  [in rk_iova_page_offset()]
    369  dma_addr_t iova;  [in rk_iommu_zap_lines(), local]
    371  for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)  [in rk_iommu_zap_lines()]
    372  rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);  [in rk_iommu_zap_lines()]
    535  static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)  [in log_iova(), argument]
    [all …]
/linux/drivers/staging/media/ipu3/
ipu3-dmamap.c
    102  struct iova *iova;  [in imgu_dmamap_alloc(), local]
    107  iova = alloc_iova(&imgu->iova_domain, size >> shift,  [in imgu_dmamap_alloc()]
    109  if (!iova)  [in imgu_dmamap_alloc()]
    117  iovaddr = iova_dma_addr(&imgu->iova_domain, iova);  [in imgu_dmamap_alloc()]
    133  map->daddr = iova_dma_addr(&imgu->iova_domain, iova);  [in imgu_dmamap_alloc()]
    142  imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),  [in imgu_dmamap_alloc()]
    146  __free_iova(&imgu->iova_domain, iova);  [in imgu_dmamap_alloc()]
    153  struct iova *iova;  [in imgu_dmamap_unmap(), local]
    155  iova = find_iova(&imgu->iova_domain,  [in imgu_dmamap_unmap()]
    157  if (WARN_ON(!iova))  [in imgu_dmamap_unmap()]
    [all …]
ipu3-mmu.c
    154  static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,  [in address_to_pte_idx(), argument]
    157  iova >>= IPU3_PAGE_SHIFT;  [in address_to_pte_idx()]
    160  *l2pt_idx = iova & IPU3_L2PT_MASK;  [in address_to_pte_idx()]
    162  iova >>= IPU3_L2PT_SHIFT;  [in address_to_pte_idx()]
    165  *l1pt_idx = iova & IPU3_L1PT_MASK;  [in address_to_pte_idx()]
    210  static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,  [in __imgu_mmu_map(), argument]
    220  address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);  [in __imgu_mmu_map()]
    251  int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,  [in imgu_mmu_map(), argument]
    262  if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {  [in imgu_mmu_map()]
    264  iova, &paddr, size);  [in imgu_mmu_map()]
    [all …]
/linux/drivers/vfio/
vfio_iommu_type1.c
     90  dma_addr_t iova;         /* Device address */  [member]
    130  dma_addr_t iova;         /* Device address */  [member]
    137  dma_addr_t iova;  [member]
    176  if (start + size - 1 < dma->iova)  [in vfio_find_dma()]
    178  else if (start > dma->iova + dma->size - 1)  [in vfio_find_dma()]
    200  if (start <= dma->iova + dma->size - 1) {  [in vfio_find_dma_first_node()]
    203  if (start >= dma->iova)  [in vfio_find_dma_first_node()]
    210  if (res && dma_res->iova > end)  [in vfio_find_dma_first_node()]
    226  if (new->iova <= dma->iova)  [in vfio_link_dma()]
    276  bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);  [in vfio_dma_populate_bitmap()]
    [all …]
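vfio_find_dma() descends a red-black tree of DMA ranges using inclusive-end comparisons: the search range sorts left of a node if it ends before the node starts, right if it starts after the node ends, and otherwise overlaps. The `- 1` keeps the arithmetic from wrapping when a range reaches the top of the address space. The comparison on its own, reduced to the two fields involved:

```c
#include <assert.h>
#include <stdint.h>

struct dma_range { uint64_t iova, size; };

/* <0: search range entirely below @dma, >0: entirely above, 0: overlap. */
static int dma_range_cmp(uint64_t start, uint64_t size,
			 const struct dma_range *dma)
{
	if (start + size - 1 < dma->iova)
		return -1;
	if (start > dma->iova + dma->size - 1)
		return 1;
	return 0;
}

int main(void)
{
	struct dma_range dma = { .iova = 0x10000, .size = 0x1000 };

	assert(dma_range_cmp(0x0,     0x1000, &dma) < 0);  /* below */
	assert(dma_range_cmp(0x11000, 0x1000, &dma) > 0);  /* above */
	assert(dma_range_cmp(0x10800, 0x1000, &dma) == 0); /* overlaps */

	/* Inclusive-end form stays correct at the top of the space. */
	struct dma_range top = { .iova = UINT64_MAX - 0xfff, .size = 0x1000 };
	assert(dma_range_cmp(UINT64_MAX - 0xfff, 0x1000, &top) == 0);
	return 0;
}
```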
/linux/drivers/infiniband/sw/rxe/
rxe_odp.c
     80  u64 iova, int access_flags, struct rxe_mr *mr)  [in rxe_odp_mr_init_user(), argument]
     91  if (iova != 0)  [in rxe_odp_mr_init_user()]
    112  mr->ibmr.iova = iova;  [in rxe_odp_mr_init_user()]
    127  static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,  [in rxe_check_pagefault(), argument]
    134  addr = iova & (~(BIT(umem_odp->page_shift) - 1));  [in rxe_check_pagefault()]
    137  while (addr < iova + length) {  [in rxe_check_pagefault()]
    150  static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova)  [in rxe_odp_iova_to_index(), argument]
    152  return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;  [in rxe_odp_iova_to_index()]
    155  static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova)  [in rxe_odp_iova_to_page_offset(), argument]
    157  return iova & (BIT(umem_odp->page_shift) - 1);  [in rxe_odp_iova_to_page_offset()]
    [all …]
rxe_mr.c
     27  int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)  [in mr_check_range(), argument]
     35  if (iova < mr->ibmr.iova ||  [in mr_check_range()]
     36  iova + length > mr->ibmr.iova + mr->ibmr.length) {  [in mr_check_range()]
     75  static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)  [in rxe_mr_iova_to_index(), argument]
     77  return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);  [in rxe_mr_iova_to_index()]
     80  static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)  [in rxe_mr_iova_to_page_offset(), argument]
     82  return iova & (mr_page_size(mr) - 1);  [in rxe_mr_iova_to_page_offset()]
    240  mr->page_offset = mr->ibmr.iova & (page_size - 1);  [in rxe_map_mr_sg()]
    245  static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,  [in rxe_mr_copy_xarray(), argument]
    248  unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);  [in rxe_mr_copy_xarray()]
    [all …]
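rxe_mr turns an absolute IOVA into an index into the MR's page array by differencing page numbers against the MR's base IOVA, after a bounds check against [ibmr.iova, ibmr.iova + length). A standalone rendering of those two helpers, with shortened field names and an assumed 4 KiB page size:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

struct toy_mr { uint64_t iova; uint64_t length; };

/* Mirrors mr_check_range(): reject anything outside the registered window. */
static int check_range(const struct toy_mr *mr, uint64_t iova, size_t len)
{
	if (iova < mr->iova || iova + len > mr->iova + mr->length)
		return -1;
	return 0;
}

/* Mirrors rxe_mr_iova_to_index(): difference of page numbers. */
static unsigned long iova_to_index(const struct toy_mr *mr, uint64_t iova)
{
	return (iova >> PAGE_SHIFT) - (mr->iova >> PAGE_SHIFT);
}

int main(void)
{
	struct toy_mr mr = { .iova = 0x5000, .length = 4 * PAGE_SIZE };

	assert(check_range(&mr, 0x5000, PAGE_SIZE) == 0);
	assert(check_range(&mr, 0x4000, PAGE_SIZE) != 0);     /* below */
	assert(check_range(&mr, 0x8000, 2 * PAGE_SIZE) != 0); /* runs past end */
	assert(iova_to_index(&mr, 0x7123) == 2);  /* third page of the MR */
	return 0;
}
```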
/linux/drivers/media/pci/intel/ipu6/
ipu6-mmu.c
     99  u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;  [in page_table_dump(), local]
    108  l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys);  [in page_table_dump()]
    112  u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);  [in page_table_dump()]
    257  static void l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,  [in l2_unmap(), argument]
    267  for (l1_idx = iova >> ISP_L1PT_SHIFT;  [in l2_unmap()]
    271  l1_idx, iova);  [in l2_unmap()]
    276  iova, l1_idx);  [in l2_unmap()]
    282  for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;  [in l2_unmap()]
    291  iova += ISP_PAGE_SIZE;  [in l2_unmap()]
    306  static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,  [in l2_map(), argument]
    [all …]
/linux/drivers/gpu/drm/msm/
msm_iommu.c
     47  unsigned long iova, phys_addr_t paddr,  [in calc_pgsize(), argument]
     53  unsigned long addr_merge = paddr | iova;  [in calc_pgsize()]
     83  if ((iova ^ paddr) & (pgsize_next - 1))  [in calc_pgsize()]
    101  static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,  [in msm_iommu_pagetable_unmap(), argument]
    112  pgsize = calc_pgsize(pagetable, iova, iova, size, &count);  [in msm_iommu_pagetable_unmap()]
    114  unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);  [in msm_iommu_pagetable_unmap()]
    125  iova += unmapped;  [in msm_iommu_pagetable_unmap()]
    134  static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)  [in msm_iommu_pagetable_map_prr(), argument]
    140  u64 addr = iova;  [in msm_iommu_pagetable_map_prr()]
    156  msm_iommu_pagetable_unmap(mmu, iova, addr - iova);  [in msm_iommu_pagetable_map_prr()]
    [all …]
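calc_pgsize() picks the largest supported page size that fits both the remaining length and the combined alignment of iova and paddr (their OR, addr_merge: a page size is usable only if neither address has bits set below it). A compact sketch of that selection idea over an assumed page-size bitmap; the kernel function also negotiates the page count and the next page size, which is omitted here:

```c
#include <assert.h>
#include <stddef.h>

/* Assumed supported page sizes: 4K | 2M | 1G (a common LPAE set). */
#define PGSIZE_BITMAP ((1ul << 12) | (1ul << 21) | (1ul << 30))

static size_t calc_pgsize(unsigned long iova, unsigned long paddr, size_t size)
{
	unsigned long addr_merge = paddr | iova;
	size_t best = 0;

	for (unsigned int bit = 12; bit <= 30; bit++) {
		size_t pgsize = (size_t)1 << bit;

		if (!(PGSIZE_BITMAP & pgsize))
			continue;       /* not a supported page size */
		if (pgsize > size)
			break;          /* larger than the remaining length */
		if (addr_merge & (pgsize - 1))
			break;          /* iova or paddr not aligned to it */
		best = pgsize;
	}
	return best;
}

int main(void)
{
	/* 2 MiB-aligned pair with 4 MiB to map: pick 2 MiB pages. */
	assert(calc_pgsize(0x40200000ul, 0x80200000ul, 1ul << 22) == 1ul << 21);
	/* A paddr misaligned at 2 MiB falls back to 4 KiB pages. */
	assert(calc_pgsize(0x40200000ul, 0x80201000ul, 1ul << 22) == 1ul << 12);
	return 0;
}
```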
/linux/tools/testing/selftests/iommu/
iommufd_fail_nth.c
    243  __u64 iova;  [in TEST_FAIL_NTH(), local]
    279  if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,  [in TEST_FAIL_NTH()]
    291  .src_iova = iova,  [in TEST_FAIL_NTH()]
    299  if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,  [in TEST_FAIL_NTH()]
    303  _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);  [in TEST_FAIL_NTH()]
    313  __u64 iova;  [in TEST_FAIL_NTH(), local]
    330  if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,  [in TEST_FAIL_NTH()]
    349  __u64 iova;  [in TEST_FAIL_NTH(), local]
    366  if (_test_ioctl_ioas_map_file(self->fd, ioas_id, mfd, 0, 262144, &iova,  [in TEST_FAIL_NTH()]
    386  __u64 iova;  [in TEST_FAIL_NTH(), local]
    [all …]
iommufd_utils.h
    405  __u64 iova, size_t page_size,  [in _test_cmd_get_dirty_bitmap(), argument]
    412  .iova = iova,  [in _test_cmd_get_dirty_bitmap()]
    425  #define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, \  [argument]
    427  ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
    431  __u64 iova, size_t page_size,  [in _test_cmd_mock_domain_set_dirty(), argument]
    439  .iova = iova,  [in _test_cmd_mock_domain_set_dirty()]
    455  #define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \  [argument]
    458  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
    462  __u64 iova, size_t page_size,  [in _test_mock_dirty_bitmaps(), argument]
    477  test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,  [in _test_mock_dirty_bitmaps()]
    [all …]
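These wrappers set pages dirty through the mock domain and read the dirty bitmap back for an (iova, length, page_size) window; bit i of the bitmap stands for the page at iova + i * page_size. A userspace check of that bit layout, pure arithmetic with no iommufd calls (the 4 KiB granule is an assumption):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096ul             /* assumed tracking granule */

/* Mark the page containing @addr dirty in @bitmap; window starts at @iova. */
static void set_dirty(uint64_t *bitmap, uint64_t iova, uint64_t addr)
{
	uint64_t bit = (addr - iova) / PAGE_SIZE;

	bitmap[bit / 64] |= 1ull << (bit % 64);
}

static int test_dirty(const uint64_t *bitmap, uint64_t iova, uint64_t addr)
{
	uint64_t bit = (addr - iova) / PAGE_SIZE;

	return !!(bitmap[bit / 64] & (1ull << (bit % 64)));
}

int main(void)
{
	uint64_t bitmap[4];
	uint64_t iova = 0x100000;

	memset(bitmap, 0, sizeof(bitmap));
	set_dirty(bitmap, iova, iova + 65 * PAGE_SIZE);  /* page 65: word 1 */
	assert(test_dirty(bitmap, iova, iova + 65 * PAGE_SIZE));
	assert(!test_dirty(bitmap, iova, iova));
	return 0;
}
```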
iommufd.c
    130  TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);  [in TEST_F()]
    135  TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);  [in TEST_F()]
    295  __u64 iova;  [in TEST_F(), local]
    301  test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);  [in TEST_F()]
    303  test_ioctl_ioas_unmap(iova, PAGE_SIZE);  [in TEST_F()]
    306  test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);  [in TEST_F()]
    978  __u64 iova;  [in TEST_F()]
    980  __u64 iova;  [in TEST_F(), local]
    992  test_err_ioctl_ioas_map(ENOSPC, buffer, map_len, &iova);  [in TEST_F()]
   1142  .access_pages = { .iova = self->base_iova + PAGE_SIZE,  [in TEST_F()]
   1176  .access_pages = { .iova …  [in TEST_F()]
   1272  check_access_rw(struct __test_metadata *_metadata, int fd, unsigned int access_id, uint64_t iova, unsigned int def_flags)  [in check_access_rw(), argument]
   1336  __u64 iova;  [in TEST_F(), local]
   1350  __u64 iova;  [in TEST_F(), local]
   1416  __u64 iova;  [in TEST_F(), local]
   1476  __u64 iova;  [in TEST_F(), local]
   1543  uint64_t iova;  [in TEST_F(), local]
   1577  __u64 iova;  [in TEST_F(), local]
   1593  __u64 iova;  [in TEST_F(), local]
   1746  __u64 iova;  [in test_basic_mmap(), local]
   1772  __u64 iova;  [in test_basic_file(), local]
   1802  __u64 iova;  [in TEST_F(), local]
   1857  __u64 iova;  [in TEST_F(), local]
   1913  __u64 iova;  [in TEST_F(), local]
   3167  __u64 iova = MOCK_APERTURE_START, iova2;  [in TEST_F(), local]
    [all …]
/linux/drivers/media/platform/nvidia/tegra-vde/
dmabuf-cache.c
     28  struct iova *iova;  [member]
     39  tegra_vde_iommu_unmap(entry->vde, entry->iova);  [in tegra_vde_release_entry()]
     73  struct iova *iova;  [in tegra_vde_dmabuf_cache_map(), local]
     91  *addrp = iova_dma_addr(&vde->iova, entry->iova);  [in tegra_vde_dmabuf_cache_map()]
    125  err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);  [in tegra_vde_dmabuf_cache_map()]
    129  *addrp = iova_dma_addr(&vde->iova, iova);  [in tegra_vde_dmabuf_cache_map()]
    132  iova = NULL;  [in tegra_vde_dmabuf_cache_map()]
    139  entry->iova = iova;  [in tegra_vde_dmabuf_cache_map()]
vde.h
     83  struct iova *iova;  [member]
    115  struct iova_domain iova;  [member]
    116  struct iova *iova_resv_static_addresses;
    117  struct iova *iova_resv_last_page;
    161  struct iova *iova[VB2_MAX_PLANES];  [member]
    190  struct iova **iovap,
    192  void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova);
/linux/tools/testing/selftests/vfio/lib/
iommu.c
     69  int __iommu_hva2iova(struct iommu *iommu, void *vaddr, iova_t *iova)  [in __iommu_hva2iova(), argument]
     80  if (iova)  [in __iommu_hva2iova()]
     81  *iova = region->iova + (vaddr - region->vaddr);  [in __iommu_hva2iova()]
     91  iova_t iova;  [in iommu_hva2iova(), local]
     94  ret = __iommu_hva2iova(iommu, vaddr, &iova);  [in iommu_hva2iova()]
     97  return iova;  [in iommu_hva2iova()]
    106  .iova = region->iova,  [in vfio_iommu_map()]
    124  .iova = region->iova,  [in iommufd_map()]
    152  static int __vfio_iommu_unmap(int fd, u64 iova, u64 size, u32 flags, u64 *unmapped)  [in __vfio_iommu_unmap(), argument]
    156  .iova = iova,  [in __vfio_iommu_unmap()]
    [all …]
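__iommu_hva2iova() resolves a host virtual address by locating the containing region and carrying the offset across address spaces: iova = region->iova + (vaddr - region->vaddr). The same calculation as a self-contained function:

```c
#include <assert.h>
#include <stdint.h>

struct region { uint64_t vaddr, iova, size; };

/* Carry the offset within the region from HVA space to IOVA space. */
static uint64_t hva2iova(const struct region *r, uint64_t vaddr)
{
	assert(vaddr >= r->vaddr && vaddr < r->vaddr + r->size);
	return r->iova + (vaddr - r->vaddr);
}

int main(void)
{
	struct region r = { 0x7f0000000000, 0x100000, 0x10000 };

	assert(hva2iova(&r, 0x7f0000000123) == 0x100123);
	return 0;
}
```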
iova_allocator.c
     58  iova_t iova, last;  [in iova_allocator_alloc(), local]
     64  iova = range->start + allocator->range_offset;  [in iova_allocator_alloc()]
     67  if (check_add_overflow(iova, size - 1, &last) ||  [in iova_allocator_alloc()]
     72  iova = last & ~(size - 1);  [in iova_allocator_alloc()]
     75  if (check_add_overflow(iova, size - 1, &last) ||  [in iova_allocator_alloc()]
     86  return iova;  [in iova_allocator_alloc()]
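The allocator rounds a candidate IOVA up to the natural alignment of the (power-of-two) size and guards each step against wraparound: add size - 1, check for overflow, then clear the low bits. A standalone version using the compiler's overflow builtin in place of the kernel's check_add_overflow(), with range handling reduced to the bare rounding step:

```c
#include <assert.h>
#include <stdint.h>

/* Round @iova up to the alignment of power-of-two @size, detecting wrap.
 * Returns 0 on success and stores the aligned value in *out. */
static int align_iova(uint64_t iova, uint64_t size, uint64_t *out)
{
	uint64_t last;

	if (__builtin_add_overflow(iova, size - 1, &last))
		return -1;              /* wraps the address space */
	*out = last & ~(size - 1);
	return 0;
}

int main(void)
{
	uint64_t iova;

	/* 0x1234 rounded up to 4 KiB alignment is 0x2000. */
	assert(align_iova(0x1234, 0x1000, &iova) == 0 && iova == 0x2000);
	/* Already-aligned values are unchanged. */
	assert(align_iova(0x3000, 0x1000, &iova) == 0 && iova == 0x3000);
	/* Near the top of the space, the addition overflows. */
	assert(align_iova(UINT64_MAX - 0xff, 0x1000, &iova) != 0);
	return 0;
}
```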
/linux/include/linux/
io-pgtable.h
     41  void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
     44  unsigned long iova, size_t granule, void *cookie);
    211  int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
    214  size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
    218  unsigned long iova);
    219  int (*pgtable_walk)(struct io_pgtable_ops *ops, unsigned long iova, void *wd);
    221  unsigned long iova, size_t size,
    279  io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,  [in io_pgtable_tlb_flush_walk(), argument]
    283  iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);  [in io_pgtable_tlb_flush_walk()]
    288  struct iommu_iotlb_gather *gather, unsigned long iova,  [in io_pgtable_tlb_add_page(), argument]
    [all …]
iommu.h
    387  unsigned long iova, size_t size,
    759  int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
    762  size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
    767  int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
    775  dma_addr_t iova);
    914  extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
    916  int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
    918  int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
    920  extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
    923  unsigned long iova, size_t size,
    [all …]
/linux/tools/testing/selftests/vfio/
vfio_dma_mapping_test.c
     38  static int intel_iommu_mapping_get(const char *bdf, u64 iova,  [in intel_iommu_mapping_get(), argument]
     51  printf("Searching for IOVA 0x%lx in %s\n", iova, iommu_mapping_path);  [in intel_iommu_mapping_get()]
     60  if (line_iova != (iova / getpagesize()))  [in intel_iommu_mapping_get()]
     86  static int iommu_mapping_get(const char *bdf, u64 iova,  [in iommu_mapping_get(), argument]
     90  return intel_iommu_mapping_get(bdf, iova, mapping);  [in iommu_mapping_get()]
    152  region.iova = iova_allocator_alloc(self->iova_allocator, size);  [in TEST_F()]
    156  printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);  [in TEST_F()]
    158  ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));  [in TEST_F()]
    160  rc = iommu_mapping_get(device_bdf, region.iova, &mapping);  [in TEST_F()]
    172  printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova);  [in TEST_F()]
    [all …]