// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2019 GRATE-DRIVER project
 */

#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "vde.h"

/*
 * Allocate an IOVA range below the domain's aperture end and map @sgt
 * into it. On success, the allocated range is returned in @iovap so the
 * caller can unmap it later.
 */
int tegra_vde_iommu_map(struct tegra_vde *vde,
			struct sg_table *sgt,
			struct iova **iovap,
			size_t size)
{
	struct iova *iova;
	unsigned long shift;
	unsigned long end;
	dma_addr_t addr;

	end = vde->domain->geometry.aperture_end;
	size = iova_align(&vde->iova, size);
	shift = iova_shift(&vde->iova);

	iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);
	if (!iova)
		return -ENOMEM;

	addr = iova_dma_addr(&vde->iova, iova);

	size = iommu_map_sgtable(vde->domain, addr, sgt,
				 IOMMU_READ | IOMMU_WRITE);
	if (!size) {
		__free_iova(&vde->iova, iova);
		return -ENXIO;
	}

	*iovap = iova;

	return 0;
}

/* Unmap a range previously mapped with tegra_vde_iommu_map(). */
void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)
{
	unsigned long shift = iova_shift(&vde->iova);
	unsigned long size = iova_size(iova) << shift;
	dma_addr_t addr = iova_dma_addr(&vde->iova, iova);

	iommu_unmap(vde->domain, addr, size);
	__free_iova(&vde->iova, iova);
}
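/*
 * Illustrative sketch, not part of the driver: roughly how a caller (e.g.
 * the dma-buf mapping code) would be expected to use the helpers above.
 * The function name is hypothetical; only the tegra_vde_iommu_*() helpers
 * and the iova accessors already used in this file are real. Callers take
 * this path only when vde->domain is set, i.e. the device sits behind an
 * IOMMU.
 */
static int __maybe_unused example_vde_map_buffer(struct tegra_vde *vde,
						 struct sg_table *sgt,
						 size_t size,
						 dma_addr_t *addrp)
{
	struct iova *iova;
	int err;

	/* reserve an IOVA range and map the scatterlist into it */
	err = tegra_vde_iommu_map(vde, sgt, &iova, size);
	if (err)
		return err;

	/* the bus address the VDE hardware is programmed with */
	*addrp = iova_dma_addr(&vde->iova, iova);

	/* once the hardware is done with the buffer: */
	tegra_vde_iommu_unmap(vde, iova);

	return 0;
}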
/*
 * Set up an explicitly managed IOMMU domain and IOVA allocator for VDE.
 * Returns 0 without doing anything if the device is not behind an IOMMU.
 */
int tegra_vde_iommu_init(struct tegra_vde *vde)
{
	struct device *dev = vde->dev;
	struct iova *iova;
	unsigned long order;
	unsigned long shift;
	int err;

	vde->group = iommu_group_get(dev);
	if (!vde->group)
		return 0;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	/*
	 * Detach the device from the implicit ARM DMA-API mapping so that
	 * our own domain can be attached below.
	 */
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif
	vde->domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(vde->domain)) {
		err = PTR_ERR(vde->domain);
		vde->domain = NULL;
		goto put_group;
	}

	err = iova_cache_get();
	if (err)
		goto free_domain;

	order = __ffs(vde->domain->pgsize_bitmap);
	init_iova_domain(&vde->iova, 1UL << order, 0);

	err = iommu_attach_group(vde->domain, vde->group);
	if (err)
		goto put_iova;

	/*
	 * We're using some static addresses that are not accessible by VDE
	 * to trap invalid memory accesses.
	 */
	shift = iova_shift(&vde->iova);
	iova = reserve_iova(&vde->iova, 0x60000000 >> shift,
			    0x70000000 >> shift);
	if (!iova) {
		err = -ENOMEM;
		goto detach_group;
	}

	vde->iova_resv_static_addresses = iova;

	/*
	 * BSEV's end-address wraps around due to integer overflow during
	 * hardware context preparation if an IOVA is allocated at the very
	 * end of the address space, and VDE can't handle that. Hence simply
	 * reserve the last page to avoid the problem.
	 */
	iova = reserve_iova(&vde->iova, 0xffffffff >> shift,
			    (0xffffffff >> shift) + 1);
	if (!iova) {
		err = -ENOMEM;
		goto unreserve_iova;
	}

	vde->iova_resv_last_page = iova;

	return 0;

unreserve_iova:
	__free_iova(&vde->iova, vde->iova_resv_static_addresses);
detach_group:
	iommu_detach_group(vde->domain, vde->group);
put_iova:
	put_iova_domain(&vde->iova);
	iova_cache_put();
free_domain:
	iommu_domain_free(vde->domain);
put_group:
	iommu_group_put(vde->group);

	return err;
}

/* Undo everything done by tegra_vde_iommu_init(). */
void tegra_vde_iommu_deinit(struct tegra_vde *vde)
{
	if (vde->domain) {
		__free_iova(&vde->iova, vde->iova_resv_last_page);
		__free_iova(&vde->iova, vde->iova_resv_static_addresses);
		iommu_detach_group(vde->domain, vde->group);
		put_iova_domain(&vde->iova);
		iova_cache_put();
		iommu_domain_free(vde->domain);
		iommu_group_put(vde->group);

		vde->domain = NULL;
	}
}