// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2019 GRATE-DRIVER project
 */

8*9b18ef7cSMauro Carvalho Chehab #include <linux/iommu.h>
9*9b18ef7cSMauro Carvalho Chehab #include <linux/iova.h>
10*9b18ef7cSMauro Carvalho Chehab #include <linux/kernel.h>
11*9b18ef7cSMauro Carvalho Chehab #include <linux/platform_device.h>
12*9b18ef7cSMauro Carvalho Chehab
13*9b18ef7cSMauro Carvalho Chehab #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
14*9b18ef7cSMauro Carvalho Chehab #include <asm/dma-iommu.h>
15*9b18ef7cSMauro Carvalho Chehab #endif
16*9b18ef7cSMauro Carvalho Chehab
17*9b18ef7cSMauro Carvalho Chehab #include "vde.h"
18*9b18ef7cSMauro Carvalho Chehab
/*
 * Allocate an IOVA range of at least @size bytes and map @sgt into the
 * VDE IOMMU domain at that address, read/write.
 *
 * On success the allocated range is handed back through @iovap and 0 is
 * returned. On failure a negative errno is returned and nothing stays
 * mapped or allocated.
 */
int tegra_vde_iommu_map(struct tegra_vde *vde,
			struct sg_table *sgt,
			struct iova **iovap,
			size_t size)
{
	unsigned long shift = iova_shift(&vde->iova);
	unsigned long last = vde->domain->geometry.aperture_end;
	struct iova *iova;
	dma_addr_t base;

	size = iova_align(&vde->iova, size);

	iova = alloc_iova(&vde->iova, size >> shift, last >> shift, true);
	if (!iova)
		return -ENOMEM;

	base = iova_dma_addr(&vde->iova, iova);

	/* iommu_map_sgtable() returns the number of bytes mapped, 0 on error */
	size = iommu_map_sgtable(vde->domain, base, sgt,
				 IOMMU_READ | IOMMU_WRITE);
	if (!size) {
		__free_iova(&vde->iova, iova);
		return -ENXIO;
	}

	*iovap = iova;

	return 0;
}
50*9b18ef7cSMauro Carvalho Chehab
tegra_vde_iommu_unmap(struct tegra_vde * vde,struct iova * iova)51*9b18ef7cSMauro Carvalho Chehab void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)
52*9b18ef7cSMauro Carvalho Chehab {
53*9b18ef7cSMauro Carvalho Chehab unsigned long shift = iova_shift(&vde->iova);
54*9b18ef7cSMauro Carvalho Chehab unsigned long size = iova_size(iova) << shift;
55*9b18ef7cSMauro Carvalho Chehab dma_addr_t addr = iova_dma_addr(&vde->iova, iova);
56*9b18ef7cSMauro Carvalho Chehab
57*9b18ef7cSMauro Carvalho Chehab iommu_unmap(vde->domain, addr, size);
58*9b18ef7cSMauro Carvalho Chehab __free_iova(&vde->iova, iova);
59*9b18ef7cSMauro Carvalho Chehab }
60*9b18ef7cSMauro Carvalho Chehab
/*
 * Set up an explicitly managed IOMMU domain for the VDE hardware.
 *
 * Returns 0 on success — including the case where the device has no IOMMU
 * group, in which case vde->domain is left NULL and the driver runs without
 * IOMMU translation — or a negative errno on failure. On failure everything
 * acquired so far is unwound via the goto chain at the bottom.
 */
int tegra_vde_iommu_init(struct tegra_vde *vde)
{
	struct device *dev = vde->dev;
	struct iova *iova;
	unsigned long order;
	unsigned long shift;
	int err;

	/* no group means no IOMMU for this device; that is not an error */
	vde->group = iommu_group_get(dev);
	if (!vde->group)
		return 0;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	/*
	 * On 32-bit ARM the DMA-IOMMU glue may already have attached the
	 * device to an implicit mapping; detach and release it so the
	 * explicitly managed domain allocated below can be attached instead.
	 */
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif
	vde->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vde->domain) {
		err = -ENOMEM;
		goto put_group;
	}

	err = iova_cache_get();
	if (err)
		goto free_domain;

	/* IOVA granule = smallest page size the domain supports */
	order = __ffs(vde->domain->pgsize_bitmap);
	init_iova_domain(&vde->iova, 1UL << order, 0);

	err = iommu_attach_group(vde->domain, vde->group);
	if (err)
		goto put_iova;

	/*
	 * We're using some static addresses that are not accessible by VDE
	 * to trap invalid memory accesses.
	 */
	shift = iova_shift(&vde->iova);
	iova = reserve_iova(&vde->iova, 0x60000000 >> shift,
			    0x70000000 >> shift);
	if (!iova) {
		err = -ENOMEM;
		goto detach_group;
	}

	vde->iova_resv_static_addresses = iova;

	/*
	 * BSEV's end-address wraps around due to integer overflow during
	 * of hardware context preparation if IOVA is allocated at the end
	 * of address space and VDE can't handle that. Hence simply reserve
	 * the last page to avoid the problem.
	 */
	iova = reserve_iova(&vde->iova, 0xffffffff >> shift,
			    (0xffffffff >> shift) + 1);
	if (!iova) {
		err = -ENOMEM;
		goto unreserve_iova;
	}

	vde->iova_resv_last_page = iova;

	return 0;

unreserve_iova:
	__free_iova(&vde->iova, vde->iova_resv_static_addresses);
detach_group:
	iommu_detach_group(vde->domain, vde->group);
put_iova:
	put_iova_domain(&vde->iova);
	iova_cache_put();
free_domain:
	iommu_domain_free(vde->domain);
put_group:
	iommu_group_put(vde->group);

	return err;
}
143*9b18ef7cSMauro Carvalho Chehab
tegra_vde_iommu_deinit(struct tegra_vde * vde)144*9b18ef7cSMauro Carvalho Chehab void tegra_vde_iommu_deinit(struct tegra_vde *vde)
145*9b18ef7cSMauro Carvalho Chehab {
146*9b18ef7cSMauro Carvalho Chehab if (vde->domain) {
147*9b18ef7cSMauro Carvalho Chehab __free_iova(&vde->iova, vde->iova_resv_last_page);
148*9b18ef7cSMauro Carvalho Chehab __free_iova(&vde->iova, vde->iova_resv_static_addresses);
149*9b18ef7cSMauro Carvalho Chehab iommu_detach_group(vde->domain, vde->group);
150*9b18ef7cSMauro Carvalho Chehab put_iova_domain(&vde->iova);
151*9b18ef7cSMauro Carvalho Chehab iova_cache_put();
152*9b18ef7cSMauro Carvalho Chehab iommu_domain_free(vde->domain);
153*9b18ef7cSMauro Carvalho Chehab iommu_group_put(vde->group);
154*9b18ef7cSMauro Carvalho Chehab
155*9b18ef7cSMauro Carvalho Chehab vde->domain = NULL;
156*9b18ef7cSMauro Carvalho Chehab }
157*9b18ef7cSMauro Carvalho Chehab }
158