xref: /linux/drivers/gpu/drm/msm/msm_iommu.c (revision c98be0c96db00e9b6b02d31e0fa7590c54cdaaac)
1 /*
2  * Copyright (C) 2013 Red Hat
3  * Author: Rob Clark <robdclark@gmail.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */

#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

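/*
 * IOMMU fault handler, registered via iommu_set_fault_handler() in
 * msm_iommu_new().  It just logs the faulting IOVA and flags; returning
 * zero tells the IOMMU core the fault has been handled.
 */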
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}

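/*
 * Attach the IOMMU domain to each of the named context bank devices.
 * Names that do not resolve to a device are silently skipped.
 */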
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
	struct drm_device *dev = mmu->dev;
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	int i, ret;

	for (i = 0; i < cnt; i++) {
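		/*
		 * msm_iommu_get_ctx() is provided by the MSM IOMMU driver but
		 * not declared in a shared header, hence the local prototype.
		 */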
		struct device *msm_iommu_get_ctx(const char *ctx_name);
		struct device *ctx = msm_iommu_get_ctx(names[i]);
		if (IS_ERR_OR_NULL(ctx))
			continue;
		ret = iommu_attach_device(iommu->domain, ctx);
		if (ret) {
			dev_warn(dev->dev, "could not attach iommu to %s\n", names[i]);
			return ret;
		}
	}

	return 0;
}

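/*
 * Map a scatterlist into the IOMMU domain starting at @iova.  On
 * failure, any mappings already created are torn down before returning.
 */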
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
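		/*
		 * Map whole pages: pa is backed up to the page boundary and
		 * bytes extended by the offset so the mapping covers the
		 * entire pages referenced by this sg entry.
		 */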
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, da, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
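	/* Roll back the mappings created before the failure. */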
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

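/*
 * Unmap a previously mapped scatterlist, bailing out early if the IOMMU
 * reports a partial unmap for any entry.
 */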
static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
		struct sg_table *sgt, unsigned len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

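		/*
		 * iommu_unmap() returns the number of bytes actually
		 * unmapped, which may be less than requested on failure.
		 */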
		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, da, bytes);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}

	return 0;
}

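/* Release the IOMMU domain and the wrapper object itself. */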
static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

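/* msm_mmu function table for the IOMMU-backed MMU */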
static const struct msm_mmu_funcs funcs = {
	.attach = msm_iommu_attach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
};

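/*
 * Wrap an existing iommu_domain in an msm_mmu object and register the
 * fault handler.  Ownership of the domain passes to the returned
 * object; msm_iommu_destroy() frees it.
 */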
struct msm_mmu *msm_iommu_new(struct drm_device *dev,
		struct iommu_domain *domain)
{
	struct msm_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs);
	iommu_set_fault_handler(domain, msm_fault_handler, dev);

	return &iommu->base;
}