xref: /linux/drivers/xen/grant-dma-ops.c (revision 4359a011e259a4608afc7fb3635370c9d4ba5943)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Is the device behaving sanely? */
	bool broken;
};

static DEFINE_XARRAY(xen_grant_dma_devices);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}
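
/*
 * Illustrative example (not part of the original code): with 4 KiB pages
 * (PAGE_SHIFT == 12), grant reference 0x10 encodes to the DMA address
 * 0x8000000000010000 (bit 63 set, grant number in the page frame bits);
 * dma_to_grant() recovers 0x10 by masking off bit 63 and shifting back.
 */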

static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;

	xa_lock(&xen_grant_dma_devices);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock(&xen_grant_dma_devices);

	return data;
}
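
/*
 * Entries are keyed by the struct device pointer and created by
 * xen_grant_setup_dma_ops() below; a missing entry means the device has not
 * been set up for grant-based DMA.
 */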

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit lets the backend
 * distinguish it from e.g. an MMIO address).
 */
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}
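
/*
 * Illustrative call path (a sketch, not taken from this file): once
 * dev->dma_ops points at xen_grant_dma_ops, dma_alloc_coherent(dev, size,
 * &dma_handle, GFP_KERNEL) ends up in xen_grant_dma_alloc(); the returned
 * handle encodes the first grant of a contiguous range, with grants
 * grant .. grant + n_pages - 1 giving the backend write access to the
 * backing pages.
 */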

static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
}
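
/*
 * Note on the bail-out above: if the backend still holds one of the granted
 * pages, revoking its access fails and the memory is intentionally left
 * unfreed (returning it to the allocator while the backend can still write
 * to it would be unsafe); the device is marked broken so no further
 * grant-DMA operations are attempted.
 */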

static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + offset;

	return dma_handle;
}
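
/*
 * For DMA_TO_DEVICE mappings the pages are granted read-only (the last
 * argument to gnttab_grant_foreign_access_ref() above), as the backend only
 * needs to read them. A sketch of the usual caller (not from this file):
 * dma_map_page(dev, page, offset, size, DMA_TO_DEVICE) dispatches here via
 * dev->dma_ops and receives the grant-encoded handle.
 */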

static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}
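
/*
 * As in xen_grant_dma_free(): if the backend still has one of the pages
 * mapped, revocation fails, the grant references are not recycled and the
 * device is marked broken instead.
 */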

static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}
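
/*
 * Each scatterlist segment is mapped independently (no merging), so the
 * returned count always equals nents; on failure the already-mapped prefix
 * is unwound via xen_grant_dma_unmap_sg() before returning -EIO.
 */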

static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}
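
/*
 * Only a full 64-bit DMA mask is supported: grant-encoded addresses always
 * have bit 63 (XEN_GRANT_DMA_ADDR_OFF) set, so a device limited to fewer
 * address bits could not address them.
 */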

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

bool xen_is_grant_dma_device(struct device *dev)
{
	struct device_node *iommu_np;
	bool has_iommu;

	/* XXX Handle only DT devices for now */
	if (!dev->of_node)
		return false;

	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
	has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
	of_node_put(iommu_np);

	return has_iommu;
}
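
/*
 * Illustrative device tree fragment (an assumption about the consumer side;
 * node and label names are made up, but the properties match what
 * xen_is_grant_dma_device() above and xen_grant_setup_dma_ops() below parse):
 *
 *	xen_grant_dma: xen-grant-dma {
 *		compatible = "xen,grant-dma";
 *		#iommu-cells = <1>;
 *	};
 *
 *	virtio@2000000 {
 *		...
 *		iommus = <&xen_grant_dma 1>;	// the single cell is the backend domid
 *	};
 */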

bool xen_virtio_mem_acc(struct virtio_device *dev)
{
	if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))
		return true;

	return xen_is_grant_dma_device(dev->dev.parent);
}
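
/*
 * Caller-side note (describing code outside this file): the Xen arch setup
 * code registers xen_virtio_mem_acc() as the virtio "restricted memory
 * access" callback, so when it returns true the virtio core is expected to
 * go through the DMA API - and therefore the grant-DMA ops above - instead
 * of handing the backend direct access to arbitrary guest memory.
 */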

void xen_grant_setup_dma_ops(struct device *dev)
{
	struct xen_grant_dma_data *data;
	struct of_phandle_args iommu_spec;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	/* XXX ACPI devices are unsupported for now */
	if (!dev->of_node)
		goto err;

	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
			0, &iommu_spec)) {
		dev_err(dev, "Cannot parse iommus property\n");
		goto err;
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
		dev_err(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		goto err;
	}

	of_node_put(iommu_spec.np);

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	/*
	 * The endpoint ID here is the ID of the domain in which the
	 * corresponding backend is running.
	 */
	data->backend_domid = iommu_spec.args[0];

	if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			GFP_KERNEL))) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");