// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Set when the backend failed to release a grant; device disabled */
	bool broken;
};

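/*
 * Per-device data, keyed by the struct device pointer. The xarray takes its
 * lock with interrupts disabled (XA_FLAGS_LOCK_IRQ) since lookups may happen
 * from atomic context in the DMA paths below.
 */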
static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
}
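
/*
 * Example (a sketch, assuming XEN_PAGE_SHIFT == 12, i.e. 4 KiB Xen pages):
 * grant reference 0x1234 encodes to the DMA address
 * (1ULL << 63) | (0x1234ULL << 12) = 0x8000000001234000, and
 * dma_to_grant(0x8000000001234567) recovers grant 0x1234; the in-page offset
 * (0x567 here) is carried separately by the callers below.
 */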

static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;
	unsigned long flags;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return data;
}

static int store_xen_grant_dma_data(struct device *dev,
				    struct xen_grant_dma_data *data)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
				GFP_ATOMIC));
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return ret;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * These act as a kind of software IOMMU for Xen guests, using grant
 * references as DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (which lets the backend
 * distinguish it from e.g. an MMIO address).
 */
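/*
 * Allocate a physically contiguous buffer together with a contiguous run of
 * grant references (one per Xen page), so the returned handle only needs to
 * encode the first grant.
 */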
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = XEN_PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
						pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

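/*
 * Revoke the grants and release the buffer. If the backend still holds a
 * grant, keep the memory and mark the device as broken so it is not reused.
 */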
static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = XEN_PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
}

static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

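/*
 * Map an arbitrary buffer: grant access to every Xen page the region touches
 * and encode the first grant plus the in-page offset as the DMA address.
 * For DMA_TO_DEVICE mappings the grants are made read-only for the backend.
 */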
static dma_addr_t xen_grant_dma_map_phys(struct device *dev, phys_addr_t phys,
					 size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long offset = offset_in_page(phys);
	unsigned long dma_offset = xen_offset_in_page(offset),
		      pfn_offset = XEN_PFN_DOWN(offset);
	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (unlikely(attrs & DMA_ATTR_MMIO))
		return DMA_MAPPING_ERROR;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(page_to_xen_pfn(phys_to_page(phys)) + i + pfn_offset),
				dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + dma_offset;

	return dma_handle;
}

static void xen_grant_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = xen_offset_in_page(dma_handle);
	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

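/*
 * Unmap each scatterlist segment individually; this is also used to unwind
 * already-mapped segments when xen_grant_dma_map_sg() fails part way through.
 */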
static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_phys(dev, s->dma_address, sg_dma_len(s), dir,
					 attrs);
}

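/*
 * Map each scatterlist segment on its own; there is no attempt to merge
 * segments, so every segment gets its own run of grant references.
 */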
static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_phys(dev, sg_phys(s),
							s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

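/*
 * Grant-based DMA addresses use the full 64-bit range (bit 63 is the
 * XEN_GRANT_DMA_ADDR_OFF marker), so only devices with a full 64-bit DMA
 * mask are supported.
 */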
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages_op = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_phys = xen_grant_dma_map_phys,
	.unmap_phys = xen_grant_dma_unmap_phys,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

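/*
 * Return the device tree node to parse for the IOMMU properties: for PCI
 * devices this is the node of the host bridge behind the root bus, for
 * platform devices it is the device's own node.
 */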
static struct device_node *xen_dt_get_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_bus *bus = pdev->bus;

		/* Walk up to the root bus to look for the PCI host controller */
		while (!pci_is_root_bus(bus))
			bus = bus->parent;

		if (!bus->bridge->parent)
			return NULL;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

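/*
 * Parse the backend domid from the device tree. The device (or, for PCI, its
 * host bridge) is expected to reference an IOMMU node compatible with
 * "xen,grant-dma" whose single specifier cell is the backend domid.
 *
 * Roughly (a sketch of the expected binding; node and phandle names are
 * illustrative only):
 *
 *	xen_grant_dma: xen-grant-dma {
 *		compatible = "xen,grant-dma";
 *		#iommu-cells = <1>;
 *	};
 *
 *	virtio@2000000 {
 *		compatible = "virtio,mmio";
 *		...
 *		iommus = <&xen_grant_dma 1>;	(backend runs in domain 1)
 *	};
 */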
static int xen_dt_grant_init_backend_domid(struct device *dev,
					   struct device_node *np,
					   domid_t *backend_domid)
{
	struct of_phandle_args iommu_spec = { .args_count = 1 };

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);

		if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
			      iommu_spec.args)) {
			dev_dbg(dev, "Cannot translate ID\n");
			return -ESRCH;
		}
	} else {
		if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
					       0, &iommu_spec)) {
			dev_dbg(dev, "Cannot parse iommus property\n");
			return -ESRCH;
		}
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
	    iommu_spec.args_count != 1) {
		dev_dbg(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		return -ESRCH;
	}

	of_node_put(iommu_spec.np);

	/*
	 * The endpoint ID here means the ID of the domain where the
	 * corresponding backend is running
	 */
	*backend_domid = iommu_spec.args[0];

	return 0;
}

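/*
 * Determine the backend domid for the device. Prefer the device tree
 * description; without one, fall back to dom0 when grant usage is forced for
 * virtio or when running as a PV guest.
 */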
static int xen_grant_init_backend_domid(struct device *dev,
					domid_t *backend_domid)
{
	struct device_node *np;
	int ret = -ENODEV;

	np = xen_dt_get_node(dev);
	if (np) {
		ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
		of_node_put(np);
	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
		dev_info(dev, "Using dom0 as backend\n");
		*backend_domid = 0;
		ret = 0;
	}

	return ret;
}

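/*
 * Allocate and store the per-device data and switch the device over to the
 * grant DMA ops. On any failure the device keeps the platform DMA ops.
 */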
static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
{
	struct xen_grant_dma_data *data;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	data->backend_domid = backend_domid;

	if (store_xen_grant_dma_data(dev, data)) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	devm_kfree(dev, data);
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

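/*
 * Intended as the virtio memory-access callback (see <linux/virtio_anchor.h>):
 * when a backend domid can be determined, install the grant DMA ops on the
 * device's parent and report that memory access must be restricted.
 */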
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
	domid_t backend_domid;

	if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
		xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
		return true;
	}

	return false;
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");