// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS("DMA_BUF");

struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer (only for dma-buf export). */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */
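
/*
 * The flow is: user space asks to wait for a given exported dma-buf to be
 * released, we look the buffer up by its file descriptor and take an extra
 * reference, then allocate a wait object and immediately drop that
 * reference again. When the last user of the dma-buf goes away,
 * dmabuf_exp_release() signals the wait object's completion, waking the
 * waiter (unless the wait times out first).
 */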

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, then either the
	 * buffer has already been released or the file descriptor provided
	 * is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate a wait object and add it to the wait
	 * list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

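/*
 * Wrap the buffer's pages into a scatter-gather table. Note that
 * sg_alloc_table_from_pages() may merge physically contiguous pages into
 * a single entry, so the resulting table can have fewer entries than
 * nr_pages.
 */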
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

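/*
 * Operations backing the exported dma-buf. The dma-buf core invokes
 * .release once the last reference to the buffer's file is dropped,
 * which drops our kref and eventually frees the grant map.
 */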
static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

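/*
 * Export the pages of a grant map as a dma-buf: allocate a file
 * descriptor, create and export the dma-buf, pin the gntdev file so the
 * device context stays alive, and only then publish the buffer on the
 * export list and install the fd.
 */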
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf __free(kfree) = NULL;
	CLASS(get_unused_fd, ret)(O_CLOEXEC);

	if (ret < 0)
		return ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf))
		return PTR_ERR(gntdev_dmabuf->dmabuf);

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	get_file(gntdev_dmabuf->priv->filp);
	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);

	fd_install(take_fd(ret), no_free_ptr(gntdev_dmabuf)->dmabuf->file);
	return 0;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

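/*
 * Grant the given domid access to each of the frames in gfns, filling
 * refs with the resulting grant references. On failure the references
 * not yet claimed are returned to the pool; any entries already written
 * to refs are cleaned up by the caller via
 * dmabuf_imp_end_foreign_access().
 */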
static int
dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						gfns[i], 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != INVALID_GRANT_REF)
			gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_dma_page_iter sg_iter;
	unsigned long *gfns;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
			 attach->dmabuf->size >> PAGE_SHIFT,
			 gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
	if (!gfns) {
		ret = ERR_PTR(-ENOMEM);
		goto fail_unmap;
	}

	/*
	 * Now convert the sg table to an array of gfns without accessing
	 * the underlying pages. It is not allowed to access the underlying
	 * struct page of an sg table exported by dma-buf, but since we deal
	 * with a special Xen dma device here (not a normal physical one),
	 * we can look at the dma addresses in the sg table and calculate
	 * the gfns directly from them.
	 */
	i = 0;
	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

		gfns[i++] = pfn_to_gfn(pfn);
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	kfree(gfns);
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the imported dma-buf by its file descriptor and remove it from
 * the import list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
						  DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}


/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}
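
/*
 * A minimal sketch of the expected user-space export flow, assuming the
 * uapi definitions from include/uapi/xen/gntdev.h (error handling
 * omitted for brevity):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + count * sizeof(op->refs[0]));
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = count;
 *	op->domid = remote_domid;
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
 *
 * On success op->fd holds the file descriptor of the new dma-buf.
 */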

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}
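
/*
 * A sketch of how user space might block until an exported buffer is
 * actually released, again assuming the uapi definitions from
 * include/uapi/xen/gntdev.h:
 *
 *	struct ioctl_gntdev_dmabuf_exp_wait_released wait = {
 *		.fd = dmabuf_fd,
 *		.wait_to_ms = 1000,
 *	};
 *
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &wait);
 *
 * Failure with ENOENT means the buffer is already gone, ETIMEDOUT that
 * it is still in use after the timeout expired.
 */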

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}
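
/*
 * A sketch of the corresponding user-space import flow, assuming the
 * uapi definitions from include/uapi/xen/gntdev.h (error handling
 * omitted):
 *
 *	struct ioctl_gntdev_dmabuf_imp_to_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + count * sizeof(op->refs[0]));
 *	op->fd = dmabuf_fd;
 *	op->count = count;
 *	op->domid = remote_domid;
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_TO_REFS, op);
 *
 * On success op->refs[] holds grant references to share with
 * remote_domid. They stay valid until IOCTL_GNTDEV_DMABUF_IMP_RELEASE
 * is called for the same fd (or the gntdev file is closed).
 */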

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}