1 /******************************************************************************
2  * gntdev.c
3  *
4  * Device for accessing (in user-space) pages that have been granted by other
5  * domains.
6  *
7  * Copyright (c) 2006-2007, D G Murray.
8  *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
9  *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  */
20 
21 #undef DEBUG
22 
23 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
24 
25 #include <linux/dma-mapping.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/miscdevice.h>
30 #include <linux/fs.h>
31 #include <linux/uaccess.h>
32 #include <linux/sched.h>
33 #include <linux/sched/mm.h>
34 #include <linux/spinlock.h>
35 #include <linux/slab.h>
36 #include <linux/highmem.h>
37 #include <linux/refcount.h>
38 #include <linux/workqueue.h>
39 
40 #include <xen/xen.h>
41 #include <xen/grant_table.h>
42 #include <xen/balloon.h>
43 #include <xen/gntdev.h>
44 #include <xen/events.h>
45 #include <xen/page.h>
46 #include <asm/xen/hypervisor.h>
47 #include <asm/xen/hypercall.h>
48 
49 #include "gntdev-common.h"
50 #ifdef CONFIG_XEN_GNTDEV_DMABUF
51 #include "gntdev-dmabuf.h"
52 #endif
53 
54 MODULE_LICENSE("GPL");
55 MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
56 	      "Gerd Hoffmann <kraxel@redhat.com>");
57 MODULE_DESCRIPTION("User-space granted page access driver");
58 
59 #define GNTDEV_COPY_BATCH 16
60 
61 struct gntdev_copy_batch {
62 	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
63 	struct page *pages[GNTDEV_COPY_BATCH];
64 	s16 __user *status[GNTDEV_COPY_BATCH];
65 	unsigned int nr_ops;
66 	unsigned int nr_pages;
67 	bool writeable;
68 	struct gntdev_copy_batch *next;
69 };
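/*
 * Finished batches are cached per file: gntdev_ioctl_grant_copy() pops a
 * batch off priv->batch (allocating one only if the list is empty) and
 * pushes it back through ->next when the copy completes, so repeated
 * grant-copy ioctls on the same fd reuse the allocation. gntdev_release()
 * frees whatever is left on the list.
 */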
70 
71 static unsigned int limit = 64*1024;
72 module_param(limit, uint, 0644);
73 MODULE_PARM_DESC(limit,
74 	"Maximum number of grants that may be mapped by one mapping request");
75 
76 static void unmap_grant_pages(struct gntdev_grant_map *map,
77 			      int offset, int pages);
78 
79 static struct miscdevice gntdev_miscdev;
80 
81 /* ------------------------------------------------------------------ */
82 
83 bool gntdev_test_page_count(unsigned int count)
84 {
85 	return !count || count > limit;
86 }
87 
88 static void gntdev_print_maps(struct gntdev_priv *priv,
89 			      char *text, int text_index)
90 {
91 #ifdef DEBUG
92 	struct gntdev_grant_map *map;
93 
94 	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
95 	list_for_each_entry(map, &priv->maps, next)
96 		pr_debug("  index %2d, count %2d %s\n",
97 		       map->index, map->count,
98 		       map->index == text_index && text ? text : "");
99 #endif
100 }
101 
102 static void gntdev_free_map(struct gntdev_grant_map *map)
103 {
104 	if (map == NULL)
105 		return;
106 
107 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
108 	if (map->dma_vaddr) {
109 		struct gnttab_dma_alloc_args args;
110 
111 		args.dev = map->dma_dev;
112 		args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
113 		args.nr_pages = map->count;
114 		args.pages = map->pages;
115 		args.frames = map->frames;
116 		args.vaddr = map->dma_vaddr;
117 		args.dev_bus_addr = map->dma_bus_addr;
118 
119 		gnttab_dma_free_pages(&args);
120 	} else
121 #endif
122 	if (map->pages)
123 		gnttab_free_pages(map->count, map->pages);
124 
125 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
126 	kvfree(map->frames);
127 #endif
128 	kvfree(map->pages);
129 	kvfree(map->grants);
130 	kvfree(map->map_ops);
131 	kvfree(map->unmap_ops);
132 	kvfree(map->kmap_ops);
133 	kvfree(map->kunmap_ops);
134 	kvfree(map->being_removed);
135 	kfree(map);
136 }
137 
138 struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
139 					  int dma_flags)
140 {
141 	struct gntdev_grant_map *add;
142 	int i;
143 
144 	add = kzalloc(sizeof(*add), GFP_KERNEL);
145 	if (NULL == add)
146 		return NULL;
147 
148 	add->grants    = kvmalloc_array(count, sizeof(add->grants[0]),
149 					GFP_KERNEL);
150 	add->map_ops   = kvmalloc_array(count, sizeof(add->map_ops[0]),
151 					GFP_KERNEL);
152 	add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
153 					GFP_KERNEL);
154 	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
155 	add->being_removed =
156 		kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
157 	if (NULL == add->grants    ||
158 	    NULL == add->map_ops   ||
159 	    NULL == add->unmap_ops ||
160 	    NULL == add->pages     ||
161 	    NULL == add->being_removed)
162 		goto err;
163 	if (xen_pv_domain()) {
164 		add->kmap_ops   = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
165 						 GFP_KERNEL);
166 		add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
167 						 GFP_KERNEL);
168 		if (NULL == add->kmap_ops || NULL == add->kunmap_ops)
169 			goto err;
170 	}
171 
172 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
173 	add->dma_flags = dma_flags;
174 
175 	/*
176 	 * Check if this mapping is requested to be backed
177 	 * by a DMA buffer.
178 	 */
179 	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
180 		struct gnttab_dma_alloc_args args;
181 
182 		add->frames = kvcalloc(count, sizeof(add->frames[0]),
183 				       GFP_KERNEL);
184 		if (!add->frames)
185 			goto err;
186 
187 		/* Remember the device, so we can free DMA memory. */
188 		add->dma_dev = priv->dma_dev;
189 
190 		args.dev = priv->dma_dev;
191 		args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
192 		args.nr_pages = count;
193 		args.pages = add->pages;
194 		args.frames = add->frames;
195 
196 		if (gnttab_dma_alloc_pages(&args))
197 			goto err;
198 
199 		add->dma_vaddr = args.vaddr;
200 		add->dma_bus_addr = args.dev_bus_addr;
201 	} else
202 #endif
203 	if (gnttab_alloc_pages(count, add->pages))
204 		goto err;
205 
206 	for (i = 0; i < count; i++) {
207 		add->grants[i].domid = DOMID_INVALID;
208 		add->grants[i].ref = INVALID_GRANT_REF;
209 		add->map_ops[i].handle = INVALID_GRANT_HANDLE;
210 		add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
211 		if (xen_pv_domain()) {
212 			add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
213 			add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
214 		}
215 	}
216 
217 	add->index = 0;
218 	add->count = count;
219 	refcount_set(&add->users, 1);
220 
221 	return add;
222 
223 err:
224 	gntdev_free_map(add);
225 	return NULL;
226 }
227 
228 void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
229 {
230 	struct gntdev_grant_map *map;
231 
232 	list_for_each_entry(map, &priv->maps, next) {
233 		if (add->index + add->count < map->index) {
234 			list_add_tail(&add->next, &map->next);
235 			goto done;
236 		}
237 		add->index = map->index + map->count;
238 	}
239 	list_add_tail(&add->next, &priv->maps);
240 
241 done:
242 	gntdev_print_maps(priv, "[new]", add->index);
243 }
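/*
 * Worked example of the placement above (indices are hypothetical): with
 * existing maps at index 0 (count 4) and index 10 (count 2), a new 3-page
 * map starts at index 0, fails "0 + 3 < 0" against the first entry and is
 * bumped to index 4, then passes "4 + 3 < 10" against the second entry and
 * is linked in before it at index 4. If no gap fits, the map is appended
 * after the last entry.
 */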
244 
245 static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
246 						      int index, int count)
247 {
248 	struct gntdev_grant_map *map;
249 
250 	list_for_each_entry(map, &priv->maps, next) {
251 		if (map->index != index)
252 			continue;
253 		if (count && map->count != count)
254 			continue;
255 		return map;
256 	}
257 	return NULL;
258 }
259 
260 void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
261 {
262 	if (!map)
263 		return;
264 
265 	if (!refcount_dec_and_test(&map->users))
266 		return;
267 
268 	if (map->pages && !xen_pv_domain()) {
269 		/*
270 		 * Increment the reference count.  This ensures that the
271 		 * subsequent call to unmap_grant_pages() will not wind up
272 		 * re-entering itself.  It *can* wind up calling
273 		 * gntdev_put_map() recursively, but such calls will be with a
274 		 * reference count greater than 1, so they will return before
275 		 * this code is reached.  The recursion depth is thus limited to
276 		 * 1.  Do NOT use refcount_inc() here, as it will detect that
277 		 * the reference count is zero and WARN().
278 		 */
279 		refcount_set(&map->users, 1);
280 
281 		/*
282 		 * Unmap the grants.  This may or may not be asynchronous, so it
283 		 * is possible that the reference count is 1 on return, but it
284 		 * could also be greater than 1.
285 		 */
286 		unmap_grant_pages(map, 0, map->count);
287 
288 		/* Check if the memory now needs to be freed */
289 		if (!refcount_dec_and_test(&map->users))
290 			return;
291 
292 		/*
293 		 * All pages have been returned to the hypervisor, so free the
294 		 * map.
295 		 */
296 	}
297 
298 	if (xen_pv_domain() && map->notifier_init)
299 		mmu_interval_notifier_remove(&map->notifier);
300 
301 	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
302 		notify_remote_via_evtchn(map->notify.event);
303 		evtchn_put(map->notify.event);
304 	}
305 	gntdev_free_map(map);
306 }
307 
308 /* ------------------------------------------------------------------ */
309 
310 static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
311 {
312 	struct gntdev_grant_map *map = data;
313 	unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
314 	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
315 		    (1 << _GNTMAP_guest_avail0);
316 	u64 pte_maddr;
317 
318 	BUG_ON(pgnr >= map->count);
319 	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
320 
321 	/* Note: this will perform a pte_mkspecial() through the hypercall. */
322 	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
323 			  map->grants[pgnr].ref,
324 			  map->grants[pgnr].domid);
325 	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
326 			    INVALID_GRANT_HANDLE);
327 	return 0;
328 }
329 
330 int gntdev_map_grant_pages(struct gntdev_grant_map *map)
331 {
332 	size_t alloced = 0;
333 	int i, err = 0;
334 
335 	if (!xen_pv_domain()) {
336 		/* Note: it could already be mapped */
337 		if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
338 			return 0;
339 		for (i = 0; i < map->count; i++) {
340 			unsigned long addr = (unsigned long)
341 				pfn_to_kaddr(page_to_pfn(map->pages[i]));
342 			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
343 				map->grants[i].ref,
344 				map->grants[i].domid);
345 			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
346 				map->flags, INVALID_GRANT_HANDLE);
347 		}
348 	} else {
349 		/*
350 		 * Setup the map_ops corresponding to the pte entries pointing
351 		 * to the kernel linear addresses of the struct pages.
352 		 * These ptes are completely different from the user ptes dealt
353 		 * with by find_grant_ptes().
354 		 * Note that GNTMAP_device_map isn't needed here: The
355 		 * dev_bus_addr output field gets consumed only from ->map_ops,
356 		 * and by not requesting it when mapping we also avoid needing
357 		 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
358 		 * reference to the page in the hypervisor).
359 		 */
360 		unsigned int flags = (map->flags & ~GNTMAP_device_map) |
361 				     GNTMAP_host_map;
362 
363 		for (i = 0; i < map->count; i++) {
364 			unsigned long address = (unsigned long)
365 				pfn_to_kaddr(page_to_pfn(map->pages[i]));
366 			BUG_ON(PageHighMem(map->pages[i]));
367 
368 			gnttab_set_map_op(&map->kmap_ops[i], address, flags,
369 				map->grants[i].ref,
370 				map->grants[i].domid);
371 			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
372 				flags, INVALID_GRANT_HANDLE);
373 		}
374 	}
375 
376 	pr_debug("map %d+%d\n", map->index, map->count);
377 	err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages,
378 			map->count);
379 
380 	for (i = 0; i < map->count; i++) {
381 		if (map->map_ops[i].status == GNTST_okay) {
382 			map->unmap_ops[i].handle = map->map_ops[i].handle;
383 			alloced++;
384 		} else if (!err)
385 			err = -EINVAL;
386 
387 		if (map->flags & GNTMAP_device_map)
388 			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
389 
390 		if (xen_pv_domain()) {
391 			if (map->kmap_ops[i].status == GNTST_okay) {
392 				alloced++;
393 				map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
394 			} else if (!err)
395 				err = -EINVAL;
396 		}
397 	}
398 	atomic_add(alloced, &map->live_grants);
399 	return err;
400 }
401 
402 static void __unmap_grant_pages_done(int result,
403 		struct gntab_unmap_queue_data *data)
404 {
405 	unsigned int i;
406 	struct gntdev_grant_map *map = data->data;
407 	unsigned int offset = data->unmap_ops - map->unmap_ops;
408 	int successful_unmaps = 0;
409 	int live_grants;
410 
411 	for (i = 0; i < data->count; i++) {
412 		if (map->unmap_ops[offset + i].status == GNTST_okay &&
413 		    map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
414 			successful_unmaps++;
415 
416 		WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
417 			map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
418 		pr_debug("unmap handle=%d st=%d\n",
419 			map->unmap_ops[offset+i].handle,
420 			map->unmap_ops[offset+i].status);
421 		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
422 		if (xen_pv_domain()) {
423 			if (map->kunmap_ops[offset + i].status == GNTST_okay &&
424 			    map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
425 				successful_unmaps++;
426 
427 			WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
428 				map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
429 			pr_debug("kunmap handle=%u st=%d\n",
430 				 map->kunmap_ops[offset+i].handle,
431 				 map->kunmap_ops[offset+i].status);
432 			map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
433 		}
434 	}
435 
436 	/*
437 	 * Decrease the live-grant counter.  This must happen after the loop to
438 	 * prevent premature reuse of the grants by gnttab_mmap().
439 	 */
440 	live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
441 	if (WARN_ON(live_grants < 0))
442 		pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
443 		       __func__, live_grants, successful_unmaps);
444 
445 	/* Release reference taken by __unmap_grant_pages */
446 	gntdev_put_map(NULL, map);
447 }
448 
449 static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
450 			       int pages)
451 {
452 	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
453 		int pgno = (map->notify.addr >> PAGE_SHIFT);
454 
455 		if (pgno >= offset && pgno < offset + pages) {
456 			/* No need for kmap, pages are in lowmem */
457 			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
458 
459 			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
460 			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
461 		}
462 	}
463 
464 	map->unmap_data.unmap_ops = map->unmap_ops + offset;
465 	map->unmap_data.kunmap_ops = xen_pv_domain() ? map->kunmap_ops + offset : NULL;
466 	map->unmap_data.pages = map->pages + offset;
467 	map->unmap_data.count = pages;
468 	map->unmap_data.done = __unmap_grant_pages_done;
469 	map->unmap_data.data = map;
470 	refcount_inc(&map->users); /* to keep map alive during async call below */
471 
472 	gnttab_unmap_refs_async(&map->unmap_data);
473 }
474 
475 static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
476 			      int pages)
477 {
478 	int range;
479 
480 	if (atomic_read(&map->live_grants) == 0)
481 		return; /* Nothing to do */
482 
483 	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
484 
485 	/* It is possible the requested range will have a "hole" where we
486 	 * already unmapped some of the grants. Only unmap valid ranges.
487 	 */
488 	while (pages) {
489 		while (pages && map->being_removed[offset]) {
490 			offset++;
491 			pages--;
492 		}
493 		range = 0;
494 		while (range < pages) {
495 			if (map->being_removed[offset + range])
496 				break;
497 			map->being_removed[offset + range] = true;
498 			range++;
499 		}
500 		if (range)
501 			__unmap_grant_pages(map, offset, range);
502 		offset += range;
503 		pages -= range;
504 	}
505 }
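/*
 * Example of the hole handling above (page numbers are hypothetical): for
 * a request covering pages 0..7 in which pages 3 and 4 are already marked
 * being_removed, __unmap_grant_pages() is called once for [0, 3), the hole
 * is skipped, and it is called again for [5, 8).
 */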
506 
507 /* ------------------------------------------------------------------ */
508 
509 static void gntdev_vma_open(struct vm_area_struct *vma)
510 {
511 	struct gntdev_grant_map *map = vma->vm_private_data;
512 
513 	pr_debug("gntdev_vma_open %p\n", vma);
514 	refcount_inc(&map->users);
515 }
516 
517 static void gntdev_vma_close(struct vm_area_struct *vma)
518 {
519 	struct gntdev_grant_map *map = vma->vm_private_data;
520 	struct file *file = vma->vm_file;
521 	struct gntdev_priv *priv = file->private_data;
522 
523 	pr_debug("gntdev_vma_close %p\n", vma);
524 
525 	vma->vm_private_data = NULL;
526 	gntdev_put_map(priv, map);
527 }
528 
529 static struct page *gntdev_vma_find_normal_page(struct vm_area_struct *vma,
530 						 unsigned long addr)
531 {
532 	struct gntdev_grant_map *map = vma->vm_private_data;
533 
534 	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
535 }
536 
537 static const struct vm_operations_struct gntdev_vmops = {
538 	.open = gntdev_vma_open,
539 	.close = gntdev_vma_close,
540 	.find_normal_page = gntdev_vma_find_normal_page,
541 };
542 
543 /* ------------------------------------------------------------------ */
544 
545 static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
546 			      const struct mmu_notifier_range *range,
547 			      unsigned long cur_seq)
548 {
549 	struct gntdev_grant_map *map =
550 		container_of(mn, struct gntdev_grant_map, notifier);
551 	unsigned long mstart, mend;
552 	unsigned long map_start, map_end;
553 
554 	if (!mmu_notifier_range_blockable(range))
555 		return false;
556 
557 	map_start = map->pages_vm_start;
558 	map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
559 
560 	/*
561 	 * If the VMA is split or otherwise changed the notifier is not
562 	 * updated, but we don't want to process VA's outside the modified
563 	 * updated, but we don't want to process VAs outside the modified
564 	 * modifying the VMA in the first place.
565 	 */
566 	if (map_start >= range->end || map_end <= range->start)
567 		return true;
568 
569 	mstart = max(range->start, map_start);
570 	mend = min(range->end, map_end);
571 	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
572 		 map->index, map->count, map_start, map_end,
573 		 range->start, range->end, mstart, mend);
574 	unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
575 			  (mend - mstart) >> PAGE_SHIFT);
576 
577 	return true;
578 }
579 
580 static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
581 	.invalidate = gntdev_invalidate,
582 };
583 
584 /* ------------------------------------------------------------------ */
585 
586 static int gntdev_open(struct inode *inode, struct file *flip)
587 {
588 	struct gntdev_priv *priv;
589 
590 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
591 	if (!priv)
592 		return -ENOMEM;
593 
594 	INIT_LIST_HEAD(&priv->maps);
595 	mutex_init(&priv->lock);
596 
597 	mutex_init(&priv->batch_lock);
598 
599 #ifdef CONFIG_XEN_GNTDEV_DMABUF
600 	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
601 	if (IS_ERR(priv->dmabuf_priv)) {
602 		int ret = PTR_ERR(priv->dmabuf_priv);
603 
604 		kfree(priv);
605 		return ret;
606 	}
607 #endif
608 
609 	flip->private_data = priv;
610 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
611 	priv->dma_dev = gntdev_miscdev.this_device;
612 	dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
613 #endif
614 	pr_debug("priv %p\n", priv);
615 
616 	return 0;
617 }
618 
619 static int gntdev_release(struct inode *inode, struct file *flip)
620 {
621 	struct gntdev_priv *priv = flip->private_data;
622 	struct gntdev_grant_map *map;
623 	struct gntdev_copy_batch *batch;
624 
625 	pr_debug("priv %p\n", priv);
626 
627 	mutex_lock(&priv->lock);
628 	while (!list_empty(&priv->maps)) {
629 		map = list_entry(priv->maps.next,
630 				 struct gntdev_grant_map, next);
631 		list_del(&map->next);
632 		gntdev_put_map(NULL /* already removed */, map);
633 	}
634 	mutex_unlock(&priv->lock);
635 
636 	mutex_lock(&priv->batch_lock);
637 	while (priv->batch) {
638 		batch = priv->batch;
639 		priv->batch = batch->next;
640 		kfree(batch);
641 	}
642 	mutex_unlock(&priv->batch_lock);
643 
644 #ifdef CONFIG_XEN_GNTDEV_DMABUF
645 	gntdev_dmabuf_fini(priv->dmabuf_priv);
646 #endif
647 
648 	kfree(priv);
649 	return 0;
650 }
651 
652 static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
653 				       struct ioctl_gntdev_map_grant_ref __user *u)
654 {
655 	struct ioctl_gntdev_map_grant_ref op;
656 	struct gntdev_grant_map *map;
657 	int err;
658 
659 	if (copy_from_user(&op, u, sizeof(op)) != 0)
660 		return -EFAULT;
661 	pr_debug("priv %p, add %d\n", priv, op.count);
662 	if (unlikely(gntdev_test_page_count(op.count)))
663 		return -EINVAL;
664 
665 	err = -ENOMEM;
666 	map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
667 	if (!map)
668 		return err;
669 
670 	if (copy_from_user(map->grants, &u->refs,
671 			   sizeof(map->grants[0]) * op.count) != 0) {
672 		gntdev_put_map(NULL, map);
673 		return -EFAULT;
674 	}
675 
676 	mutex_lock(&priv->lock);
677 	gntdev_add_map(priv, map);
678 	op.index = map->index << PAGE_SHIFT;
679 	mutex_unlock(&priv->lock);
680 
681 	if (copy_to_user(u, &op, sizeof(op)) != 0)
682 		return -EFAULT;
683 
684 	return 0;
685 }
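/*
 * Rough user-space sketch (hypothetical, error handling omitted) of the
 * map-and-mmap flow this ioctl is one half of, using the structures from
 * include/uapi/xen/gntdev.h. DOMID and REF stand for a real remote domain
 * id and grant reference, and the device node path is the usual udev name
 * for the "xen/gntdev" misc device:
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct ioctl_gntdev_map_grant_ref op = {
 *		.count = 1,
 *		.refs[0] = { .domid = DOMID, .ref = REF },
 *	};
 *
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, op.index);
 *
 * op.index is the byte offset (map->index << PAGE_SHIFT) to pass to
 * mmap(); writable mappings must use MAP_SHARED, and the same index and
 * count are later given to IOCTL_GNTDEV_UNMAP_GRANT_REF to drop the
 * mapping.
 */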
686 
687 static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
688 					 struct ioctl_gntdev_unmap_grant_ref __user *u)
689 {
690 	struct ioctl_gntdev_unmap_grant_ref op;
691 	struct gntdev_grant_map *map;
692 	int err = -ENOENT;
693 
694 	if (copy_from_user(&op, u, sizeof(op)) != 0)
695 		return -EFAULT;
696 	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
697 
698 	mutex_lock(&priv->lock);
699 	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
700 	if (map) {
701 		list_del(&map->next);
702 		err = 0;
703 	}
704 	mutex_unlock(&priv->lock);
705 	if (map)
706 		gntdev_put_map(priv, map);
707 	return err;
708 }
709 
710 static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
711 					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
712 {
713 	struct ioctl_gntdev_get_offset_for_vaddr op;
714 	struct vm_area_struct *vma;
715 	struct gntdev_grant_map *map;
716 	int rv = -EINVAL;
717 
718 	if (copy_from_user(&op, u, sizeof(op)) != 0)
719 		return -EFAULT;
720 	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
721 
722 	mmap_read_lock(current->mm);
723 	vma = find_vma(current->mm, op.vaddr);
724 	if (!vma || vma->vm_ops != &gntdev_vmops)
725 		goto out_unlock;
726 
727 	map = vma->vm_private_data;
728 	if (!map)
729 		goto out_unlock;
730 
731 	op.offset = map->index << PAGE_SHIFT;
732 	op.count = map->count;
733 	rv = 0;
734 
735  out_unlock:
736 	mmap_read_unlock(current->mm);
737 
738 	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
739 		return -EFAULT;
740 	return rv;
741 }
742 
743 static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
744 {
745 	struct ioctl_gntdev_unmap_notify op;
746 	struct gntdev_grant_map *map;
747 	int rc;
748 	int out_flags;
749 	evtchn_port_t out_event;
750 
751 	if (copy_from_user(&op, u, sizeof(op)))
752 		return -EFAULT;
753 
754 	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
755 		return -EINVAL;
756 
757 	/* We need to grab a reference to the event channel we are going to use
758 	 * to send the notify before releasing the reference we may already have
759 	 * (if someone has called this ioctl twice). This is required so that
760 	 * it is possible to change the clear_byte part of the notification
761 	 * without disturbing the event channel part, which may now be the last
762 	 * reference to that event channel.
763 	 */
764 	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
765 		if (evtchn_get(op.event_channel_port))
766 			return -EINVAL;
767 	}
768 
769 	out_flags = op.action;
770 	out_event = op.event_channel_port;
771 
772 	mutex_lock(&priv->lock);
773 
774 	list_for_each_entry(map, &priv->maps, next) {
775 		uint64_t begin = map->index << PAGE_SHIFT;
776 		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
777 		if (op.index >= begin && op.index < end)
778 			goto found;
779 	}
780 	rc = -ENOENT;
781 	goto unlock_out;
782 
783  found:
784 	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
785 			(map->flags & GNTMAP_readonly)) {
786 		rc = -EINVAL;
787 		goto unlock_out;
788 	}
789 
790 	out_flags = map->notify.flags;
791 	out_event = map->notify.event;
792 
793 	map->notify.flags = op.action;
794 	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
795 	map->notify.event = op.event_channel_port;
796 
797 	rc = 0;
798 
799  unlock_out:
800 	mutex_unlock(&priv->lock);
801 
802 	/* Drop the reference to the event channel we did not save in the map */
803 	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
804 		evtchn_put(out_event);
805 
806 	return rc;
807 }
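/*
 * Usage note: the index passed to IOCTL_GNTDEV_SET_UNMAP_NOTIFY is a byte
 * offset inside an existing mapping (i.e. within the range returned by the
 * map ioctl). UNMAP_NOTIFY_CLEAR_BYTE zeroes the byte at that offset when
 * the grant is unmapped, UNMAP_NOTIFY_SEND_EVENT signals the given event
 * channel, and the two actions may be combined in one call.
 */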
808 
809 static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
810 				unsigned long *gfn)
811 {
812 	unsigned long addr = (unsigned long)virt;
813 	struct page *page;
814 	unsigned long xen_pfn;
815 	int ret;
816 
817 	ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
818 	if (ret < 0)
819 		return ret;
820 
821 	batch->pages[batch->nr_pages++] = page;
822 
823 	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
824 	*gfn = pfn_to_gfn(xen_pfn);
825 
826 	return 0;
827 }
828 
829 static void gntdev_put_pages(struct gntdev_copy_batch *batch)
830 {
831 	unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
832 	batch->nr_pages = 0;
833 	batch->writeable = false;
834 }
835 
836 static int gntdev_copy(struct gntdev_copy_batch *batch)
837 {
838 	unsigned int i;
839 
840 	gnttab_batch_copy(batch->ops, batch->nr_ops);
841 	gntdev_put_pages(batch);
842 
843 	/*
844 	 * For each completed op, update the status if the op failed
845 	 * and all previous ops for the segment were successful.
846 	 */
847 	for (i = 0; i < batch->nr_ops; i++) {
848 		s16 status = batch->ops[i].status;
849 		s16 old_status;
850 
851 		if (status == GNTST_okay)
852 			continue;
853 
854 		if (__get_user(old_status, batch->status[i]))
855 			return -EFAULT;
856 
857 		if (old_status != GNTST_okay)
858 			continue;
859 
860 		if (__put_user(status, batch->status[i]))
861 			return -EFAULT;
862 	}
863 
864 	batch->nr_ops = 0;
865 	return 0;
866 }
867 
868 static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
869 				 struct gntdev_grant_copy_segment *seg,
870 				 s16 __user *status)
871 {
872 	uint16_t copied = 0;
873 
874 	/*
875 	 * Disallow local -> local copies since there is only space in
876 	 * batch->pages for one page per-op and this would be a very
877 	 * expensive memcpy().
878 	 */
879 	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
880 		return -EINVAL;
881 
882 	/* Can't cross page if source/dest is a grant ref. */
883 	if (seg->flags & GNTCOPY_source_gref) {
884 		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
885 			return -EINVAL;
886 	}
887 	if (seg->flags & GNTCOPY_dest_gref) {
888 		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
889 			return -EINVAL;
890 	}
891 
892 	if (put_user(GNTST_okay, status))
893 		return -EFAULT;
894 
895 	while (copied < seg->len) {
896 		struct gnttab_copy *op;
897 		void __user *virt;
898 		size_t len, off;
899 		unsigned long gfn;
900 		int ret;
901 
902 		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
903 			ret = gntdev_copy(batch);
904 			if (ret < 0)
905 				return ret;
906 		}
907 
908 		len = seg->len - copied;
909 
910 		op = &batch->ops[batch->nr_ops];
911 		op->flags = 0;
912 
913 		if (seg->flags & GNTCOPY_source_gref) {
914 			op->source.u.ref = seg->source.foreign.ref;
915 			op->source.domid = seg->source.foreign.domid;
916 			op->source.offset = seg->source.foreign.offset + copied;
917 			op->flags |= GNTCOPY_source_gref;
918 		} else {
919 			virt = seg->source.virt + copied;
920 			off = (unsigned long)virt & ~XEN_PAGE_MASK;
921 			len = min(len, (size_t)XEN_PAGE_SIZE - off);
922 			batch->writeable = false;
923 
924 			ret = gntdev_get_page(batch, virt, &gfn);
925 			if (ret < 0)
926 				return ret;
927 
928 			op->source.u.gmfn = gfn;
929 			op->source.domid = DOMID_SELF;
930 			op->source.offset = off;
931 		}
932 
933 		if (seg->flags & GNTCOPY_dest_gref) {
934 			op->dest.u.ref = seg->dest.foreign.ref;
935 			op->dest.domid = seg->dest.foreign.domid;
936 			op->dest.offset = seg->dest.foreign.offset + copied;
937 			op->flags |= GNTCOPY_dest_gref;
938 		} else {
939 			virt = seg->dest.virt + copied;
940 			off = (unsigned long)virt & ~XEN_PAGE_MASK;
941 			len = min(len, (size_t)XEN_PAGE_SIZE - off);
942 			batch->writeable = true;
943 
944 			ret = gntdev_get_page(batch, virt, &gfn);
945 			if (ret < 0)
946 				return ret;
947 
948 			op->dest.u.gmfn = gfn;
949 			op->dest.domid = DOMID_SELF;
950 			op->dest.offset = off;
951 		}
952 
953 		op->len = len;
954 		copied += len;
955 
956 		batch->status[batch->nr_ops] = status;
957 		batch->nr_ops++;
958 	}
959 
960 	return 0;
961 }
962 
963 static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
964 {
965 	struct ioctl_gntdev_grant_copy copy;
966 	struct gntdev_copy_batch *batch;
967 	unsigned int i;
968 	int ret = 0;
969 
970 	if (copy_from_user(&copy, u, sizeof(copy)))
971 		return -EFAULT;
972 
973 	mutex_lock(&priv->batch_lock);
974 	if (!priv->batch) {
975 		batch = kmalloc(sizeof(*batch), GFP_KERNEL);
976 	} else {
977 		batch = priv->batch;
978 		priv->batch = batch->next;
979 	}
980 	mutex_unlock(&priv->batch_lock);
981 	if (!batch)
982 		return -ENOMEM;
983 
984 	batch->nr_ops = 0;
985 	batch->nr_pages = 0;
986 
987 	for (i = 0; i < copy.count; i++) {
988 		struct gntdev_grant_copy_segment seg;
989 
990 		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
991 			ret = -EFAULT;
992 			gntdev_put_pages(batch);
993 			goto out;
994 		}
995 
996 		ret = gntdev_grant_copy_seg(batch, &seg, &copy.segments[i].status);
997 		if (ret < 0) {
998 			gntdev_put_pages(batch);
999 			goto out;
1000 		}
1001 
1002 		cond_resched();
1003 	}
1004 	if (batch->nr_ops)
1005 		ret = gntdev_copy(batch);
1006 
1007  out:
1008 	mutex_lock(&priv->batch_lock);
1009 	batch->next = priv->batch;
1010 	priv->batch = batch;
1011 	mutex_unlock(&priv->batch_lock);
1012 
1013 	return ret;
1014 }
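/*
 * Rough user-space sketch (hypothetical, error handling abbreviated) of a
 * single-segment copy from a page granted by DOMID/REF into a local
 * buffer; fd is an open gntdev file descriptor and the structures come
 * from include/uapi/xen/gntdev.h:
 *
 *	char buf[4096];
 *	struct gntdev_grant_copy_segment seg = {
 *		.flags = GNTCOPY_source_gref,
 *		.source.foreign = { .ref = REF, .domid = DOMID, .offset = 0 },
 *		.dest.virt = buf,
 *		.len = sizeof(buf),
 *	};
 *	struct ioctl_gntdev_grant_copy copy = { .count = 1, .segments = &seg };
 *	int ok = ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy) == 0 &&
 *		 seg.status == GNTST_okay;
 *
 * Note that purely local (virt -> virt) segments are rejected with
 * -EINVAL, as explained in gntdev_grant_copy_seg() above.
 */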
1015 
1016 static long gntdev_ioctl(struct file *flip,
1017 			 unsigned int cmd, unsigned long arg)
1018 {
1019 	struct gntdev_priv *priv = flip->private_data;
1020 	void __user *ptr = (void __user *)arg;
1021 
1022 	switch (cmd) {
1023 	case IOCTL_GNTDEV_MAP_GRANT_REF:
1024 		return gntdev_ioctl_map_grant_ref(priv, ptr);
1025 
1026 	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
1027 		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
1028 
1029 	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
1030 		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
1031 
1032 	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
1033 		return gntdev_ioctl_notify(priv, ptr);
1034 
1035 	case IOCTL_GNTDEV_GRANT_COPY:
1036 		return gntdev_ioctl_grant_copy(priv, ptr);
1037 
1038 #ifdef CONFIG_XEN_GNTDEV_DMABUF
1039 	case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
1040 		return gntdev_ioctl_dmabuf_exp_from_refs(priv, ptr);
1041 
1042 	case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
1043 		return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
1044 
1045 	case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
1046 		return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
1047 
1048 	case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
1049 		return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
1050 #endif
1051 
1052 	default:
1053 		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
1054 		return -ENOIOCTLCMD;
1055 	}
1056 
1057 	return 0;
1058 }
1059 
1060 static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1061 {
1062 	struct gntdev_priv *priv = flip->private_data;
1063 	int index = vma->vm_pgoff;
1064 	int count = vma_pages(vma);
1065 	struct gntdev_grant_map *map;
1066 	int err = -EINVAL;
1067 
1068 	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
1069 		return -EINVAL;
1070 
1071 	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
1072 		 index, count, vma->vm_start, vma->vm_pgoff);
1073 
1074 	mutex_lock(&priv->lock);
1075 	map = gntdev_find_map_index(priv, index, count);
1076 	if (!map)
1077 		goto unlock_out;
1078 	if (!atomic_add_unless(&map->in_use, 1, 1))
1079 		goto unlock_out;
1080 
1081 	refcount_inc(&map->users);
1082 
1083 	vma->vm_ops = &gntdev_vmops;
1084 
1085 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
1086 
1087 	if (xen_pv_domain())
1088 		vm_flags_set(vma, VM_DONTCOPY);
1089 
1090 	vma->vm_private_data = map;
1091 	if (map->flags) {
1092 		if ((vma->vm_flags & VM_WRITE) &&
1093 				(map->flags & GNTMAP_readonly))
1094 			goto out_unlock_put;
1095 	} else {
1096 		map->flags = GNTMAP_host_map;
1097 		if (!(vma->vm_flags & VM_WRITE))
1098 			map->flags |= GNTMAP_readonly;
1099 	}
1100 
1101 	map->pages_vm_start = vma->vm_start;
1102 
1103 	if (xen_pv_domain()) {
1104 		err = mmu_interval_notifier_insert_locked(
1105 			&map->notifier, vma->vm_mm, vma->vm_start,
1106 			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
1107 		if (err)
1108 			goto out_unlock_put;
1109 
1110 		map->notifier_init = true;
1111 	}
1112 	mutex_unlock(&priv->lock);
1113 
1114 	if (xen_pv_domain()) {
1115 		/*
1116 		 * gntdev takes the address of the PTE in find_grant_ptes() and
1117 		 * passes it to the hypervisor in gntdev_map_grant_pages(). The
1118 		 * purpose of the notifier is to prevent the hypervisor pointer
1119 		 * to the PTE from going stale.
1120 		 *
1121 		 * Since this vma's mappings can't be touched without the
1122 		 * mmap_lock, and we are holding it now, there is no need for
1123 		 * the notifier_range locking pattern.
1124 		 */
1125 		mmu_interval_read_begin(&map->notifier);
1126 
1127 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1128 					  vma->vm_end - vma->vm_start,
1129 					  find_grant_ptes, map);
1130 		if (err) {
1131 			pr_warn("find_grant_ptes() failure.\n");
1132 			goto out_put_map;
1133 		}
1134 	}
1135 
1136 	err = gntdev_map_grant_pages(map);
1137 	if (err)
1138 		goto out_put_map;
1139 
1140 	if (!xen_pv_domain()) {
1141 		err = vm_map_pages_zero(vma, map->pages, map->count);
1142 		if (err)
1143 			goto out_put_map;
1144 	}
1145 
1146 	return 0;
1147 
1148 unlock_out:
1149 	mutex_unlock(&priv->lock);
1150 	return err;
1151 
1152 out_unlock_put:
1153 	mutex_unlock(&priv->lock);
1154 out_put_map:
1155 	if (xen_pv_domain())
1156 		unmap_grant_pages(map, 0, map->count);
1157 	gntdev_put_map(priv, map);
1158 	return err;
1159 }
1160 
1161 static const struct file_operations gntdev_fops = {
1162 	.owner = THIS_MODULE,
1163 	.open = gntdev_open,
1164 	.release = gntdev_release,
1165 	.mmap = gntdev_mmap,
1166 	.unlocked_ioctl = gntdev_ioctl
1167 };
1168 
1169 static struct miscdevice gntdev_miscdev = {
1170 	.minor        = MISC_DYNAMIC_MINOR,
1171 	.name         = "xen/gntdev",
1172 	.fops         = &gntdev_fops,
1173 };
1174 
1175 /* ------------------------------------------------------------------ */
1176 
1177 static int __init gntdev_init(void)
1178 {
1179 	int err;
1180 
1181 	if (!xen_domain())
1182 		return -ENODEV;
1183 
1184 	err = misc_register(&gntdev_miscdev);
1185 	if (err != 0) {
1186 		pr_err("Could not register gntdev device\n");
1187 		return err;
1188 	}
1189 	return 0;
1190 }
1191 
1192 static void __exit gntdev_exit(void)
1193 {
1194 	misc_deregister(&gntdev_miscdev);
1195 }
1196 
1197 module_init(gntdev_init);
1198 module_exit(gntdev_exit);
1199 
1200 /* ------------------------------------------------------------------ */
1201