// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

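/*
 * Per-device vhost-vdpa state.  One instance is created for each vDPA
 * bus device bound to this driver; it embeds both the vhost device
 * (@vdev) and the character device (@cdev) exposed to userspace.
 * @completion is used by vhost_vdpa_remove() to wait for an opener to
 * release the file, @opened enforces exclusive open, and @in_batch
 * tracks an in-flight VHOST_IOTLB_BATCH_BEGIN/END sequence.
 */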
struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

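/*
 * Try to post the virtqueue interrupt directly to the guest by
 * registering the call eventfd and the device irq with the irq bypass
 * manager.  This is best effort: devices without ->get_vq_irq, or a
 * failed registration, simply fall back to signalling the eventfd
 * from vhost_vdpa_virtqueue_cb().
 */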
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

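/*
 * Status writes are validated so that userspace can only add status
 * bits, or clear everything by writing 0.  Interrupt bypass producers
 * are set up when DRIVER_OK becomes set and torn down when it is
 * cleared, since the irq reported by ->get_vq_irq() is only expected
 * to be valid on a live device.
 */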
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless resetting the
	 * status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	long size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

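/*
 * Install the eventfd used to relay config-space change interrupts,
 * or tear it down when userspace passes VHOST_FILE_UNBIND.  The old
 * context, if any, is swapped out and released before the new
 * callback is registered with the device.
 */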
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v->vdpa;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

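/*
 * Per-virtqueue ioctls.  The generic vhost_vring_ioctl() keeps the
 * vhost_virtqueue bookkeeping up to date; the switch statements
 * before and after it translate the result into vDPA config ops
 * (ring addresses, base index, callbacks, ring size).  Unlike
 * vhost-net, the ring state lives in the device, so
 * VHOST_GET_VRING_BASE has to fetch it via ->get_vq_state() first.
 */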
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.split.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.split.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

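/*
 * Main ioctl dispatcher.  VHOST_SET_BACKEND_FEATURES is handled
 * before taking the vhost mutex; everything else runs under it,
 * falling through to vhost_dev_ioctl() and then to the vring ioctls.
 *
 * A rough sketch of how a userspace driver might bring up a device
 * (error handling omitted; values are illustrative only):
 *
 *	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	... VHOST_SET_VRING_ADDR/NUM/BASE/KICK/CALL per queue ...
 *	status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
 *		 VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_DRIVER_OK;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 */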
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

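/*
 * Insert a mapping into the vhost IOTLB and mirror it to the device
 * using whichever translation interface it provides, in order of
 * preference: a per-range ->dma_map(), a whole-table ->set_map()
 * (deferred while a batch is in flight), or the platform IOMMU
 * domain set up in vhost_vdpa_alloc_domain().
 */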
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

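/*
 * Handle VHOST_IOTLB_UPDATE: pin the userspace range with
 * pin_user_pages(), one page_list (at most PAGE_SIZE worth of page
 * pointers) at a time, coalesce physically contiguous runs of pfns,
 * and map each run with a single vhost_vdpa_map() call.  On failure,
 * pages already accounted to a mapping are released through
 * vhost_vdpa_unmap(); pages pinned but not yet mapped are unpinned
 * directly here.
 */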
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret) {
					/*
					 * Unpin the pages that are left
					 * unmapped from this point on in the
					 * current page_list.  The remaining
					 * outstanding ones, which may stride
					 * across several chunks, are covered
					 * by the common error path below.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += pinned << PAGE_SHIFT;
		npages -= pinned;
	}

	/* Map the last contiguous chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages that were meant to
			 * be mapped but weren't, because vhost_vdpa_map()
			 * or pin_user_pages() failed.
			 *
			 * Pages that were successfully mapped are accounted
			 * in vhost_vdpa_map(), so their unpinning is
			 * handled by vhost_vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

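/*
 * Entry point for IOTLB messages written by userspace (see
 * vhost_vdpa_chr_write_iter() below).  BATCH_BEGIN/END let userspace
 * fold many updates into a single ->set_map() call on devices that
 * only support whole-table updates.
 */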
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

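/*
 * For devices with neither ->dma_map() nor ->set_map(), DMA
 * translation is done by the platform IOMMU: allocate a domain on
 * the device's DMA bus and attach it, so that vhost_vdpa_map() can
 * use iommu_map() directly.  A cache-coherent IOMMU is required
 * because mappings are created with IOMMU_CACHE.
 */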
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain && v->domain->geometry.force_aperture) {
		range->first = v->domain->geometry.aperture_start;
		range->last = v->domain->geometry.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

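/*
 * Exclusive open of the character device: reset the device, build
 * the vhost_virtqueue array, allocate the IOTLB and, if needed, the
 * IOMMU domain, then compute the usable IOVA range advertised via
 * VHOST_VDPA_GET_IOVA_RANGE.
 */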
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	int i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

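/*
 * Allow userspace to mmap a virtqueue doorbell (vm_pgoff selects the
 * queue) so that kicks can bypass the kernel entirely.  Only a
 * write-only, shared, page-sized mapping of a doorbell that owns its
 * whole page is accepted; the actual pfn is installed lazily from
 * vhost_vdpa_fault().
 */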
static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support a doorbell that sits on a page boundary and
	 * does not share its page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

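/*
 * vDPA bus probe: allocate the vhost_vdpa instance, reserve a minor,
 * and expose it as /dev/vhost-vdpa-<minor>.  The final kfree()s
 * happen in vhost_vdpa_release_dev() once the last reference on
 * v->dev is dropped.
 */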
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

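/*
 * vDPA bus remove: delete the character device, then wait for any
 * current opener to release the file.  Claiming v->opened once it
 * reads zero closes the window in which an open() that raced with
 * the unregistration could slip in before the final put_device().
 */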
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
1129