1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2018-2020 Intel Corporation.
4  * Copyright (C) 2020 Red Hat, Inc.
5  *
6  * Author: Tiwei Bie <tiwei.bie@intel.com>
7  *         Jason Wang <jasowang@redhat.com>
8  *
9  * Thanks Michael S. Tsirkin for the valuable comments and
10  * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
11  * their support.
12  */
13 
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/cdev.h>
17 #include <linux/device.h>
18 #include <linux/mm.h>
19 #include <linux/slab.h>
20 #include <linux/iommu.h>
21 #include <linux/uuid.h>
22 #include <linux/vdpa.h>
23 #include <linux/nospec.h>
24 #include <linux/vhost.h>
25 
26 #include "vhost.h"
27 
28 enum {
29 	VHOST_VDPA_BACKEND_FEATURES =
30 	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
31 	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
32 	(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
33 };
34 
35 #define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
36 
37 #define VHOST_VDPA_IOTLB_BUCKETS 16
38 
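/*
 * Each vhost-vdpa device keeps its address spaces in a small hash table of
 * VHOST_VDPA_IOTLB_BUCKETS buckets, keyed by ASID; every address space owns
 * its own vhost IOTLB.
 */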
39 struct vhost_vdpa_as {
40 	struct hlist_node hash_link;
41 	struct vhost_iotlb iotlb;
42 	u32 id;
43 };
44 
45 struct vhost_vdpa {
46 	struct vhost_dev vdev;
47 	struct iommu_domain *domain;
48 	struct vhost_virtqueue *vqs;
49 	struct completion completion;
50 	struct vdpa_device *vdpa;
51 	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
52 	struct device dev;
53 	struct cdev cdev;
54 	atomic_t opened;
55 	u32 nvqs;
56 	int virtio_id;
57 	int minor;
58 	struct eventfd_ctx *config_ctx;
59 	int in_batch;
60 	struct vdpa_iova_range range;
61 	u32 batch_asid;
62 	bool suspended;
63 };
64 
65 static DEFINE_IDA(vhost_vdpa_ida);
66 
67 static dev_t vhost_vdpa_major;
68 
69 static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
70 				   struct vhost_iotlb *iotlb, u64 start,
71 				   u64 last, u32 asid);
72 
73 static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
74 {
75 	struct vhost_vdpa_as *as = container_of(iotlb, struct
76 						vhost_vdpa_as, iotlb);
77 	return as->id;
78 }
79 
80 static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
81 {
82 	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
83 	struct vhost_vdpa_as *as;
84 
85 	hlist_for_each_entry(as, head, hash_link)
86 		if (as->id == asid)
87 			return as;
88 
89 	return NULL;
90 }
91 
92 static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
93 {
94 	struct vhost_vdpa_as *as = asid_to_as(v, asid);
95 
96 	if (!as)
97 		return NULL;
98 
99 	return &as->iotlb;
100 }
101 
102 static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
103 {
104 	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
105 	struct vhost_vdpa_as *as;
106 
107 	if (asid_to_as(v, asid))
108 		return NULL;
109 
110 	if (asid >= v->vdpa->nas)
111 		return NULL;
112 
113 	as = kmalloc(sizeof(*as), GFP_KERNEL);
114 	if (!as)
115 		return NULL;
116 
117 	vhost_iotlb_init(&as->iotlb, 0, 0);
118 	as->id = asid;
119 	hlist_add_head(&as->hash_link, head);
120 
121 	return as;
122 }
123 
124 static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
125 						      u32 asid)
126 {
127 	struct vhost_vdpa_as *as = asid_to_as(v, asid);
128 
129 	if (as)
130 		return as;
131 
132 	return vhost_vdpa_alloc_as(v, asid);
133 }
134 
135 static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
136 {
137 	struct vdpa_device *vdpa = v->vdpa;
138 	const struct vdpa_config_ops *ops = vdpa->config;
139 
140 	if (ops->reset_map)
141 		ops->reset_map(vdpa, asid);
142 }
143 
144 static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
145 {
146 	struct vhost_vdpa_as *as = asid_to_as(v, asid);
147 
148 	if (!as)
149 		return -EINVAL;
150 
151 	hlist_del(&as->hash_link);
152 	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
153 	/*
154 	 * Devices with a vendor-specific IOMMU may need to restore the
155 	 * iotlb to its initial or default state, which cannot be done
156 	 * by the whole-range unmap call above. Give them
157 	 * a chance to clean up or reset the map to the desired
158 	 * state.
159 	 */
160 	vhost_vdpa_reset_map(v, asid);
161 	kfree(as);
162 
163 	return 0;
164 }
165 
166 static void handle_vq_kick(struct vhost_work *work)
167 {
168 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
169 						  poll.work);
170 	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
171 	const struct vdpa_config_ops *ops = v->vdpa->config;
172 
173 	ops->kick_vq(v->vdpa, vq - v->vqs);
174 }
175 
176 static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
177 {
178 	struct vhost_virtqueue *vq = private;
179 	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
180 
181 	if (call_ctx)
182 		eventfd_signal(call_ctx);
183 
184 	return IRQ_HANDLED;
185 }
186 
187 static irqreturn_t vhost_vdpa_config_cb(void *private)
188 {
189 	struct vhost_vdpa *v = private;
190 	struct eventfd_ctx *config_ctx = v->config_ctx;
191 
192 	if (config_ctx)
193 		eventfd_signal(config_ctx);
194 
195 	return IRQ_HANDLED;
196 }
197 
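/*
 * Try to wire the virtqueue's call eventfd directly to the parent device's
 * interrupt through the irq bypass manager. This is best effort: if the
 * parent has no get_vq_irq(), no valid irq, or no call eventfd is set, the
 * regular callback/eventfd path is used instead.
 */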
198 static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
199 {
200 	struct vhost_virtqueue *vq = &v->vqs[qid];
201 	const struct vdpa_config_ops *ops = v->vdpa->config;
202 	struct vdpa_device *vdpa = v->vdpa;
203 	int ret, irq;
204 
205 	if (!ops->get_vq_irq)
206 		return;
207 
208 	irq = ops->get_vq_irq(vdpa, qid);
209 	if (irq < 0)
210 		return;
211 
212 	if (!vq->call_ctx.ctx)
213 		return;
214 
215 	ret = irq_bypass_register_producer(&vq->call_ctx.producer,
216 					   vq->call_ctx.ctx, irq);
217 	if (unlikely(ret))
218 		dev_info(&v->dev, "vq %u, irq bypass producer (eventfd %p) registration fails, ret =  %d\n",
219 			 qid, vq->call_ctx.ctx, ret);
220 }
221 
222 static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
223 {
224 	struct vhost_virtqueue *vq = &v->vqs[qid];
225 
226 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
227 }
228 
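/*
 * Reset the device while keeping backward compatibility: unless userspace
 * negotiated VHOST_BACKEND_F_IOTLB_PERSIST, ask the parent to also clean up
 * its mappings (VDPA_RESET_F_CLEAN_MAP), matching the historical behaviour
 * where a reset dropped the IOTLB.
 */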
229 static int _compat_vdpa_reset(struct vhost_vdpa *v)
230 {
231 	struct vdpa_device *vdpa = v->vdpa;
232 	u32 flags = 0;
233 
234 	v->suspended = false;
235 
236 	if (v->vdev.vqs) {
237 		flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
238 						    VHOST_BACKEND_F_IOTLB_PERSIST) ?
239 			 VDPA_RESET_F_CLEAN_MAP : 0;
240 	}
241 
242 	return vdpa_reset(vdpa, flags);
243 }
244 
245 static int vhost_vdpa_reset(struct vhost_vdpa *v)
246 {
247 	v->in_batch = 0;
248 	return _compat_vdpa_reset(v);
249 }
250 
251 static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
252 {
253 	struct vdpa_device *vdpa = v->vdpa;
254 	const struct vdpa_config_ops *ops = vdpa->config;
255 
256 	if (!vdpa->use_va || !ops->bind_mm)
257 		return 0;
258 
259 	return ops->bind_mm(vdpa, v->vdev.mm);
260 }
261 
262 static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
263 {
264 	struct vdpa_device *vdpa = v->vdpa;
265 	const struct vdpa_config_ops *ops = vdpa->config;
266 
267 	if (!vdpa->use_va || !ops->unbind_mm)
268 		return;
269 
270 	ops->unbind_mm(vdpa);
271 }
272 
273 static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
274 {
275 	struct vdpa_device *vdpa = v->vdpa;
276 	const struct vdpa_config_ops *ops = vdpa->config;
277 	u32 device_id;
278 
279 	device_id = ops->get_device_id(vdpa);
280 
281 	if (copy_to_user(argp, &device_id, sizeof(device_id)))
282 		return -EFAULT;
283 
284 	return 0;
285 }
286 
287 static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
288 {
289 	struct vdpa_device *vdpa = v->vdpa;
290 	const struct vdpa_config_ops *ops = vdpa->config;
291 	u8 status;
292 
293 	status = ops->get_status(vdpa);
294 
295 	if (copy_to_user(statusp, &status, sizeof(status)))
296 		return -EFAULT;
297 
298 	return 0;
299 }
300 
301 static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
302 {
303 	struct vdpa_device *vdpa = v->vdpa;
304 	const struct vdpa_config_ops *ops = vdpa->config;
305 	u8 status, status_old;
306 	u32 nvqs = v->nvqs;
307 	int ret;
308 	u16 i;
309 
310 	if (copy_from_user(&status, statusp, sizeof(status)))
311 		return -EFAULT;
312 
313 	status_old = ops->get_status(vdpa);
314 
315 	/*
316 	 * Userspace shouldn't remove status bits unless resetting the
317 	 * status to 0.
318 	 */
319 	if (status != 0 && (status_old & ~status) != 0)
320 		return -EINVAL;
321 
322 	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
323 		for (i = 0; i < nvqs; i++)
324 			vhost_vdpa_unsetup_vq_irq(v, i);
325 
326 	if (status == 0) {
327 		ret = _compat_vdpa_reset(v);
328 		if (ret)
329 			return ret;
330 	} else
331 		vdpa_set_status(vdpa, status);
332 
333 	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
334 		for (i = 0; i < nvqs; i++)
335 			vhost_vdpa_setup_vq_irq(v, i);
336 
337 	return 0;
338 }
339 
340 static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
341 				      struct vhost_vdpa_config *c)
342 {
343 	struct vdpa_device *vdpa = v->vdpa;
344 	size_t size = vdpa->config->get_config_size(vdpa);
345 
346 	if (c->len == 0 || c->off > size)
347 		return -EINVAL;
348 
349 	if (c->len > size - c->off)
350 		return -E2BIG;
351 
352 	return 0;
353 }
354 
355 static long vhost_vdpa_get_config(struct vhost_vdpa *v,
356 				  struct vhost_vdpa_config __user *c)
357 {
358 	struct vdpa_device *vdpa = v->vdpa;
359 	struct vhost_vdpa_config config;
360 	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
361 	u8 *buf;
362 
363 	if (copy_from_user(&config, c, size))
364 		return -EFAULT;
365 	if (vhost_vdpa_config_validate(v, &config))
366 		return -EINVAL;
367 	buf = kvzalloc(config.len, GFP_KERNEL);
368 	if (!buf)
369 		return -ENOMEM;
370 
371 	vdpa_get_config(vdpa, config.off, buf, config.len);
372 
373 	if (copy_to_user(c->buf, buf, config.len)) {
374 		kvfree(buf);
375 		return -EFAULT;
376 	}
377 
378 	kvfree(buf);
379 	return 0;
380 }
381 
382 static long vhost_vdpa_set_config(struct vhost_vdpa *v,
383 				  struct vhost_vdpa_config __user *c)
384 {
385 	struct vdpa_device *vdpa = v->vdpa;
386 	struct vhost_vdpa_config config;
387 	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
388 	u8 *buf;
389 
390 	if (copy_from_user(&config, c, size))
391 		return -EFAULT;
392 	if (vhost_vdpa_config_validate(v, &config))
393 		return -EINVAL;
394 
395 	buf = vmemdup_user(c->buf, config.len);
396 	if (IS_ERR(buf))
397 		return PTR_ERR(buf);
398 
399 	vdpa_set_config(vdpa, config.off, buf, config.len);
400 
401 	kvfree(buf);
402 	return 0;
403 }
404 
405 static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
406 {
407 	struct vdpa_device *vdpa = v->vdpa;
408 	const struct vdpa_config_ops *ops = vdpa->config;
409 
410 	return ops->suspend;
411 }
412 
413 static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
414 {
415 	struct vdpa_device *vdpa = v->vdpa;
416 	const struct vdpa_config_ops *ops = vdpa->config;
417 
418 	return ops->resume;
419 }
420 
421 static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
422 {
423 	struct vdpa_device *vdpa = v->vdpa;
424 	const struct vdpa_config_ops *ops = vdpa->config;
425 
426 	return ops->get_vq_desc_group;
427 }
428 
429 static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
430 {
431 	struct vdpa_device *vdpa = v->vdpa;
432 	const struct vdpa_config_ops *ops = vdpa->config;
433 	u64 features;
434 
435 	features = ops->get_device_features(vdpa);
436 
437 	if (copy_to_user(featurep, &features, sizeof(features)))
438 		return -EFAULT;
439 
440 	return 0;
441 }
442 
443 static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
444 {
445 	struct vdpa_device *vdpa = v->vdpa;
446 	const struct vdpa_config_ops *ops = vdpa->config;
447 
448 	if (!ops->get_backend_features)
449 		return 0;
450 	else
451 		return ops->get_backend_features(vdpa);
452 }
453 
454 static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
455 {
456 	struct vdpa_device *vdpa = v->vdpa;
457 	const struct vdpa_config_ops *ops = vdpa->config;
458 
459 	return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
460 	       vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
461 }
462 
463 static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
464 {
465 	struct vdpa_device *vdpa = v->vdpa;
466 	const struct vdpa_config_ops *ops = vdpa->config;
467 	struct vhost_dev *d = &v->vdev;
468 	u64 actual_features;
469 	u64 features;
470 	int i;
471 
472 	/*
473 	 * It's not allowed to change the features after they have
474 	 * been negotiated.
475 	 */
476 	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
477 		return -EBUSY;
478 
479 	if (copy_from_user(&features, featurep, sizeof(features)))
480 		return -EFAULT;
481 
482 	if (vdpa_set_features(vdpa, features))
483 		return -EINVAL;
484 
485 	/* let the vqs know what has been configured */
486 	actual_features = ops->get_driver_features(vdpa);
487 	for (i = 0; i < d->nvqs; ++i) {
488 		struct vhost_virtqueue *vq = d->vqs[i];
489 
490 		mutex_lock(&vq->mutex);
491 		vq->acked_features = actual_features;
492 		mutex_unlock(&vq->mutex);
493 	}
494 
495 	return 0;
496 }
497 
498 static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
499 {
500 	struct vdpa_device *vdpa = v->vdpa;
501 	const struct vdpa_config_ops *ops = vdpa->config;
502 	u16 num;
503 
504 	num = ops->get_vq_num_max(vdpa);
505 
506 	if (copy_to_user(argp, &num, sizeof(num)))
507 		return -EFAULT;
508 
509 	return 0;
510 }
511 
512 static void vhost_vdpa_config_put(struct vhost_vdpa *v)
513 {
514 	if (v->config_ctx) {
515 		eventfd_ctx_put(v->config_ctx);
516 		v->config_ctx = NULL;
517 	}
518 }
519 
520 static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
521 {
522 	struct vdpa_callback cb;
523 	int fd;
524 	struct eventfd_ctx *ctx;
525 
526 	cb.callback = vhost_vdpa_config_cb;
527 	cb.private = v;
528 	if (copy_from_user(&fd, argp, sizeof(fd)))
529 		return  -EFAULT;
530 
531 	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
532 	swap(ctx, v->config_ctx);
533 
534 	if (!IS_ERR_OR_NULL(ctx))
535 		eventfd_ctx_put(ctx);
536 
537 	if (IS_ERR(v->config_ctx)) {
538 		long ret = PTR_ERR(v->config_ctx);
539 
540 		v->config_ctx = NULL;
541 		return ret;
542 	}
543 
544 	v->vdpa->config->set_config_cb(v->vdpa, &cb);
545 
546 	return 0;
547 }
548 
549 static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
550 {
551 	struct vhost_vdpa_iova_range range = {
552 		.first = v->range.first,
553 		.last = v->range.last,
554 	};
555 
556 	if (copy_to_user(argp, &range, sizeof(range)))
557 		return -EFAULT;
558 	return 0;
559 }
560 
561 static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
562 {
563 	struct vdpa_device *vdpa = v->vdpa;
564 	const struct vdpa_config_ops *ops = vdpa->config;
565 	u32 size;
566 
567 	size = ops->get_config_size(vdpa);
568 
569 	if (copy_to_user(argp, &size, sizeof(size)))
570 		return -EFAULT;
571 
572 	return 0;
573 }
574 
575 static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
576 {
577 	struct vdpa_device *vdpa = v->vdpa;
578 
579 	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
580 		return -EFAULT;
581 
582 	return 0;
583 }
584 
585 /* After a successful return of this ioctl the device must not process more
586  * virtqueue descriptors. The device can answer reads or writes of config
587  * fields as if it were not suspended. In particular, writing to "queue_enable"
588  * with a value of 1 will not make the device start processing buffers.
589  */
590 static long vhost_vdpa_suspend(struct vhost_vdpa *v)
591 {
592 	struct vdpa_device *vdpa = v->vdpa;
593 	const struct vdpa_config_ops *ops = vdpa->config;
594 	int ret;
595 
596 	if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
597 		return 0;
598 
599 	if (!ops->suspend)
600 		return -EOPNOTSUPP;
601 
602 	ret = ops->suspend(vdpa);
603 	if (!ret)
604 		v->suspended = true;
605 
606 	return ret;
607 }
608 
609 /* After a successful return of this ioctl the device resumes processing
610  * virtqueue descriptors. The device becomes fully operational the same way it
611  * was before it was suspended.
612  */
613 static long vhost_vdpa_resume(struct vhost_vdpa *v)
614 {
615 	struct vdpa_device *vdpa = v->vdpa;
616 	const struct vdpa_config_ops *ops = vdpa->config;
617 	int ret;
618 
619 	if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
620 		return 0;
621 
622 	if (!ops->resume)
623 		return -EOPNOTSUPP;
624 
625 	ret = ops->resume(vdpa);
626 	if (!ret)
627 		v->suspended = false;
628 
629 	return ret;
630 }
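
/*
 * Illustrative userspace usage of the two ioctls above (not part of this
 * file; "fd" is assumed to be an open /dev/vhost-vdpa-N descriptor):
 *
 *	ioctl(fd, VHOST_VDPA_SUSPEND);	// device stops processing descriptors
 *	// ... e.g. query virtqueue state while the device is quiescent ...
 *	ioctl(fd, VHOST_VDPA_RESUME);	// device resumes processing
 */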
631 
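/*
 * Per-virtqueue ioctls: the vhost-vdpa specific commands (enable, group,
 * descriptor group, group ASID, ring size) are handled here directly; the
 * generic vring ioctls are forwarded to vhost_vring_ioctl() and the result
 * (address, base, call eventfd, ring num) is then propagated to the parent
 * device.
 */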
632 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
633 				   void __user *argp)
634 {
635 	struct vdpa_device *vdpa = v->vdpa;
636 	const struct vdpa_config_ops *ops = vdpa->config;
637 	struct vdpa_vq_state vq_state;
638 	struct vdpa_callback cb;
639 	struct vhost_virtqueue *vq;
640 	struct vhost_vring_state s;
641 	u32 idx;
642 	long r;
643 
644 	r = get_user(idx, (u32 __user *)argp);
645 	if (r < 0)
646 		return r;
647 
648 	if (idx >= v->nvqs)
649 		return -ENOBUFS;
650 
651 	idx = array_index_nospec(idx, v->nvqs);
652 	vq = &v->vqs[idx];
653 
654 	switch (cmd) {
655 	case VHOST_VDPA_SET_VRING_ENABLE:
656 		if (copy_from_user(&s, argp, sizeof(s)))
657 			return -EFAULT;
658 		ops->set_vq_ready(vdpa, idx, s.num);
659 		return 0;
660 	case VHOST_VDPA_GET_VRING_GROUP:
661 		if (!ops->get_vq_group)
662 			return -EOPNOTSUPP;
663 		s.index = idx;
664 		s.num = ops->get_vq_group(vdpa, idx);
665 		if (s.num >= vdpa->ngroups)
666 			return -EIO;
667 		else if (copy_to_user(argp, &s, sizeof(s)))
668 			return -EFAULT;
669 		return 0;
670 	case VHOST_VDPA_GET_VRING_DESC_GROUP:
671 		if (!vhost_vdpa_has_desc_group(v))
672 			return -EOPNOTSUPP;
673 		s.index = idx;
674 		s.num = ops->get_vq_desc_group(vdpa, idx);
675 		if (s.num >= vdpa->ngroups)
676 			return -EIO;
677 		else if (copy_to_user(argp, &s, sizeof(s)))
678 			return -EFAULT;
679 		return 0;
680 	case VHOST_VDPA_SET_GROUP_ASID:
681 		if (copy_from_user(&s, argp, sizeof(s)))
682 			return -EFAULT;
683 		if (s.num >= vdpa->nas)
684 			return -EINVAL;
685 		if (!ops->set_group_asid)
686 			return -EOPNOTSUPP;
687 		return ops->set_group_asid(vdpa, idx, s.num);
688 	case VHOST_VDPA_GET_VRING_SIZE:
689 		if (!ops->get_vq_size)
690 			return -EOPNOTSUPP;
691 		s.index = idx;
692 		s.num = ops->get_vq_size(vdpa, idx);
693 		if (copy_to_user(argp, &s, sizeof(s)))
694 			return -EFAULT;
695 		return 0;
696 	case VHOST_GET_VRING_BASE:
697 		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
698 		if (r)
699 			return r;
700 
701 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
702 			vq->last_avail_idx = vq_state.packed.last_avail_idx |
703 					     (vq_state.packed.last_avail_counter << 15);
704 			vq->last_used_idx = vq_state.packed.last_used_idx |
705 					    (vq_state.packed.last_used_counter << 15);
706 		} else {
707 			vq->last_avail_idx = vq_state.split.avail_index;
708 		}
709 		break;
710 	case VHOST_SET_VRING_CALL:
711 		if (vq->call_ctx.ctx) {
712 			if (ops->get_status(vdpa) &
713 			    VIRTIO_CONFIG_S_DRIVER_OK)
714 				vhost_vdpa_unsetup_vq_irq(v, idx);
715 		}
716 		break;
717 	}
718 
719 	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
720 	if (r)
721 		return r;
722 
723 	switch (cmd) {
724 	case VHOST_SET_VRING_ADDR:
725 		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
726 			return -EINVAL;
727 
728 		if (ops->set_vq_address(vdpa, idx,
729 					(u64)(uintptr_t)vq->desc,
730 					(u64)(uintptr_t)vq->avail,
731 					(u64)(uintptr_t)vq->used))
732 			r = -EINVAL;
733 		break;
734 
735 	case VHOST_SET_VRING_BASE:
736 		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
737 			return -EINVAL;
738 
739 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
740 			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
741 			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
742 			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
743 			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
744 		} else {
745 			vq_state.split.avail_index = vq->last_avail_idx;
746 		}
747 		r = ops->set_vq_state(vdpa, idx, &vq_state);
748 		break;
749 
750 	case VHOST_SET_VRING_CALL:
751 		if (vq->call_ctx.ctx) {
752 			cb.callback = vhost_vdpa_virtqueue_cb;
753 			cb.private = vq;
754 			cb.trigger = vq->call_ctx.ctx;
755 			if (ops->get_status(vdpa) &
756 			    VIRTIO_CONFIG_S_DRIVER_OK)
757 				vhost_vdpa_setup_vq_irq(v, idx);
758 		} else {
759 			cb.callback = NULL;
760 			cb.private = NULL;
761 			cb.trigger = NULL;
762 		}
763 		ops->set_vq_cb(vdpa, idx, &cb);
764 		break;
765 
766 	case VHOST_SET_VRING_NUM:
767 		ops->set_vq_num(vdpa, idx, vq->num);
768 		break;
769 	}
770 
771 	return r;
772 }
773 
774 static long vhost_vdpa_unlocked_ioctl(struct file *filep,
775 				      unsigned int cmd, unsigned long arg)
776 {
777 	struct vhost_vdpa *v = filep->private_data;
778 	struct vhost_dev *d = &v->vdev;
779 	void __user *argp = (void __user *)arg;
780 	u64 __user *featurep = argp;
781 	u64 features;
782 	long r = 0;
783 
784 	if (cmd == VHOST_SET_BACKEND_FEATURES) {
785 		if (copy_from_user(&features, featurep, sizeof(features)))
786 			return -EFAULT;
787 		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
788 				 BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
789 				 BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
790 				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
791 				 BIT_ULL(VHOST_BACKEND_F_RESUME) |
792 				 BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
793 			return -EOPNOTSUPP;
794 		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
795 		     !vhost_vdpa_can_suspend(v))
796 			return -EOPNOTSUPP;
797 		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
798 		     !vhost_vdpa_can_resume(v))
799 			return -EOPNOTSUPP;
800 		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
801 		    !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
802 			return -EINVAL;
803 		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
804 		     !vhost_vdpa_has_desc_group(v))
805 			return -EOPNOTSUPP;
806 		if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
807 		     !vhost_vdpa_has_persistent_map(v))
808 			return -EOPNOTSUPP;
809 		vhost_set_backend_features(&v->vdev, features);
810 		return 0;
811 	}
812 
813 	mutex_lock(&d->mutex);
814 
815 	switch (cmd) {
816 	case VHOST_VDPA_GET_DEVICE_ID:
817 		r = vhost_vdpa_get_device_id(v, argp);
818 		break;
819 	case VHOST_VDPA_GET_STATUS:
820 		r = vhost_vdpa_get_status(v, argp);
821 		break;
822 	case VHOST_VDPA_SET_STATUS:
823 		r = vhost_vdpa_set_status(v, argp);
824 		break;
825 	case VHOST_VDPA_GET_CONFIG:
826 		r = vhost_vdpa_get_config(v, argp);
827 		break;
828 	case VHOST_VDPA_SET_CONFIG:
829 		r = vhost_vdpa_set_config(v, argp);
830 		break;
831 	case VHOST_GET_FEATURES:
832 		r = vhost_vdpa_get_features(v, argp);
833 		break;
834 	case VHOST_SET_FEATURES:
835 		r = vhost_vdpa_set_features(v, argp);
836 		break;
837 	case VHOST_VDPA_GET_VRING_NUM:
838 		r = vhost_vdpa_get_vring_num(v, argp);
839 		break;
840 	case VHOST_VDPA_GET_GROUP_NUM:
841 		if (copy_to_user(argp, &v->vdpa->ngroups,
842 				 sizeof(v->vdpa->ngroups)))
843 			r = -EFAULT;
844 		break;
845 	case VHOST_VDPA_GET_AS_NUM:
846 		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
847 			r = -EFAULT;
848 		break;
849 	case VHOST_SET_LOG_BASE:
850 	case VHOST_SET_LOG_FD:
851 		r = -ENOIOCTLCMD;
852 		break;
853 	case VHOST_VDPA_SET_CONFIG_CALL:
854 		r = vhost_vdpa_set_config_call(v, argp);
855 		break;
856 	case VHOST_GET_BACKEND_FEATURES:
857 		features = VHOST_VDPA_BACKEND_FEATURES;
858 		if (vhost_vdpa_can_suspend(v))
859 			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
860 		if (vhost_vdpa_can_resume(v))
861 			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
862 		if (vhost_vdpa_has_desc_group(v))
863 			features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
864 		if (vhost_vdpa_has_persistent_map(v))
865 			features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
866 		features |= vhost_vdpa_get_backend_features(v);
867 		if (copy_to_user(featurep, &features, sizeof(features)))
868 			r = -EFAULT;
869 		break;
870 	case VHOST_VDPA_GET_IOVA_RANGE:
871 		r = vhost_vdpa_get_iova_range(v, argp);
872 		break;
873 	case VHOST_VDPA_GET_CONFIG_SIZE:
874 		r = vhost_vdpa_get_config_size(v, argp);
875 		break;
876 	case VHOST_VDPA_GET_VQS_COUNT:
877 		r = vhost_vdpa_get_vqs_count(v, argp);
878 		break;
879 	case VHOST_VDPA_SUSPEND:
880 		r = vhost_vdpa_suspend(v);
881 		break;
882 	case VHOST_VDPA_RESUME:
883 		r = vhost_vdpa_resume(v);
884 		break;
885 	default:
886 		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
887 		if (r == -ENOIOCTLCMD)
888 			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
889 		break;
890 	}
891 
892 	if (r)
893 		goto out;
894 
895 	switch (cmd) {
896 	case VHOST_SET_OWNER:
897 		r = vhost_vdpa_bind_mm(v);
898 		if (r)
899 			vhost_dev_reset_owner(d, NULL);
900 		break;
901 	}
902 out:
903 	mutex_unlock(&d->mutex);
904 	return r;
905 }
906 static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
907 				     struct vhost_iotlb_map *map, u32 asid)
908 {
909 	struct vdpa_device *vdpa = v->vdpa;
910 	const struct vdpa_config_ops *ops = vdpa->config;
911 	if (ops->dma_map) {
912 		ops->dma_unmap(vdpa, asid, map->start, map->size);
913 	} else if (ops->set_map == NULL) {
914 		iommu_unmap(v->domain, map->start, map->size);
915 	}
916 }
917 
918 static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
919 				u64 start, u64 last, u32 asid)
920 {
921 	struct vhost_dev *dev = &v->vdev;
922 	struct vhost_iotlb_map *map;
923 	struct page *page;
924 	unsigned long pfn, pinned;
925 
926 	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
927 		pinned = PFN_DOWN(map->size);
928 		for (pfn = PFN_DOWN(map->addr);
929 		     pinned > 0; pfn++, pinned--) {
930 			page = pfn_to_page(pfn);
931 			if (map->perm & VHOST_ACCESS_WO)
932 				set_page_dirty_lock(page);
933 			unpin_user_page(page);
934 		}
935 		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
936 		vhost_vdpa_general_unmap(v, map, asid);
937 		vhost_iotlb_map_free(iotlb, map);
938 	}
939 }
940 
941 static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
942 				u64 start, u64 last, u32 asid)
943 {
944 	struct vhost_iotlb_map *map;
945 	struct vdpa_map_file *map_file;
946 
947 	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
948 		map_file = (struct vdpa_map_file *)map->opaque;
949 		fput(map_file->file);
950 		kfree(map_file);
951 		vhost_vdpa_general_unmap(v, map, asid);
952 		vhost_iotlb_map_free(iotlb, map);
953 	}
954 }
955 
956 static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
957 				   struct vhost_iotlb *iotlb, u64 start,
958 				   u64 last, u32 asid)
959 {
960 	struct vdpa_device *vdpa = v->vdpa;
961 
962 	if (vdpa->use_va)
963 		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
964 
965 	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
966 }
967 
968 static int perm_to_iommu_flags(u32 perm)
969 {
970 	int flags = 0;
971 
972 	switch (perm) {
973 	case VHOST_ACCESS_WO:
974 		flags |= IOMMU_WRITE;
975 		break;
976 	case VHOST_ACCESS_RO:
977 		flags |= IOMMU_READ;
978 		break;
979 	case VHOST_ACCESS_RW:
980 		flags |= (IOMMU_WRITE | IOMMU_READ);
981 		break;
982 	default:
983 		WARN(1, "invalid vhost IOTLB permission\n");
984 		break;
985 	}
986 
987 	return flags | IOMMU_CACHE;
988 }
989 
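/*
 * Insert a mapping into the vhost IOTLB and mirror it to the parent: via
 * dma_map() if provided, via set_map() (deferred to batch end while
 * batching), or through the platform IOMMU domain otherwise. On failure the
 * IOTLB range is removed again; for PA mappings the pinned pages are
 * accounted in mm->pinned_vm.
 */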
990 static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
991 			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
992 {
993 	struct vhost_dev *dev = &v->vdev;
994 	struct vdpa_device *vdpa = v->vdpa;
995 	const struct vdpa_config_ops *ops = vdpa->config;
996 	u32 asid = iotlb_to_asid(iotlb);
997 	int r = 0;
998 
999 	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
1000 				      pa, perm, opaque);
1001 	if (r)
1002 		return r;
1003 
1004 	if (ops->dma_map) {
1005 		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
1006 	} else if (ops->set_map) {
1007 		if (!v->in_batch)
1008 			r = ops->set_map(vdpa, asid, iotlb);
1009 	} else {
1010 		r = iommu_map(v->domain, iova, pa, size,
1011 			      perm_to_iommu_flags(perm),
1012 			      GFP_KERNEL_ACCOUNT);
1013 	}
1014 	if (r) {
1015 		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
1016 		return r;
1017 	}
1018 
1019 	if (!vdpa->use_va)
1020 		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
1021 
1022 	return 0;
1023 }
1024 
1025 static void vhost_vdpa_unmap(struct vhost_vdpa *v,
1026 			     struct vhost_iotlb *iotlb,
1027 			     u64 iova, u64 size)
1028 {
1029 	struct vdpa_device *vdpa = v->vdpa;
1030 	const struct vdpa_config_ops *ops = vdpa->config;
1031 	u32 asid = iotlb_to_asid(iotlb);
1032 
1033 	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
1034 
1035 	if (ops->set_map) {
1036 		if (!v->in_batch)
1037 			ops->set_map(vdpa, asid, iotlb);
1038 	}
1039 
1040 }
1041 
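/*
 * VA mapping path for parents with use_va: walk the VMAs covering the
 * [uaddr, uaddr + size) range, take a reference on each backing file and
 * hand it to the parent as a struct vdpa_map_file (offset + file). Ranges
 * that are not shared file mappings, or that are VM_IO/VM_PFNMAP, are
 * skipped; on error everything mapped so far is unwound.
 */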
1042 static int vhost_vdpa_va_map(struct vhost_vdpa *v,
1043 			     struct vhost_iotlb *iotlb,
1044 			     u64 iova, u64 size, u64 uaddr, u32 perm)
1045 {
1046 	struct vhost_dev *dev = &v->vdev;
1047 	u64 offset, map_size, map_iova = iova;
1048 	struct vdpa_map_file *map_file;
1049 	struct vm_area_struct *vma;
1050 	int ret = 0;
1051 
1052 	mmap_read_lock(dev->mm);
1053 
1054 	while (size) {
1055 		vma = find_vma(dev->mm, uaddr);
1056 		if (!vma) {
1057 			ret = -EINVAL;
1058 			break;
1059 		}
1060 		map_size = min(size, vma->vm_end - uaddr);
1061 		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
1062 			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
1063 			goto next;
1064 
1065 		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
1066 		if (!map_file) {
1067 			ret = -ENOMEM;
1068 			break;
1069 		}
1070 		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
1071 		map_file->offset = offset;
1072 		map_file->file = get_file(vma->vm_file);
1073 		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
1074 				     perm, map_file);
1075 		if (ret) {
1076 			fput(map_file->file);
1077 			kfree(map_file);
1078 			break;
1079 		}
1080 next:
1081 		size -= map_size;
1082 		uaddr += map_size;
1083 		map_iova += map_size;
1084 	}
1085 	if (ret)
1086 		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
1087 
1088 	mmap_read_unlock(dev->mm);
1089 
1090 	return ret;
1091 }
1092 
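/*
 * PA mapping path: pin the user pages with pin_user_pages() one page list
 * at a time (bounded by RLIMIT_MEMLOCK), coalesce physically contiguous
 * pages and map each contiguous chunk with a single vhost_vdpa_map() call.
 * On failure the already pinned/mapped ranges are unpinned and unmapped.
 */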
1093 static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
1094 			     struct vhost_iotlb *iotlb,
1095 			     u64 iova, u64 size, u64 uaddr, u32 perm)
1096 {
1097 	struct vhost_dev *dev = &v->vdev;
1098 	struct page **page_list;
1099 	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
1100 	unsigned int gup_flags = FOLL_LONGTERM;
1101 	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
1102 	unsigned long lock_limit, sz2pin, nchunks, i;
1103 	u64 start = iova;
1104 	long pinned;
1105 	int ret = 0;
1106 
1107 	/* Limit the use of memory for bookkeeping */
1108 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
1109 	if (!page_list)
1110 		return -ENOMEM;
1111 
1112 	if (perm & VHOST_ACCESS_WO)
1113 		gup_flags |= FOLL_WRITE;
1114 
1115 	npages = PFN_UP(size + (iova & ~PAGE_MASK));
1116 	if (!npages) {
1117 		ret = -EINVAL;
1118 		goto free;
1119 	}
1120 
1121 	mmap_read_lock(dev->mm);
1122 
1123 	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
1124 	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
1125 		ret = -ENOMEM;
1126 		goto unlock;
1127 	}
1128 
1129 	cur_base = uaddr & PAGE_MASK;
1130 	iova &= PAGE_MASK;
1131 	nchunks = 0;
1132 
1133 	while (npages) {
1134 		sz2pin = min_t(unsigned long, npages, list_size);
1135 		pinned = pin_user_pages(cur_base, sz2pin,
1136 					gup_flags, page_list);
1137 		if (sz2pin != pinned) {
1138 			if (pinned < 0) {
1139 				ret = pinned;
1140 			} else {
1141 				unpin_user_pages(page_list, pinned);
1142 				ret = -ENOMEM;
1143 			}
1144 			goto out;
1145 		}
1146 		nchunks++;
1147 
1148 		if (!last_pfn)
1149 			map_pfn = page_to_pfn(page_list[0]);
1150 
1151 		for (i = 0; i < pinned; i++) {
1152 			unsigned long this_pfn = page_to_pfn(page_list[i]);
1153 			u64 csize;
1154 
1155 			if (last_pfn && (this_pfn != last_pfn + 1)) {
1156 				/* Pin a contiguous chunk of memory */
1157 				csize = PFN_PHYS(last_pfn - map_pfn + 1);
1158 				ret = vhost_vdpa_map(v, iotlb, iova, csize,
1159 						     PFN_PHYS(map_pfn),
1160 						     perm, NULL);
1161 				if (ret) {
1162 					/*
1163 					 * Unpin the pages that are left unmapped
1164 					 * from this point on in the current
1165 					 * page_list. The remaining outstanding
1166 					 * ones which may stride across several
1167 					 * chunks will be covered in the common
1168 					 * error path subsequently.
1169 					 */
1170 					unpin_user_pages(&page_list[i],
1171 							 pinned - i);
1172 					goto out;
1173 				}
1174 
1175 				map_pfn = this_pfn;
1176 				iova += csize;
1177 				nchunks = 0;
1178 			}
1179 
1180 			last_pfn = this_pfn;
1181 		}
1182 
1183 		cur_base += PFN_PHYS(pinned);
1184 		npages -= pinned;
1185 	}
1186 
1187 	/* Pin the remaining chunk */
1188 	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
1189 			     PFN_PHYS(map_pfn), perm, NULL);
1190 out:
1191 	if (ret) {
1192 		if (nchunks) {
1193 			unsigned long pfn;
1194 
1195 			/*
1196 			 * Unpin the outstanding pages which are yet to be
1197 			 * mapped but haven't due to vdpa_map() or
1198 			 * pin_user_pages() failure.
1199 			 *
1200 			 * Mapped pages are accounted in vdpa_map(), hence
1201 			 * the corresponding unpinning will be handled by
1202 			 * vdpa_unmap().
1203 			 */
1204 			WARN_ON(!last_pfn);
1205 			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
1206 				unpin_user_page(pfn_to_page(pfn));
1207 		}
1208 		vhost_vdpa_unmap(v, iotlb, start, size);
1209 	}
1210 unlock:
1211 	mmap_read_unlock(dev->mm);
1212 free:
1213 	free_page((unsigned long)page_list);
1214 	return ret;
1215 
1216 }
1217 
1218 static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
1219 					   struct vhost_iotlb *iotlb,
1220 					   struct vhost_iotlb_msg *msg)
1221 {
1222 	struct vdpa_device *vdpa = v->vdpa;
1223 
1224 	if (msg->iova < v->range.first || !msg->size ||
1225 	    msg->iova > U64_MAX - msg->size + 1 ||
1226 	    msg->iova + msg->size - 1 > v->range.last)
1227 		return -EINVAL;
1228 
1229 	if (vhost_iotlb_itree_first(iotlb, msg->iova,
1230 				    msg->iova + msg->size - 1))
1231 		return -EEXIST;
1232 
1233 	if (vdpa->use_va)
1234 		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
1235 					 msg->uaddr, msg->perm);
1236 
1237 	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
1238 				 msg->perm);
1239 }
1240 
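/*
 * Entry point for VHOST_IOTLB_MSG_V2 messages: look up the address space
 * for the given ASID (creating it for UPDATE/BATCH_BEGIN), then dispatch
 * map updates, invalidations and batching begin/end. Batched updates are
 * flushed to the parent with a single set_map() at BATCH_END.
 */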
1241 static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
1242 					struct vhost_iotlb_msg *msg)
1243 {
1244 	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
1245 	struct vdpa_device *vdpa = v->vdpa;
1246 	const struct vdpa_config_ops *ops = vdpa->config;
1247 	struct vhost_iotlb *iotlb = NULL;
1248 	struct vhost_vdpa_as *as = NULL;
1249 	int r = 0;
1250 
1251 	mutex_lock(&dev->mutex);
1252 
1253 	r = vhost_dev_check_owner(dev);
1254 	if (r)
1255 		goto unlock;
1256 
1257 	if (msg->type == VHOST_IOTLB_UPDATE ||
1258 	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
1259 		as = vhost_vdpa_find_alloc_as(v, asid);
1260 		if (!as) {
1261 			dev_err(&v->dev, "can't find and alloc asid %d\n",
1262 				asid);
1263 			r = -EINVAL;
1264 			goto unlock;
1265 		}
1266 		iotlb = &as->iotlb;
1267 	} else
1268 		iotlb = asid_to_iotlb(v, asid);
1269 
1270 	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
1271 		if (v->in_batch && v->batch_asid != asid) {
1272 			dev_info(&v->dev, "batch id %d asid %d\n",
1273 				 v->batch_asid, asid);
1274 		}
1275 		if (!iotlb)
1276 			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
1277 		r = -EINVAL;
1278 		goto unlock;
1279 	}
1280 
1281 	switch (msg->type) {
1282 	case VHOST_IOTLB_UPDATE:
1283 		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
1284 		break;
1285 	case VHOST_IOTLB_INVALIDATE:
1286 		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
1287 		break;
1288 	case VHOST_IOTLB_BATCH_BEGIN:
1289 		v->batch_asid = asid;
1290 		v->in_batch = true;
1291 		break;
1292 	case VHOST_IOTLB_BATCH_END:
1293 		if (v->in_batch && ops->set_map)
1294 			ops->set_map(vdpa, asid, iotlb);
1295 		v->in_batch = false;
1296 		break;
1297 	default:
1298 		r = -EINVAL;
1299 		break;
1300 	}
1301 unlock:
1302 	mutex_unlock(&dev->mutex);
1303 
1304 	return r;
1305 }
1306 
1307 static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
1308 					 struct iov_iter *from)
1309 {
1310 	struct file *file = iocb->ki_filp;
1311 	struct vhost_vdpa *v = file->private_data;
1312 	struct vhost_dev *dev = &v->vdev;
1313 
1314 	return vhost_chr_write_iter(dev, from);
1315 }
1316 
1317 static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
1318 {
1319 	struct vdpa_device *vdpa = v->vdpa;
1320 	const struct vdpa_config_ops *ops = vdpa->config;
1321 	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
1322 	int ret;
1323 
1324 	/* Device wants to do DMA by itself */
1325 	if (ops->set_map || ops->dma_map)
1326 		return 0;
1327 
1328 	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
1329 		dev_warn_once(&v->dev,
1330 			      "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
1331 		return -ENOTSUPP;
1332 	}
1333 
1334 	v->domain = iommu_paging_domain_alloc(dma_dev);
1335 	if (IS_ERR(v->domain)) {
1336 		ret = PTR_ERR(v->domain);
1337 		v->domain = NULL;
1338 		return ret;
1339 	}
1340 
1341 	ret = iommu_attach_device(v->domain, dma_dev);
1342 	if (ret)
1343 		goto err_attach;
1344 
1345 	return 0;
1346 
1347 err_attach:
1348 	iommu_domain_free(v->domain);
1349 	v->domain = NULL;
1350 	return ret;
1351 }
1352 
1353 static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
1354 {
1355 	struct vdpa_device *vdpa = v->vdpa;
1356 	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
1357 
1358 	if (v->domain) {
1359 		iommu_detach_device(v->domain, dma_dev);
1360 		iommu_domain_free(v->domain);
1361 	}
1362 
1363 	v->domain = NULL;
1364 }
1365 
1366 static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
1367 {
1368 	struct vdpa_iova_range *range = &v->range;
1369 	struct vdpa_device *vdpa = v->vdpa;
1370 	const struct vdpa_config_ops *ops = vdpa->config;
1371 
1372 	if (ops->get_iova_range) {
1373 		*range = ops->get_iova_range(vdpa);
1374 	} else if (v->domain && v->domain->geometry.force_aperture) {
1375 		range->first = v->domain->geometry.aperture_start;
1376 		range->last = v->domain->geometry.aperture_end;
1377 	} else {
1378 		range->first = 0;
1379 		range->last = ULLONG_MAX;
1380 	}
1381 }
1382 
1383 static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
1384 {
1385 	struct vhost_vdpa_as *as;
1386 	u32 asid;
1387 
1388 	for (asid = 0; asid < v->vdpa->nas; asid++) {
1389 		as = asid_to_as(v, asid);
1390 		if (as)
1391 			vhost_vdpa_remove_as(v, asid);
1392 	}
1393 
1394 	vhost_vdpa_free_domain(v);
1395 	vhost_dev_cleanup(&v->vdev);
1396 	kfree(v->vdev.vqs);
1397 	v->vdev.vqs = NULL;
1398 }
1399 
1400 static int vhost_vdpa_open(struct inode *inode, struct file *filep)
1401 {
1402 	struct vhost_vdpa *v;
1403 	struct vhost_dev *dev;
1404 	struct vhost_virtqueue **vqs;
1405 	int r, opened;
1406 	u32 i, nvqs;
1407 
1408 	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
1409 
1410 	opened = atomic_cmpxchg(&v->opened, 0, 1);
1411 	if (opened)
1412 		return -EBUSY;
1413 
1414 	nvqs = v->nvqs;
1415 	r = vhost_vdpa_reset(v);
1416 	if (r)
1417 		goto err;
1418 
1419 	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
1420 	if (!vqs) {
1421 		r = -ENOMEM;
1422 		goto err;
1423 	}
1424 
1425 	dev = &v->vdev;
1426 	for (i = 0; i < nvqs; i++) {
1427 		vqs[i] = &v->vqs[i];
1428 		vqs[i]->handle_kick = handle_vq_kick;
1429 		vqs[i]->call_ctx.ctx = NULL;
1430 	}
1431 	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
1432 		       vhost_vdpa_process_iotlb_msg);
1433 
1434 	r = vhost_vdpa_alloc_domain(v);
1435 	if (r)
1436 		goto err_alloc_domain;
1437 
1438 	vhost_vdpa_set_iova_range(v);
1439 
1440 	filep->private_data = v;
1441 
1442 	return 0;
1443 
1444 err_alloc_domain:
1445 	vhost_vdpa_cleanup(v);
1446 err:
1447 	atomic_dec(&v->opened);
1448 	return r;
1449 }
1450 
1451 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
1452 {
1453 	u32 i;
1454 
1455 	for (i = 0; i < v->nvqs; i++)
1456 		vhost_vdpa_unsetup_vq_irq(v, i);
1457 }
1458 
1459 static int vhost_vdpa_release(struct inode *inode, struct file *filep)
1460 {
1461 	struct vhost_vdpa *v = filep->private_data;
1462 	struct vhost_dev *d = &v->vdev;
1463 
1464 	mutex_lock(&d->mutex);
1465 	filep->private_data = NULL;
1466 	vhost_vdpa_clean_irq(v);
1467 	vhost_vdpa_reset(v);
1468 	vhost_dev_stop(&v->vdev);
1469 	vhost_vdpa_unbind_mm(v);
1470 	vhost_vdpa_config_put(v);
1471 	vhost_vdpa_cleanup(v);
1472 	mutex_unlock(&d->mutex);
1473 
1474 	atomic_dec(&v->opened);
1475 	complete(&v->completion);
1476 
1477 	return 0;
1478 }
1479 
1480 #ifdef CONFIG_MMU
1481 static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
1482 {
1483 	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
1484 	struct vdpa_device *vdpa = v->vdpa;
1485 	const struct vdpa_config_ops *ops = vdpa->config;
1486 	struct vdpa_notification_area notify;
1487 	struct vm_area_struct *vma = vmf->vma;
1488 	u16 index = vma->vm_pgoff;
1489 
1490 	notify = ops->get_vq_notification(vdpa, index);
1491 
1492 	return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
1493 }
1494 
1495 static const struct vm_operations_struct vhost_vdpa_vm_ops = {
1496 	.fault = vhost_vdpa_fault,
1497 };
1498 
1499 static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
1500 {
1501 	struct vhost_vdpa *v = vma->vm_file->private_data;
1502 	struct vdpa_device *vdpa = v->vdpa;
1503 	const struct vdpa_config_ops *ops = vdpa->config;
1504 	struct vdpa_notification_area notify;
1505 	unsigned long index = vma->vm_pgoff;
1506 
1507 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1508 		return -EINVAL;
1509 	if ((vma->vm_flags & VM_SHARED) == 0)
1510 		return -EINVAL;
1511 	if (vma->vm_flags & VM_READ)
1512 		return -EINVAL;
1513 	if (index > 65535)
1514 		return -EINVAL;
1515 	if (!ops->get_vq_notification)
1516 		return -ENOTSUPP;
1517 
1518 	/* To be safe and easily modelled by userspace, we only
1519 	 * support the doorbell which sits on the page boundary and
1520 	 * does not share the page with other registers.
1521 	 */
1522 	notify = ops->get_vq_notification(vdpa, index);
1523 	if (notify.addr & (PAGE_SIZE - 1))
1524 		return -EINVAL;
1525 	if (vma->vm_end - vma->vm_start != notify.size)
1526 		return -ENOTSUPP;
1527 
1528 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1529 	vma->vm_ops = &vhost_vdpa_vm_ops;
1530 	return 0;
1531 }
1532 #endif /* CONFIG_MMU */
1533 
1534 static const struct file_operations vhost_vdpa_fops = {
1535 	.owner		= THIS_MODULE,
1536 	.open		= vhost_vdpa_open,
1537 	.release	= vhost_vdpa_release,
1538 	.write_iter	= vhost_vdpa_chr_write_iter,
1539 	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
1540 #ifdef CONFIG_MMU
1541 	.mmap		= vhost_vdpa_mmap,
1542 #endif /* CONFIG_MMU */
1543 	.compat_ioctl	= compat_ptr_ioctl,
1544 };
1545 
1546 static void vhost_vdpa_release_dev(struct device *device)
1547 {
1548 	struct vhost_vdpa *v =
1549 	       container_of(device, struct vhost_vdpa, dev);
1550 
1551 	ida_free(&vhost_vdpa_ida, v->minor);
1552 	kfree(v->vqs);
1553 	kfree(v);
1554 }
1555 
1556 static int vhost_vdpa_probe(struct vdpa_device *vdpa)
1557 {
1558 	const struct vdpa_config_ops *ops = vdpa->config;
1559 	struct vhost_vdpa *v;
1560 	int minor;
1561 	int i, r;
1562 
1563 	/* We can't support a platform IOMMU device with more than one
1564 	 * group or address space (AS).
1565 	 */
1566 	if (!ops->set_map && !ops->dma_map &&
1567 	    (vdpa->ngroups > 1 || vdpa->nas > 1))
1568 		return -EOPNOTSUPP;
1569 
1570 	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1571 	if (!v)
1572 		return -ENOMEM;
1573 
1574 	minor = ida_alloc_max(&vhost_vdpa_ida, VHOST_VDPA_DEV_MAX - 1,
1575 			      GFP_KERNEL);
1576 	if (minor < 0) {
1577 		kfree(v);
1578 		return minor;
1579 	}
1580 
1581 	atomic_set(&v->opened, 0);
1582 	v->minor = minor;
1583 	v->vdpa = vdpa;
1584 	v->nvqs = vdpa->nvqs;
1585 	v->virtio_id = ops->get_device_id(vdpa);
1586 
1587 	device_initialize(&v->dev);
1588 	v->dev.release = vhost_vdpa_release_dev;
1589 	v->dev.parent = &vdpa->dev;
1590 	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
1591 	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
1592 			       GFP_KERNEL);
1593 	if (!v->vqs) {
1594 		r = -ENOMEM;
1595 		goto err;
1596 	}
1597 
1598 	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
1599 	if (r)
1600 		goto err;
1601 
1602 	cdev_init(&v->cdev, &vhost_vdpa_fops);
1603 	v->cdev.owner = THIS_MODULE;
1604 
1605 	r = cdev_device_add(&v->cdev, &v->dev);
1606 	if (r)
1607 		goto err;
1608 
1609 	init_completion(&v->completion);
1610 	vdpa_set_drvdata(vdpa, v);
1611 
1612 	for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
1613 		INIT_HLIST_HEAD(&v->as[i]);
1614 
1615 	return 0;
1616 
1617 err:
1618 	put_device(&v->dev);
1619 	return r;
1620 }
1621 
1622 static void vhost_vdpa_remove(struct vdpa_device *vdpa)
1623 {
1624 	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
1625 	int opened;
1626 
1627 	cdev_device_del(&v->cdev, &v->dev);
1628 
1629 	do {
1630 		opened = atomic_cmpxchg(&v->opened, 0, 1);
1631 		if (!opened)
1632 			break;
1633 		wait_for_completion(&v->completion);
1634 	} while (1);
1635 
1636 	put_device(&v->dev);
1637 }
1638 
1639 static struct vdpa_driver vhost_vdpa_driver = {
1640 	.driver = {
1641 		.name	= "vhost_vdpa",
1642 	},
1643 	.probe	= vhost_vdpa_probe,
1644 	.remove	= vhost_vdpa_remove,
1645 };
1646 
1647 static int __init vhost_vdpa_init(void)
1648 {
1649 	int r;
1650 
1651 	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
1652 				"vhost-vdpa");
1653 	if (r)
1654 		goto err_alloc_chrdev;
1655 
1656 	r = vdpa_register_driver(&vhost_vdpa_driver);
1657 	if (r)
1658 		goto err_vdpa_register_driver;
1659 
1660 	return 0;
1661 
1662 err_vdpa_register_driver:
1663 	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
1664 err_alloc_chrdev:
1665 	return r;
1666 }
1667 module_init(vhost_vdpa_init);
1668 
1669 static void __exit vhost_vdpa_exit(void)
1670 {
1671 	vdpa_unregister_driver(&vhost_vdpa_driver);
1672 	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
1673 }
1674 module_exit(vhost_vdpa_exit);
1675 
1676 MODULE_VERSION("0.0.1");
1677 MODULE_LICENSE("GPL v2");
1678 MODULE_AUTHOR("Intel Corporation");
1679 MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
1680