// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/vhost_types.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping. 1 - Enable; 0 - Disable (default: 1)");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

static bool use_va = true;
module_param(use_va, bool, 0444);
MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

struct vdpasim_mm_work {
	struct kthread_work work;
	struct vdpasim *vdpasim;
	struct mm_struct *mm_to_bind;
	int ret;
};

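/*
 * Runs on the simulator's kthread worker: updating ->mm_bound from the
 * worker itself serializes the mm switch with any datapath work already
 * queued on the same worker.
 */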
static void vdpasim_mm_work_fn(struct kthread_work *work)
{
	struct vdpasim_mm_work *mm_work =
		container_of(work, struct vdpasim_mm_work, work);
	struct vdpasim *vdpasim = mm_work->vdpasim;

	mm_work->ret = 0;

	//TODO: should we attach the cgroup of the mm owner?
	vdpasim->mm_bound = mm_work->mm_to_bind;
}

static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
					  struct vdpasim_mm_work *mm_work)
{
	struct kthread_work *work = &mm_work->work;

	kthread_init_work(work, vdpasim_mm_work_fn);
	kthread_queue_work(vdpasim->worker, work);

	kthread_flush_work(work);
}

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

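/*
 * (Re)initialize the vringh instance from the driver-supplied ring
 * addresses. With use_va and a bound mm the addresses are userspace
 * VAs; otherwise they are IOVAs translated through the IOTLB.
 */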
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	uint16_t last_avail_idx = vq->vring.last_avail_idx;
	struct vring_desc *desc = (struct vring_desc *)
				  (uintptr_t)vq->desc_addr;
	struct vring_avail *avail = (struct vring_avail *)
				    (uintptr_t)vq->driver_addr;
	struct vring_used *used = (struct vring_used *)
				  (uintptr_t)vq->device_addr;

	if (use_va && vdpasim->mm_bound) {
		vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
				     true, desc, avail, used);
	} else {
		vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
				  true, desc, avail, used);
	}

	vq->vring.last_avail_idx = last_avail_idx;

	/*
	 * Since vdpa_sim does not support receiving in-flight descriptors
	 * as the destination of a migration, set avail_idx and used_idx
	 * to the same value at vq start. This is how vhost-user behaves
	 * in a VHOST_SET_VRING_BASE call.
	 *
	 * Although the simple fix would be to set last_used_idx in
	 * vdpasim_set_vq_state(), it would be reset here in
	 * vdpasim_queue_ready().
	 */
	vq->vring.last_used_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

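/*
 * Core of the device reset: every vq is torn down and pointed back at
 * address space 0. With VDPA_RESET_F_CLEAN_MAP, each address space is
 * also restored to its initial 1:1 passthrough mapping.
 */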
static void vdpasim_do_reset(struct vdpasim *vdpasim, u32 flags)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	if (flags & VDPA_RESET_F_CLEAN_MAP) {
		for (i = 0; i < vdpasim->dev_attr.nas; i++) {
			vhost_iotlb_reset(&vdpasim->iommu[i]);
			vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
					      0, VHOST_MAP_RW);
			vdpasim->iommu_pt[i] = true;
		}
	}

	vdpasim->running = false;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

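/*
 * Trampoline for the device-specific work: if use_va is set and an mm
 * is bound, pin it with mmget_not_zero() and make it current via
 * kthread_use_mm() so the vrings can be accessed through userspace VAs.
 */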
static void vdpasim_work_fn(struct kthread_work *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct mm_struct *mm = vdpasim->mm_bound;

	if (use_va && mm) {
		if (!mmget_not_zero(mm))
			return;
		kthread_use_mm(mm);
	}

	vdpasim->dev_attr.work_fn(vdpasim);

	if (use_va && mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}

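/**
 * vdpasim_create - create a new vDPA simulator instance
 * @dev_attr: device attributes, filled in by the device simulator
 * @config: initial configuration requested through the management API
 *
 * Return: the new instance, or an ERR_PTR() encoded error on failure.
 */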
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpa_device *vdpa;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (!dev_attr->alloc_size)
		return ERR_PTR(-EINVAL);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpa = __vdpa_alloc_device(NULL, ops, NULL,
				   dev_attr->ngroups, dev_attr->nas,
				   dev_attr->alloc_size,
				   dev_attr->name, use_va);
	if (IS_ERR(vdpa)) {
		ret = PTR_ERR(vdpa);
		goto err_alloc;
	}

	vdpasim = vdpa_to_sim(vdpa);
	vdpasim->dev_attr = *dev_attr;
	dev = &vdpasim->vdpa.dev;

	kthread_init_work(&vdpasim->work, vdpasim_work_fn);
	vdpasim->worker = kthread_run_worker(0, "vDPA sim worker: %s",
					     dev_attr->name);
	if (IS_ERR(vdpasim->worker))
		goto err_iommu;

	mutex_init(&vdpasim->mutex);
	spin_lock_init(&vdpasim->iommu_lock);

	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kzalloc_objs(struct vdpasim_virtqueue, dev_attr->nvqs);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_objs(*vdpasim->iommu, vdpasim->dev_attr.nas);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->iommu_pt = kmalloc_objs(*vdpasim->iommu_pt,
					 vdpasim->dev_attr.nas);
	if (!vdpasim->iommu_pt)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++) {
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
		vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, 0,
				      VHOST_MAP_RW);
		vdpasim->iommu_pt[i] = true;
	}

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	vdpasim->vdpa.vmap.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

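/*
 * A minimal usage sketch (hypothetical "foo" device, illustrative only;
 * see vdpa_sim_net.c and vdpa_sim_blk.c for real users): a device
 * simulator fills in a struct vdpasim_dev_attr from its management
 * device's dev_add() callback, creates the instance and registers it:
 *
 *	dev_attr.mgmt_dev = mdev;
 *	dev_attr.name = name;
 *	dev_attr.id = VIRTIO_ID_NET;
 *	dev_attr.supported_features = VDPASIM_FOO_FEATURES;
 *	dev_attr.nvqs = 3;
 *	dev_attr.ngroups = 2;
 *	dev_attr.nas = 2;
 *	dev_attr.alloc_size = sizeof(struct vdpasim_foo);
 *	dev_attr.config_size = sizeof(struct virtio_net_config);
 *	dev_attr.work_fn = vdpasim_foo_work;
 *	dev_attr.get_config = vdpasim_foo_get_config;
 *	dev_attr.free = vdpasim_foo_free;
 *
 *	simdev = vdpasim_create(&dev_attr, config);
 *	if (IS_ERR(simdev))
 *		return PTR_ERR(simdev);
 *	ret = _vdpa_register_device(&simdev->vdpa, dev_attr.nvqs);
 */

/* Queue the device-specific datapath work on the simulator's worker. */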
void vdpasim_schedule_work(struct vdpasim *vdpasim)
{
	kthread_queue_work(vdpasim->worker, &vdpasim->work);
}
EXPORT_SYMBOL_GPL(vdpasim_schedule_work);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static u16 vdpasim_get_vq_size(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)
		return vq->num;
	else
		return VDPASIM_QUEUE_MAX;
}

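/*
 * Kicks arriving while the device is suspended are not lost: they are
 * latched in ->pending_kick and replayed by vdpasim_resume().
 */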
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;
		return;
	}

	if (vq->ready)
		vdpasim_schedule_work(vdpasim);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	mutex_lock(&vdpasim->mutex);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready) {
		vdpasim_queue_ready(vdpasim, idx);
	}
	mutex_unlock(&vdpasim->mutex);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	mutex_lock(&vdpasim->mutex);
	vrh->last_avail_idx = state->split.avail_index;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
				struct sk_buff *msg,
				struct netlink_ext_ack *extack)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (vdpasim->dev_attr.get_stats)
		return vdpasim->dev_attr.get_stats(vdpasim, idx,
						   msg, extack);
	return -EOPNOTSUPP;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0; the CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
{
	return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by the driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupts */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	mutex_lock(&vdpasim->mutex);
	status = vdpasim->status;
	mutex_unlock(&vdpasim->mutex);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = status;
	vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
	mutex_unlock(&vdpasim->mutex);
}

static int vdpasim_compat_reset(struct vdpa_device *vdpa, u32 flags)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim, flags);
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	return vdpasim_compat_reset(vdpa, 0);
}

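/*
 * Suspend stops the datapath by clearing ->running; resume restarts it
 * and replays any kicks that arrived while the device was suspended.
 */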
static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = false;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_resume(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = true;

	if (vdpasim->pending_kick) {
		/* Process pending descriptors */
		for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
			vdpasim_kick_vq(vdpa, i);

		vdpasim->pending_kick = false;
	}

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	mutex_lock(&vdpasim->mutex);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

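/*
 * Batched mapping: replace the whole translation of @asid with the
 * contents of @iotlb in a single call.
 */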
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);
	vdpasim->iommu_pt[asid] = false;

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

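/*
 * Restore @asid to its initial 1:1 passthrough mapping. ->iommu_pt
 * tracks which address spaces are still in passthrough mode so the
 * identity range is not rebuilt needlessly.
 */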
static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid])
		goto out;
	vhost_iotlb_reset(&vdpasim->iommu[asid]);
	vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX,
			      0, VHOST_MAP_RW);
	vdpasim->iommu_pt[asid] = true;
out:
	spin_unlock(&vdpasim->iommu_lock);
	return 0;
}

static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = mm;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);

	return mm_work.ret;
}

static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = NULL;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
}

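/*
 * Incremental interface: the first explicit mapping in an address space
 * drops the default passthrough range before installing the new
 * translation.
 */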
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	kthread_cancel_work_sync(&vdpasim->work);
	kthread_destroy_worker(vdpasim->worker);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	vdpasim->dev_attr.free(vdpasim);

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->iommu_pt);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

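/*
 * Two ops tables are exposed: the default one implements the
 * incremental dma_map()/dma_unmap() interface, while the batch table
 * (selected via the batch_mapping module parameter) uses set_map()
 * instead.
 */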
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.get_backend_features   = vdpasim_get_backend_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_vq_size		= vdpasim_get_vq_size,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.compat_reset		= vdpasim_compat_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.reset_map              = vdpasim_reset_map,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.get_backend_features   = vdpasim_get_backend_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_vq_size		= vdpasim_get_vq_size,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.compat_reset		= vdpasim_compat_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.set_map                = vdpasim_set_map,
	.reset_map              = vdpasim_reset_map,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);