/* xref: /linux/drivers/gpu/drm/i915/gvt/kvmgt.c (revision 160b8e75932fd51a49607d32dbfa1d417977b79c) */
/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

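/*
 * Editor's note: an illustrative sketch of the offset encoding above, not
 * part of the driver. Each VFIO region index is packed into the top bits
 * of the file offset, so one file descriptor can multiplex every region:
 *
 *	// BAR2 (index 2) starts at file offset 2ULL << 40:
 *	u64 off = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR2_REGION_INDEX);
 *	// recover the index and the position inside the region:
 *	u32 index = VFIO_PCI_OFFSET_TO_INDEX(off);	// == 2
 *	u64 pos = off & VFIO_PCI_OFFSET_MASK;		// == 0
 */
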
#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
	/* must return ssize_t so errors like -EINVAL can propagate */
	ssize_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
	struct dentry *debugfs_cache_entries;
};

struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	struct kref ref;
};

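/*
 * Editor's note: vgpu->handle stores a pointer to the kvmgt_guest_info once
 * a guest is attached (see kvmgt_guest_init()). Values that fit entirely in
 * the low byte can never be valid kernel pointers, so they are treated as
 * "no guest attached".
 */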
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct page *page;
	unsigned long pfn;
	int ret;

	/* Pin the page first. */
	ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
			     IOMMU_READ | IOMMU_WRITE, &pfn);
	if (ret != 1) {
		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
			     gfn, ret);
		return -EINVAL;
	}

	/* Setup DMA mapping. */
	page = pfn_to_page(pfn);
	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
				 PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
		vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
		return -ENOMEM;
	}

	return 0;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	int ret;

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
	WARN_ON(ret != 1);
}

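/*
 * Editor's note: an illustrative sketch of the pin/map lifecycle implemented
 * above (assumed usage, not driver code). A guest page must stay pinned for
 * as long as its DMA mapping is live:
 *
 *	dma_addr_t dma;
 *	if (!gvt_dma_map_page(vgpu, gfn, &dma)) {
 *		// ... device uses 'dma' ...
 *		gvt_dma_unmap_page(vgpu, gfn, dma);	// unmap, then unpin
 *	}
 */
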
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vgpu->vdev.gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vgpu->vdev.dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);

	vgpu->vdev.nr_cache_entries++;
}

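/*
 * Editor's note: the cache above is indexed twice, once by gfn and once by
 * dma_addr, so both lookup directions stay O(log n). Illustrative lookups
 * under vdev.cache_lock (assumed usage):
 *
 *	struct gvt_dma *e1 = __gvt_cache_find_gfn(vgpu, gfn);
 *	struct gvt_dma *e2 = __gvt_cache_find_dma_addr(vgpu, dma_addr);
 *	// e1 and e2 name the same struct gvt_dma when both keys match
 */
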
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
	rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
	kfree(entry);
	vgpu->vdev.nr_cache_entries--;
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;

	for (;;) {
		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.gfn_cache);
		if (!node) {
			mutex_unlock(&vgpu->vdev.cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.gfn_cache = RB_ROOT;
	vgpu->vdev.dma_addr_cache = RB_ROOT;
	vgpu->vdev.nr_cache_entries = 0;
	mutex_init(&vgpu->vdev.cache_lock);
}

static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

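/*
 * Editor's note: a minimal sketch of how a gfn flows through the protect
 * table above (assumed usage). The table is a fixed hash of 1 << 18 buckets
 * keyed directly by gfn, so membership tests are cheap:
 *
 *	kvmgt_protect_table_add(info, gfn);
 *	WARN_ON(!kvmgt_gfn_is_write_protected(info, gfn));
 *	kvmgt_protect_table_del(info, gfn);
 */
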
static ssize_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->vdev.region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};

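/*
 * Editor's note: a hedged sketch of how userspace is expected to read the
 * opregion region registered via these ops (assumed usage, not driver
 * code). Device-specific regions sit after the fixed VFIO PCI regions:
 *
 *	char sig[16];
 *	int index = VFIO_PCI_NUM_REGIONS + 0;	// first device-specific region
 *	pread(device_fd, sig, sizeof(sig),
 *	      VFIO_PCI_INDEX_TO_OFFSET(index));	// yields "IntelGraphicsMem"
 */
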
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	region = krealloc(vgpu->vdev.region,
			(vgpu->vdev.num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->vdev.region = region;
	vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
	vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
	vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
	vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
	vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
	vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
	vgpu->vdev.num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	vgpu->vdev.vfio_device = vfio_device_get_from_dev(
		mdev_dev(vgpu->vdev.mdev));
	if (!vgpu->vdev.vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}

static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/* Each vgpu has its own opregion, although VFIO will create another
	 * one later. This one is used to expose the opregion to VFIO; the
	 * one VFIO creates later is what the guest actually uses.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
		return;

	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		struct gvt_dma *entry;
		unsigned long iov_pfn, end_iov_pfn;

		iov_pfn = unmap->iova >> PAGE_SHIFT;
		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

		mutex_lock(&vgpu->vdev.cache_lock);
		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
			if (!entry)
				continue;

			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
			__gvt_cache_remove_entry(vgpu, entry);
		}
		mutex_unlock(&vgpu->vdev.cache_lock);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	intel_gvt_ops->vgpu_deactivate(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					vdev.release_work);

	__intel_vgpu_release(vgpu);
}

static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

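/*
 * Editor's note: a worked example for the BAR decoding above. For a 64-bit
 * memory BAR whose config space dwords read 0xa0000004 (low) and 0x00000001
 * (high): mem_type is PCI_BASE_ADDRESS_MEM_TYPE_64 (0x4), the flag bits are
 * masked off via PCI_BASE_ADDRESS_MEM_MASK, and the result is
 * ((u64)0x1 << 32) | 0xa0000000 == 0x1a0000000.
 */
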
static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
			     void *buf, unsigned int count, bool is_write)
{
	uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
		void *buf, unsigned long count, bool is_write)
{
	void *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}

static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}

static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

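/*
 * Editor's note: the loop above splits a request into naturally aligned
 * 4/2/1-byte accesses. For example, a 7-byte read at *ppos == 1 is issued
 * as 1 byte @1, then 2 bytes @2, then 4 bytes @4, matching the alignment
 * expectations of the emulated config-space and MMIO handlers.
 */
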
static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff = 0;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

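/*
 * Editor's note: a worked example for the index computation above. With
 * PAGE_SHIFT == 12, the region index is vm_pgoff >> 28; an mmap() at file
 * offset VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR2_REGION_INDEX) gives
 * vm_pgoff == 2 << 28, i.e. index 2 (BAR2, the only mappable region here).
 */
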
static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	}

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, uint32_t flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS +
				vgpu->vdev.num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		size_t size;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->header.version = 1;
			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vgpu->vdev.num_regions)
					return -EINVAL;

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vgpu->vdev.region[i].size;
				info.flags = vgpu->vdev.region[i].flags;

				cap_type.type = vgpu->vdev.region[i].type;
				cap_type.subtype = vgpu->vdev.region[i].subtype;

				ret = vfio_info_add_capability(&caps,
							&cap_type.header,
							sizeof(cap_type));
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					&sparse->header, sizeof(*sparse) +
					(sparse->nr_areas *
						sizeof(*sparse->areas)));
				kfree(sparse);
				if (ret)
					return ret;
				break;
			default:
				return -EINVAL;
			}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
		struct vfio_device_gfx_plane_info dmabuf;
		int ret = 0;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    dmabuf_id);
		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
			return -EFAULT;
		if (dmabuf.argsz < minsz)
			return -EINVAL;

		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
		if (ret != 0)
			return ret;

		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
								-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
		__u32 dmabuf_id;
		__s32 dmabuf_fd;

		if (get_user(dmabuf_id, (__u32 __user *)arg))
			return -EFAULT;

		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
		return dmabuf_fd;

	}

	return 0;
}

static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);
	}
	return sprintf(buf, "\n");
}

static ssize_t
hw_id_show(struct device *dev, struct device_attribute *attr,
	   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%u\n",
			       vgpu->submission.shadow_ctx->hw_id);
	}
	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	&dev_attr_hw_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};

static struct mdev_parent_ops intel_vgpu_ops = {
	.mdev_attr_groups       = intel_vgpu_groups,
	.create			= intel_vgpu_create,
	.remove			= intel_vgpu_remove,

	.open			= intel_vgpu_open,
	.release		= intel_vgpu_release,

	.read			= intel_vgpu_read,
	.write			= intel_vgpu_write,
	.mmap			= intel_vgpu_mmap,
	.ioctl			= intel_vgpu_ioctl,
};

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	struct attribute **kvm_type_attrs;
	struct attribute_group **kvm_vgpu_type_groups;

	intel_gvt_ops = ops;
	if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
			&kvm_vgpu_type_groups))
		return -EFAULT;
	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
	mdev_unregister_device(dev);
}

static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return 0;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

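/*
 * Editor's note: once a gfn is write-protected above, KVM forwards every
 * guest write to that page to the notifier callbacks below: track_write
 * lands in kvmgt_page_track_write(), which hands the write to GVT's
 * write_protect_handler so the shadow page tables stay in sync.
 */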
static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
						     (void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}

static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;
	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	mutex_init(&vgpu->dmabuf_lock);
	init_completion(&vgpu->vblank_done);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	info->debugfs_cache_entries = debugfs_create_ulong(
						"kvmgt_nr_cache_entries",
						0444, vgpu->debugfs,
						&vgpu->vdev.nr_cache_entries);
	if (!info->debugfs_cache_entries)
		gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");

	return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	debugfs_remove(info->debugfs_cache_entries);

	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);
	vfree(info);

	return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
	/* nothing to do here */
	return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
	/* nothing to do here */
}

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	/*
	 * The guest can raise an MSI before userspace installs an eventfd
	 * via VFIO_DEVICE_SET_IRQS; don't dereference a NULL trigger then.
	 */
	if (!vgpu->vdev.msi_trigger)
		return -EFAULT;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}

static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	struct kvmgt_guest_info *info;
	kvm_pfn_t pfn;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;

	pfn = gfn_to_pfn(info->kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return INTEL_GVT_INVALID_ADDR;

	return pfn;
}

int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
		dma_addr_t *dma_addr)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct gvt_dma *entry;
	int ret;

	if (!handle_valid(handle))
		return -EINVAL;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	mutex_lock(&info->vgpu->vdev.cache_lock);

	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
	if (!entry) {
		ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
		if (ret) {
			mutex_unlock(&info->vgpu->vdev.cache_lock);
			return ret;
		}
		__gvt_cache_add(info->vgpu, gfn, *dma_addr);
	} else {
		kref_get(&entry->ref);
		*dma_addr = entry->dma_addr;
	}

	mutex_unlock(&info->vgpu->vdev.cache_lock);
	return 0;
}

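/*
 * Editor's note: an illustrative sketch of the refcounting above (assumed
 * usage). The first map of a gfn pins and DMA-maps it with ref == 1; later
 * maps of the same gfn only take a reference:
 *
 *	kvmgt_dma_map_guest_page(handle, gfn, &dma);	// ref = 1, pin + map
 *	kvmgt_dma_map_guest_page(handle, gfn, &dma);	// ref = 2, cache hit
 *	kvmgt_dma_unmap_guest_page(handle, dma);	// ref = 1
 *	kvmgt_dma_unmap_guest_page(handle, dma);	// ref = 0, unmap + unpin
 */
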
static void __gvt_dma_release(struct kref *ref)
{
	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
	__gvt_cache_remove_entry(entry->vgpu, entry);
}

void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
	struct kvmgt_guest_info *info;
	struct gvt_dma *entry;

	if (!handle_valid(handle))
		return;

	info = (struct kvmgt_guest_info *)handle;

	mutex_lock(&info->vgpu->vdev.cache_lock);
	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
	if (entry)
		kref_put(&entry->ref, __gvt_dma_release);
	mutex_unlock(&info->vgpu->vdev.cache_lock);
}

static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int idx, ret;
	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	if (kthread)
		use_mm(kvm->mm);

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kthread)
		unuse_mm(kvm->mm);

	return ret;
}

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}

static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;

	if (!handle_valid(handle))
		return false;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	return kvm_is_visible_gfn(kvm, gfn);
}

struct intel_gvt_mpt kvmgt_mpt = {
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.enable_page_track = kvmgt_page_track_add,
	.disable_page_track = kvmgt_page_track_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
	.dma_map_guest_page = kvmgt_dma_map_guest_page,
	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
	.set_opregion = kvmgt_set_opregion,
	.get_vfio_device = kvmgt_get_vfio_device,
	.put_vfio_device = kvmgt_put_vfio_device,
	.is_valid_gfn = kvmgt_is_valid_gfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);

static int __init kvmgt_init(void)
{
	return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");