/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/eventfd.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mdev.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/rbtree.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_edid.h>
#include <drm/drm_print.h>

#include "gvt.h"
#include "i915_drv.h"
#include "intel_gvt.h"
#include "sched_policy.h"

MODULE_IMPORT_NS("DMA_BUF");
MODULE_IMPORT_NS("I915_GVT");

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT	40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK	(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

#define EDID_BLOB_OFFSET (PAGE_SIZE/2)

#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32 type;
	u32 subtype;
	size_t size;
	u32 flags;
	const struct intel_vgpu_regops *ops;
	void *data;
};

struct vfio_edid_region {
	struct vfio_region_gfx_edid vfio_edid_regs;
	void *edid_blob;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;
};

#define vfio_dev_to_vgpu(vfio_dev) \
	container_of((vfio_dev), struct intel_vgpu, vfio_device)

static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
		struct kvm_page_track_notifier_node *node);
static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
		struct kvm_page_track_notifier_node *node);

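/*
 * Describe a vGPU type for the mdev "description" sysfs attribute:
 * low/high graphics memory sizes, fence count, resolution and weight.
 */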
static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
{
	struct intel_vgpu_type *type =
		container_of(mtype, struct intel_vgpu_type, type);

	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
		       "fence: %d\nresolution: %s\n"
		       "weight: %d\n",
		       BYTES_TO_MB(type->conf->low_mm),
		       BYTES_TO_MB(type->conf->high_mm),
		       type->conf->fence, vgpu_edid_str(type->conf->edid),
		       type->conf->weight);
}

static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size)
{
	vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
			 DIV_ROUND_UP(size, PAGE_SIZE));
}

/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, struct page **page)
{
	int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct page *base_page = NULL;
	int npage;
	int ret;

	/*
	 * We pin the pages one-by-one to avoid allocating a big array
	 * on the stack to hold pfns.
	 */
	for (npage = 0; npage < total_pages; npage++) {
		dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
		struct page *cur_page;

		ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
				     IOMMU_READ | IOMMU_WRITE, &cur_page);
		if (ret != 1) {
			gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
				     &cur_iova, ret);
			goto err;
		}

		if (npage == 0)
			base_page = cur_page;
		else if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page)) {
			ret = -EINVAL;
			npage++;
			goto err;
		}
	}

	*page = base_page;
	return 0;
err:
	if (npage)
		gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
	return ret;
}

static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr, unsigned long size)
{
	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
	struct page *page = NULL;
	int ret;

	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
	if (ret)
		return ret;

	/* Setup DMA mapping. */
	*dma_addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx\n",
			     page_to_pfn(page));
		gvt_unpin_guest_page(vgpu, gfn, size);
		return -ENOMEM;
	}

	return 0;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct device *dev = vgpu->gvt->gt->i915->drm.dev;

	dma_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL);
	gvt_unpin_guest_page(vgpu, gfn, size);
}

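/*
 * The pinned-page cache is indexed twice: by guest frame number and by
 * DMA address. Both lookup helpers below expect vgpu->cache_lock to be
 * held by the caller.
 */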
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = vgpu->dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

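/*
 * Allocate a new cache entry and link it into both rb-trees; the
 * caller must hold vgpu->cache_lock.
 */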
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;

	new = kzalloc_obj(struct gvt_dma);
	if (!new)
		return -ENOMEM;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	new->size = size;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vgpu->gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vgpu->dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache);

	vgpu->nr_cache_entries++;
	return 0;
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
		struct gvt_dma *entry)
{
	rb_erase(&entry->gfn_node, &vgpu->gfn_cache);
	rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache);
	kfree(entry);
	vgpu->nr_cache_entries--;
}

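/*
 * Tear the whole cache down, unpinning and unmapping every entry;
 * cache_lock is taken and released per entry rather than held across
 * the whole walk.
 */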
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;

	for (;;) {
		mutex_lock(&vgpu->cache_lock);
		node = rb_first(&vgpu->gfn_cache);
		if (!node) {
			mutex_unlock(&vgpu->cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->gfn_cache = RB_ROOT;
	vgpu->dma_addr_cache = RB_ROOT;
	vgpu->nr_cache_entries = 0;
	mutex_init(&vgpu->cache_lock);
}

static void kvmgt_protect_table_init(struct intel_vgpu *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct intel_vgpu *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	lockdep_assert_held(&info->vgpu_lock);

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc_obj(struct kvmgt_pgfn, GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

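/* The OpRegion is exposed to user space read-only. */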
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vgpu->region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};

static bool edid_valid(const void *edid, size_t size)
{
	const struct drm_edid *drm_edid;
	bool is_valid;

	drm_edid = drm_edid_alloc(edid, size);
	is_valid = drm_edid_valid(drm_edid);
	drm_edid_free(drm_edid);

	return is_valid;
}

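/*
 * Handle 4-byte accesses to the EDID control registers. Writing
 * link_state emulates a hotplug event (the blob must contain a valid
 * EDID before the link may be brought up) and writing edid_size sets
 * the current blob size; the remaining registers are read-only.
 */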
static int handle_edid_regs(struct intel_vgpu *vgpu,
			struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
	unsigned int data;

	if (offset + count > sizeof(*regs))
		return -EINVAL;

	if (count != 4)
		return -EINVAL;

	if (is_write) {
		data = *((unsigned int *)buf);
		switch (offset) {
		case offsetof(struct vfio_region_gfx_edid, link_state):
			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
				if (!edid_valid(region->edid_blob, EDID_SIZE)) {
					gvt_vgpu_err("invalid EDID blob\n");
					return -EINVAL;
				}
				intel_vgpu_emulate_hotplug(vgpu, true);
			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
				intel_vgpu_emulate_hotplug(vgpu, false);
			else {
				gvt_vgpu_err("invalid EDID link state %d\n",
					     regs->link_state);
				return -EINVAL;
			}
			regs->link_state = data;
			break;
		case offsetof(struct vfio_region_gfx_edid, edid_size):
			if (data > regs->edid_max_size) {
				gvt_vgpu_err("EDID size is bigger than %d!\n",
					     regs->edid_max_size);
				return -EINVAL;
			}
			regs->edid_size = data;
			break;
		default:
			/* read-only regs */
			gvt_vgpu_err("write read-only EDID region at offset %d\n",
				     offset);
			return -EPERM;
		}
	} else {
		memcpy(buf, (char *)regs + offset, count);
	}

	return count;
}

static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	if (offset + count > region->vfio_edid_regs.edid_size)
		return -EINVAL;

	if (is_write)
		memcpy(region->edid_blob + offset, buf, count);
	else
		memcpy(buf, region->edid_blob + offset, count);

	return count;
}

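/*
 * Region layout: the vfio_region_gfx_edid control registers sit at the
 * start of the region and the EDID blob itself at EDID_BLOB_OFFSET.
 */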
static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	struct vfio_edid_region *region = vgpu->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos < region->vfio_edid_regs.edid_offset) {
		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
	} else {
		pos -= EDID_BLOB_OFFSET;
		ret = handle_edid_blob(region, buf, count, pos, iswrite);
	}

	if (ret < 0)
		gvt_vgpu_err("failed to access EDID region\n");

	return ret;
}

static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
	kfree(region->data);
}

static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
	.rw = intel_vgpu_reg_rw_edid,
	.release = intel_vgpu_reg_release_edid,
};

static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	region = krealloc(vgpu->region,
			(vgpu->num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->region = region;
	vgpu->region[vgpu->num_regions].type = type;
	vgpu->region[vgpu->num_regions].subtype = subtype;
	vgpu->region[vgpu->num_regions].ops = ops;
	vgpu->region[vgpu->num_regions].size = size;
	vgpu->region[vgpu->num_regions].flags = flags;
	vgpu->region[vgpu->num_regions].data = data;
	vgpu->num_regions++;
	return 0;
}

int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
{
	void *base;
	int ret;

	/*
	 * Each vGPU has its own OpRegion, although VFIO will create
	 * another one later. This one is used to expose the OpRegion
	 * to VFIO; the one VFIO creates later is what the guest
	 * actually uses.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, INTEL_GVT_OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}

int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
{
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
	struct vfio_edid_region *base;
	int ret;

	base = kzalloc_obj(*base);
	if (!base)
		return -ENOMEM;

	/* TODO: Add multi-port and EDID extension block support */
	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
	base->vfio_edid_regs.edid_size = EDID_SIZE;
	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
	base->edid_blob = port->edid->edid_block;

	ret = intel_vgpu_register_reg(vgpu,
			VFIO_REGION_TYPE_GFX,
			VFIO_REGION_SUBTYPE_GFX_EDID,
			&intel_vgpu_regops_edid, EDID_SIZE,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_CAPS, base);

	return ret;
}

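/*
 * VFIO dma_unmap callback: drop every cached pinning that falls inside
 * the unmapped IOVA range.
 */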
static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
				 u64 length)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	struct gvt_dma *entry;
	u64 iov_pfn = iova >> PAGE_SHIFT;
	u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;

	mutex_lock(&vgpu->cache_lock);
	for (; iov_pfn < end_iov_pfn; iov_pfn++) {
		entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
		if (!entry)
			continue;

		gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
				   entry->size);
		__gvt_cache_remove_entry(vgpu, entry);
	}
	mutex_unlock(&vgpu->cache_lock);
}

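/*
 * A KVM instance may drive at most one vGPU: check whether another
 * attached vGPU is already bound to this VM's kvm.
 */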
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
{
	struct intel_vgpu *itr;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
			continue;

		if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}

static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	int ret;

	if (__kvmgt_vgpu_exist(vgpu))
		return -EEXIST;

	vgpu->track_node.track_write = kvmgt_page_track_write;
	vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
	ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
					       &vgpu->track_node);
	if (ret) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return ret;
	}

	set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);

	debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
			     &vgpu->nr_cache_entries);

	intel_gvt_activate_vgpu(vgpu);

	return 0;
}


static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
	struct eventfd_ctx *trigger;

	trigger = vgpu->msi_trigger;
	if (trigger) {
		eventfd_ctx_put(trigger);
		vgpu->msi_trigger = NULL;
	}
}

static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);

	intel_gvt_release_vgpu(vgpu);

	clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);

	debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);

	kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
					   &vgpu->track_node);

	kvmgt_protect_table_destroy(vgpu);
	gvt_cache_destroy(vgpu);

	WARN_ON(vgpu->nr_cache_entries);

	vgpu->gfn_cache = RB_ROOT;
	vgpu->dma_addr_cache = RB_ROOT;

	intel_vgpu_release_msi_eventfd_ctx(vgpu);
}

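/*
 * Decode the guest-programmed base address of a memory BAR from the
 * virtual config space, handling both 32-bit and 64-bit BAR types.
 */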
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
			     void *buf, unsigned int count, bool is_write)
{
	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_vgpu_emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_vgpu_emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
		void *buf, unsigned long count, bool is_write)
{
	void __iomem *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}

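/*
 * Dispatch a user access by VFIO region index: PCI config space and
 * BAR0 are fully emulated, BAR2 is backed by the aperture, and
 * device-specific regions are handled by their own ops.
 */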
static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool is_write)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vgpu->region[index].ops->rw(vgpu, buf, count,
						   ppos, is_write);
	}

	return ret == 0 ? count : ret;
}

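/*
 * Return true if the access at *ppos targets a GGTT entry inside the
 * BAR0 MMIO range; such accesses take the 8-byte path below.
 */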
static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct intel_gvt *gvt = vgpu->gvt;
	int offset;

	/* Only allow MMIO GGTT entry access */
	if (index != PCI_BASE_ADDRESS_0)
		return false;

	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return (offset >= gvt->device_info.gtt_start_offset &&
		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
			true : false;
}

static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support 8-byte GGTT entry reads */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(vgpu, ppos)) {
			u64 val;

			ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support 8-byte GGTT entry writes */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(vgpu, ppos)) {
			u64 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(vgpu, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(vgpu, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

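/*
 * Only the BAR2 aperture may be mmapped; translate the requested
 * offset into host aperture page frames before remapping.
 */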
static int intel_vgpu_mmap(struct vfio_device *vfio_dev,
			   struct vm_area_struct *vma)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff, req_start;
	pgprot_t pg_prot;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (!intel_vgpu_in_aperture(vgpu, req_start))
		return -EINVAL;
	if (req_start + req_size >
	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
		return -EINVAL;

	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->msi_trigger = trigger;
	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
		intel_vgpu_release_msi_eventfd_ctx(vgpu);

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
		    unsigned int start, unsigned int count, u32 flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

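/*
 * Fill in vfio_region_info for a region index; BAR2 additionally
 * reports a sparse mmap capability covering the vGPU aperture.
 */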
static int intel_vgpu_ioctl_get_region_info(struct vfio_device *vfio_dev,
					    struct vfio_region_info *info,
					    struct vfio_info_cap *caps)
{
	struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	int nr_areas = 1;
	int cap_type_id;
	unsigned int i;
	int ret;

	switch (info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
		info->size = vgpu->gvt->device_info.cfg_space_size;
		info->flags = VFIO_REGION_INFO_FLAG_READ |
			      VFIO_REGION_INFO_FLAG_WRITE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
		info->size = vgpu->cfg_space.bar[info->index].size;
		if (!info->size) {
			info->flags = 0;
			break;
		}

		info->flags = VFIO_REGION_INFO_FLAG_READ |
			      VFIO_REGION_INFO_FLAG_WRITE;
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
		info->size = 0;
		info->flags = 0;
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
		info->flags = VFIO_REGION_INFO_FLAG_CAPS |
			      VFIO_REGION_INFO_FLAG_MMAP |
			      VFIO_REGION_INFO_FLAG_READ |
			      VFIO_REGION_INFO_FLAG_WRITE;
		info->size = gvt_aperture_sz(vgpu->gvt);

		sparse = kzalloc_flex(*sparse, areas, nr_areas);
		if (!sparse)
			return -ENOMEM;

		sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
		sparse->header.version = 1;
		sparse->nr_areas = nr_areas;
		cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
		sparse->areas[0].offset =
				PAGE_ALIGN(vgpu_aperture_offset(vgpu));
		sparse->areas[0].size = vgpu_aperture_sz(vgpu);
		break;

	case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
		info->size = 0;
		info->flags = 0;

		gvt_dbg_core("get region info bar:%d\n", info->index);
		break;

	case VFIO_PCI_ROM_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
		info->size = 0;
		info->flags = 0;

		gvt_dbg_core("get region info index:%d\n", info->index);
		break;
	default: {
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1
		};

		if (info->index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
			return -EINVAL;
		info->index = array_index_nospec(
				info->index,
				VFIO_PCI_NUM_REGIONS + vgpu->num_regions);

		i = info->index - VFIO_PCI_NUM_REGIONS;

		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
		info->size = vgpu->region[i].size;
		info->flags = vgpu->region[i].flags;

		cap_type.type = vgpu->region[i].type;
		cap_type.subtype = vgpu->region[i].subtype;

		ret = vfio_info_add_capability(caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;
	}
	}

	if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
		ret = -EINVAL;
		if (cap_type_id == VFIO_REGION_INFO_CAP_SPARSE_MMAP) {
			ret = vfio_info_add_capability(
					caps, &sparse->header,
					struct_size(sparse, areas,
						    sparse->nr_areas));
		}
		if (ret) {
			kfree(sparse);
			return ret;
		}
	}

	kfree(sparse);
	return 0;
}

static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS +
				vgpu->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!is_power_of_2(hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) ||
		    !is_power_of_2(hdr.flags & VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			if (!hdr.count)
				return -EINVAL;

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("vfio_set_irqs_validate_and_prepare failed\n");
				return ret;
			}

			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					  hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_reset_vgpu(vgpu);
		return 0;
	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
		struct vfio_device_gfx_plane_info dmabuf = {};
		int ret = 0;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    dmabuf_id);
		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
			return -EFAULT;
		if (dmabuf.argsz < minsz)
			return -EINVAL;

		ret = intel_vgpu_query_plane(vgpu, &dmabuf);
		if (ret != 0)
			return ret;

		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
		__u32 dmabuf_id;

		if (get_user(dmabuf_id, (__u32 __user *)arg))
			return -EFAULT;
		return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
	}

	return -ENOTTY;
}

static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct intel_vgpu *vgpu = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", vgpu->id);
}

static DEVICE_ATTR_RO(vgpu_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};

static int intel_vgpu_init_dev(struct vfio_device *vfio_dev)
{
	struct mdev_device *mdev = to_mdev_device(vfio_dev->dev);
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	struct intel_vgpu_type *type =
		container_of(mdev->type, struct intel_vgpu_type, type);
	int ret;

	vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
	ret = intel_gvt_create_vgpu(vgpu, type->conf);
	if (ret)
		return ret;

	kvmgt_protect_table_init(vgpu);
	gvt_cache_init(vgpu);

	return 0;
}

static void intel_vgpu_release_dev(struct vfio_device *vfio_dev)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);

	intel_gvt_destroy_vgpu(vgpu);
}

static const struct vfio_device_ops intel_vgpu_dev_ops = {
	.init = intel_vgpu_init_dev,
	.release = intel_vgpu_release_dev,
	.open_device = intel_vgpu_open_device,
	.close_device = intel_vgpu_close_device,
	.read = intel_vgpu_read,
	.write = intel_vgpu_write,
	.mmap = intel_vgpu_mmap,
	.ioctl = intel_vgpu_ioctl,
	.get_region_info_caps = intel_vgpu_ioctl_get_region_info,
	.dma_unmap = intel_vgpu_dma_unmap,
	.bind_iommufd = vfio_iommufd_emulated_bind,
	.unbind_iommufd = vfio_iommufd_emulated_unbind,
	.attach_ioas = vfio_iommufd_emulated_attach_ioas,
	.detach_ioas = vfio_iommufd_emulated_detach_ioas,
};


static int intel_vgpu_probe(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu;
	int ret;

	vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
				 &intel_vgpu_dev_ops);
	if (IS_ERR(vgpu)) {
		gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
		return PTR_ERR(vgpu);
	}

	dev_set_drvdata(&mdev->dev, vgpu);
	ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
	if (ret)
		goto out_put_vdev;

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	return 0;

out_put_vdev:
	vfio_put_device(&vgpu->vfio_device);
	return ret;
}

static void intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&vgpu->vfio_device);
	vfio_put_device(&vgpu->vfio_device);
}

static unsigned int intel_vgpu_get_available(struct mdev_type *mtype)
{
	struct intel_vgpu_type *type =
		container_of(mtype, struct intel_vgpu_type, type);
	struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;

	mutex_lock(&gvt->lock);
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;
	mutex_unlock(&gvt->lock);

	return min3(low_gm_avail / type->conf->low_mm,
		    high_gm_avail / type->conf->high_mm,
		    fence_avail / type->conf->fence);
}

static struct mdev_driver intel_vgpu_mdev_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.driver = {
		.name = "intel_vgpu_mdev",
		.owner = THIS_MODULE,
		.dev_groups = intel_vgpu_groups,
	},
	.probe = intel_vgpu_probe,
	.remove = intel_vgpu_remove,
	.get_available = intel_vgpu_get_available,
	.show_description = intel_vgpu_show_description,
};

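/*
 * Write-protect a guest page through KVM page tracking so that guest
 * writes to it are forwarded to kvmgt_page_track_write().
 */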
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
	int r;

	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
		return -ESRCH;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return 0;

	r = kvm_write_track_add_gfn(info->vfio_device.kvm, gfn);
	if (r)
		return r;

	kvmgt_protect_table_add(info, gfn);
	return 0;
}

int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
	int r;

	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
		return -ESRCH;

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		return 0;

	r = kvm_write_track_remove_gfn(info->vfio_device.kvm, gfn);
	if (r)
		return r;

	kvmgt_protect_table_del(info, gfn);
	return 0;
}

static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct intel_vgpu *info =
		container_of(node, struct intel_vgpu, track_node);

	mutex_lock(&info->vgpu_lock);

	if (kvmgt_gfn_is_write_protected(info, gpa >> PAGE_SHIFT))
		intel_vgpu_page_track_handler(info, gpa,
					      (void *)val, len);

	mutex_unlock(&info->vgpu_lock);
}

static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
		struct kvm_page_track_notifier_node *node)
{
	unsigned long i;
	struct intel_vgpu *info =
		container_of(node, struct intel_vgpu, track_node);

	mutex_lock(&info->vgpu_lock);

	for (i = 0; i < nr_pages; i++) {
		if (kvmgt_gfn_is_write_protected(info, gfn + i))
			kvmgt_protect_table_del(info, gfn + i);
	}

	mutex_unlock(&info->vgpu_lock);
}

void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
{
	int i;

	if (!vgpu->region)
		return;

	for (i = 0; i < vgpu->num_regions; i++)
		if (vgpu->region[i].ops->release)
			vgpu->region[i].ops->release(vgpu,
					&vgpu->region[i]);
	vgpu->num_regions = 0;
	kfree(vgpu->region);
	vgpu->region = NULL;
}

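/*
 * Look up (or create) a cached DMA mapping for a guest page range. A
 * cache hit with a different size is unmapped and re-mapped; otherwise
 * the existing mapping's refcount is raised.
 */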
int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, dma_addr_t *dma_addr)
{
	struct gvt_dma *entry;
	int ret;

	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
		return -EINVAL;

	mutex_lock(&vgpu->cache_lock);

	entry = __gvt_cache_find_gfn(vgpu, gfn);
	if (!entry) {
		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
		if (ret)
			goto err_unlock;

		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
		if (ret)
			goto err_unmap;
	} else if (entry->size != size) {
		/* the same gfn with different size: unmap and re-map */
		gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
		__gvt_cache_remove_entry(vgpu, entry);

		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
		if (ret)
			goto err_unlock;

		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
		if (ret)
			goto err_unmap;
	} else {
		kref_get(&entry->ref);
		*dma_addr = entry->dma_addr;
	}

	mutex_unlock(&vgpu->cache_lock);
	return 0;

err_unmap:
	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
	mutex_unlock(&vgpu->cache_lock);
	return ret;
}

int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
	struct gvt_dma *entry;
	int ret = 0;

	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
		return -EINVAL;

	mutex_lock(&vgpu->cache_lock);
	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
	if (entry)
		kref_get(&entry->ref);
	else
		ret = -ENOMEM;
	mutex_unlock(&vgpu->cache_lock);

	return ret;
}

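/* kref release callback: undo the mapping once the last user is gone. */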
static void __gvt_dma_release(struct kref *ref)
{
	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
			   entry->size);
	__gvt_cache_remove_entry(entry->vgpu, entry);
}

void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct gvt_dma *entry;

	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
		return;

	mutex_lock(&vgpu->cache_lock);
	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
	if (entry)
		kref_put(&entry->ref, __gvt_dma_release);
	mutex_unlock(&vgpu->cache_lock);
}

static void init_device_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);

	info->max_support_vgpus = 8;
	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
	info->mmio_size = 2 * 1024 * 1024;
	info->mmio_bar = 0;
	info->gtt_start_offset = 8 * 1024 * 1024;
	info->gtt_entry_size = 8;
	info->gtt_entry_size_shift = 3;
	info->gmadr_bytes_in_cmd = 8;
	info->max_surface_size = 36 * 1024 * 1024;
	info->msi_cap_offset = pdev->msi_cap;
}

static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id;

	mutex_lock(&gvt->lock);
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
				       (void *)&gvt->service_request)) {
			if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
				intel_vgpu_emulate_vblank(vgpu);
		}
	}
	mutex_unlock(&gvt->lock);
}

static int gvt_service_thread(void *data)
{
	struct intel_gvt *gvt = (struct intel_gvt *)data;
	int ret;

	gvt_dbg_core("service thread start\n");

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(gvt->service_thread_wq,
				kthread_should_stop() || gvt->service_request);

		if (kthread_should_stop())
			break;

		if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
			continue;

		intel_gvt_test_and_emulate_vblank(gvt);

		if (test_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request) ||
			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
					(void *)&gvt->service_request)) {
			intel_gvt_schedule(gvt);
		}
	}

	return 0;
}

static void clean_service_thread(struct intel_gvt *gvt)
{
	kthread_stop(gvt->service_thread);
}

static int init_service_thread(struct intel_gvt *gvt)
{
	init_waitqueue_head(&gvt->service_thread_wq);

	gvt->service_thread = kthread_run(gvt_service_thread,
			gvt, "gvt_service_thread");
	if (IS_ERR(gvt->service_thread)) {
		gvt_err("failed to start service thread.\n");
		return PTR_ERR(gvt->service_thread);
	}
	return 0;
}

/**
 * intel_gvt_clean_device - clean a GVT device
 * @i915: i915 private
 *
 * This function is called at the driver unloading stage to free the
 * resources owned by a GVT device.
 */
static void intel_gvt_clean_device(struct drm_i915_private *i915)
{
	struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);

	if (drm_WARN_ON(&i915->drm, !gvt))
		return;

	mdev_unregister_parent(&gvt->parent);
	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
	intel_gvt_clean_vgpu_types(gvt);

	intel_gvt_debugfs_clean(gvt);
	clean_service_thread(gvt);
	intel_gvt_clean_cmd_parser(gvt);
	intel_gvt_clean_sched_policy(gvt);
	intel_gvt_clean_workload_scheduler(gvt);
	intel_gvt_clean_gtt(gvt);
	intel_gvt_free_firmware(gvt);
	intel_gvt_clean_mmio_info(gvt);
	idr_destroy(&gvt->vgpu_idr);

	/* i915->gvt was already zeroed by fetch_and_zero() above. */
	kfree(gvt);
}


/**
 * intel_gvt_init_device - initialize a GVT device
 * @i915: drm i915 private data
 *
 * This function is called at the initialization stage, to initialize
 * necessary GVT components.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static int intel_gvt_init_device(struct drm_i915_private *i915)
{
	struct intel_gvt *gvt;
	struct intel_vgpu *vgpu;
	int ret;

	if (drm_WARN_ON(&i915->drm, i915->gvt))
		return -EEXIST;

	gvt = kzalloc_obj(struct intel_gvt);
	if (!gvt)
		return -ENOMEM;

	gvt_dbg_core("init gvt device\n");

	idr_init_base(&gvt->vgpu_idr, 1);
	spin_lock_init(&gvt->scheduler.mmio_context_lock);
	mutex_init(&gvt->lock);
	mutex_init(&gvt->sched_lock);
	gvt->gt = to_gt(i915);
	i915->gvt = gvt;

	init_device_info(gvt);

	ret = intel_gvt_setup_mmio_info(gvt);
	if (ret)
		goto out_clean_idr;

	intel_gvt_init_engine_mmio_context(gvt);

	ret = intel_gvt_load_firmware(gvt);
	if (ret)
		goto out_clean_mmio_info;

	ret = intel_gvt_init_irq(gvt);
	if (ret)
		goto out_free_firmware;

	ret = intel_gvt_init_gtt(gvt);
	if (ret)
		goto out_free_firmware;

	ret = intel_gvt_init_workload_scheduler(gvt);
	if (ret)
		goto out_clean_gtt;

	ret = intel_gvt_init_sched_policy(gvt);
	if (ret)
		goto out_clean_workload_scheduler;

	ret = intel_gvt_init_cmd_parser(gvt);
	if (ret)
		goto out_clean_sched_policy;

	ret = init_service_thread(gvt);
	if (ret)
		goto out_clean_cmd_parser;

	ret = intel_gvt_init_vgpu_types(gvt);
	if (ret)
		goto out_clean_thread;

	vgpu = intel_gvt_create_idle_vgpu(gvt);
	if (IS_ERR(vgpu)) {
		ret = PTR_ERR(vgpu);
		gvt_err("failed to create idle vgpu\n");
		goto out_clean_types;
	}
	gvt->idle_vgpu = vgpu;

	intel_gvt_debugfs_init(gvt);

	ret = mdev_register_parent(&gvt->parent, i915->drm.dev,
				   &intel_vgpu_mdev_driver,
				   gvt->mdev_types, gvt->num_types);
	if (ret)
		goto out_destroy_idle_vgpu;

	gvt_dbg_core("gvt device initialization is done\n");
	return 0;

out_destroy_idle_vgpu:
	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
	intel_gvt_debugfs_clean(gvt);
out_clean_types:
	intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
	clean_service_thread(gvt);
out_clean_cmd_parser:
	intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
	intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
	intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
	intel_gvt_clean_gtt(gvt);
out_free_firmware:
	intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
	intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
	idr_destroy(&gvt->vgpu_idr);
	kfree(gvt);
	i915->gvt = NULL;
	return ret;
}

static void intel_gvt_pm_resume(struct drm_i915_private *i915)
{
	struct intel_gvt *gvt = i915->gvt;

	intel_gvt_restore_fence(gvt);
	intel_gvt_restore_mmio(gvt);
	intel_gvt_restore_ggtt(gvt);
}

static const struct intel_vgpu_ops intel_gvt_vgpu_ops = {
	.init_device = intel_gvt_init_device,
	.clean_device = intel_gvt_clean_device,
	.pm_resume = intel_gvt_pm_resume,
};

static int __init kvmgt_init(void)
{
	int ret;

	ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops);
	if (ret)
		return ret;

	ret = mdev_register_driver(&intel_vgpu_mdev_driver);
	if (ret)
		intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
	return ret;
}

static void __exit kvmgt_exit(void)
{
	mdev_unregister_driver(&intel_vgpu_mdev_driver);
	intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_DESCRIPTION("Intel mediated pass-through framework for KVM");
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");