Lines matching refs:vgpu (identifier cross-reference hits; the leading number on each line is its line number in the source file, which, judging from the function names, appears to be drivers/gpu/drm/i915/gvt/kvmgt.c)
71 size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
73 void (*release)(struct intel_vgpu *vgpu,
97 struct intel_vgpu *vgpu;
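
The first matches fall in two definitions near the top of the file: the per-region ops vtable (lines 71-73) and the DMA-cache entry (line 97). A hedged reconstruction of both; everything beyond the quoted fragments is an assumption consistent with how the fields are used later in this listing:

/*
 * Reconstructed from the fragments above; the struct names, field
 * order, and every gvt_dma member other than ->vgpu are assumptions.
 */
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
		     size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct gvt_dma {
	struct intel_vgpu *vgpu;	/* back-pointer, line 97 */
	struct rb_node gfn_node;	/* keyed by guest frame number */
	struct rb_node dma_addr_node;	/* keyed by host DMA address */
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;		/* cf. the pin matches near line 1684 */
};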
128 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
131 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
136 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
152 ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
173 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
177 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
180 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
184 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
193 gvt_unpin_guest_page(vgpu, gfn, size);
200 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
203 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
206 gvt_unpin_guest_page(vgpu, gfn, size);
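
Lines 128-206 are the pin/map layer: guest pages are pinned through the VFIO device, then DMA-mapped for the GPU. A minimal sketch of the combined flow, assuming the embedded vfio_device seen in the fragments; the real code pins one page at a time and verifies that multi-page pins are physically contiguous:

/* Assumed headers: <linux/vfio.h>, <linux/iommu.h>, <linux/dma-mapping.h>. */
static int sketch_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
				 unsigned long size, dma_addr_t *dma_addr)
{
	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
	struct page *page;
	int ret;

	/* Pin through VFIO; the IOVA is simply gfn << PAGE_SHIFT. */
	ret = vfio_pin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, 1,
			     IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret >= 0 ? -EFAULT : ret;

	/* Map the pinned page for device DMA. */
	*dma_addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, 1);
		return -ENOMEM;
	}
	return 0;
}

Unmap is the mirror image: dma_unmap_page() followed by vfio_unpin_pages(), as lines 200-206 suggest.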
209 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
212 struct rb_node *node = vgpu->dma_addr_cache.rb_node;
228 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
230 struct rb_node *node = vgpu->gfn_cache.rb_node;
246 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
256 new->vgpu = vgpu;
263 link = &vgpu->gfn_cache.rb_node;
274 rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);
278 link = &vgpu->dma_addr_cache.rb_node;
289 rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache);
291 vgpu->nr_cache_entries++;
295 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
298 rb_erase(&entry->gfn_node, &vgpu->gfn_cache);
299 rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache);
301 vgpu->nr_cache_entries--;
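
Lines 209-291 keep each cache entry on two rbtrees at once, so the driver can translate gfn -> dma_addr (map path) and dma_addr -> gfn (unmap/unpin path) without a linear scan. A sketch of the gfn-keyed side using the stock <linux/rbtree.h> API; the dma_addr tree is maintained identically:

static struct gvt_dma *sketch_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->gfn_cache.rb_node;

	while (node) {
		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static void sketch_cache_insert_gfn(struct intel_vgpu *vgpu, struct gvt_dma *new)
{
	struct rb_node **link = &vgpu->gfn_cache.rb_node, *parent = NULL;

	while (*link) {
		struct gvt_dma *itr = rb_entry(*link, struct gvt_dma, gfn_node);

		parent = *link;
		if (new->gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);
}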
304 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
310 mutex_lock(&vgpu->cache_lock);
311 node = rb_first(&vgpu->gfn_cache);
313 mutex_unlock(&vgpu->cache_lock);
317 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
318 __gvt_cache_remove_entry(vgpu, dma);
319 mutex_unlock(&vgpu->cache_lock);
323 static void gvt_cache_init(struct intel_vgpu *vgpu)
325 vgpu->gfn_cache = RB_ROOT;
326 vgpu->dma_addr_cache = RB_ROOT;
327 vgpu->nr_cache_entries = 0;
328 mutex_init(&vgpu->cache_lock);
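
The teardown loop in lines 304-319 drains the cache one entry at a time, taking cache_lock around each step so a concurrent unmap notifier never sees a half-removed node. A sketch matching the lock/rb_first/unlock pattern visible above (entry freeing elided):

static void sketch_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node;

	for (;;) {
		mutex_lock(&vgpu->cache_lock);
		node = rb_first(&vgpu->gfn_cache);
		if (!node) {
			mutex_unlock(&vgpu->cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->cache_lock);
	}
}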
399 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
404 void *base = vgpu->region[i].data;
408 if (pos >= vgpu->region[i].size || iswrite) {
409 gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
412 count = min(count, (size_t)(vgpu->region[i].size - pos));
418 static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
440 static int handle_edid_regs(struct intel_vgpu *vgpu,
462 intel_vgpu_emulate_hotplug(vgpu, true);
464 intel_vgpu_emulate_hotplug(vgpu, false);
507 static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
513 struct vfio_edid_region *region = vgpu->region[i].data;
517 ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
529 static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
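
Lines 399-412 implement the rw op for the OpRegion region: reject writes, bounds-check the offset against the region size, clamp the count, and copy out of the backing buffer. Reassembled from the fragments (the VFIO offset decoding and the final memcpy are assumptions consistent with the surrounding matches):

static size_t sketch_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
				     size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vgpu->region[i].size - pos));
	memcpy(buf, base + pos, count);
	return count;
}

The EDID region rw op (lines 507-517) has the same shape but dispatches register accesses to handle_edid_regs(), which is where the hotplug emulation at lines 462-464 fires.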
540 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
547 region = krealloc(vgpu->region,
548 (vgpu->num_regions + 1) * sizeof(*region),
553 vgpu->region = region;
554 vgpu->region[vgpu->num_regions].type = type;
555 vgpu->region[vgpu->num_regions].subtype = subtype;
556 vgpu->region[vgpu->num_regions].ops = ops;
557 vgpu->region[vgpu->num_regions].size = size;
558 vgpu->region[vgpu->num_regions].flags = flags;
559 vgpu->region[vgpu->num_regions].data = data;
560 vgpu->num_regions++;
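
Lines 540-560 grow the device-specific region array by one on each registration. krealloc() returns NULL on failure and leaves the old allocation intact, which is why the result lands in a temporary before vgpu->region is updated. A sketch (the regops struct name is an assumption):

static int sketch_register_reg(struct intel_vgpu *vgpu, u32 type, u32 subtype,
			       const struct intel_vgpu_regops *ops,
			       size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	region = krealloc(vgpu->region,
			  (vgpu->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->region = region;
	vgpu->region[vgpu->num_regions].type = type;
	vgpu->region[vgpu->num_regions].subtype = subtype;
	vgpu->region[vgpu->num_regions].ops = ops;
	vgpu->region[vgpu->num_regions].size = size;
	vgpu->region[vgpu->num_regions].flags = flags;
	vgpu->region[vgpu->num_regions].data = data;
	vgpu->num_regions++;
	return 0;
}

intel_gvt_set_opregion (line 564) and intel_gvt_set_edid (line 591) are the two callers visible in this listing.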
564 int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
569 /* Each vgpu has its own opregion, although VFIO would create another
573 base = vgpu_opregion(vgpu)->va;
582 ret = intel_vgpu_register_reg(vgpu,
591 int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
593 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
609 ret = intel_vgpu_register_reg(vgpu,
623 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
628 mutex_lock(&vgpu->cache_lock);
630 entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
634 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
636 __gvt_cache_remove_entry(vgpu, entry);
638 mutex_unlock(&vgpu->cache_lock);
641 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
647 mutex_lock(&vgpu->gvt->lock);
648 for_each_active_vgpu(vgpu->gvt, itr, id) {
652 if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
658 mutex_unlock(&vgpu->gvt->lock);
664 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
667 if (__kvmgt_vgpu_exist(vgpu))
670 vgpu->track_node.track_write = kvmgt_page_track_write;
671 vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
672 ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
673 &vgpu->track_node);
679 set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
681 debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
682 &vgpu->nr_cache_entries);
684 intel_gvt_activate_vgpu(vgpu);
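
Lines 664-684 are the open_device path: refuse a second vGPU on the same KVM instance, hook the KVM page-track notifier (used to write-protect guest page-table pages), mark the vGPU attached, and publish the cache counter in debugfs. A sketch reassembled from the fragments, assuming the VFIO core has already populated vfio_device.kvm:

static int sketch_open_device(struct vfio_device *vfio_dev)
{
	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
	int ret;

	if (__kvmgt_vgpu_exist(vgpu))
		return -EEXIST;

	vgpu->track_node.track_write = kvmgt_page_track_write;
	vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
	ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
					       &vgpu->track_node);
	if (ret)
		return ret;

	set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
	debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
			     &vgpu->nr_cache_entries);
	intel_gvt_activate_vgpu(vgpu);
	return 0;
}

The close path (lines 702-721) undoes each step in reverse and then asserts the DMA cache is empty (the WARN_ON at line 716).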
689 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
693 trigger = vgpu->msi_trigger;
696 vgpu->msi_trigger = NULL;
702 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
704 intel_gvt_release_vgpu(vgpu);
706 clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
708 debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);
710 kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
711 &vgpu->track_node);
713 kvmgt_protect_table_destroy(vgpu);
714 gvt_cache_destroy(vgpu);
716 WARN_ON(vgpu->nr_cache_entries);
718 vgpu->gfn_cache = RB_ROOT;
719 vgpu->dma_addr_cache = RB_ROOT;
721 intel_vgpu_release_msi_eventfd_ctx(vgpu);
724 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
729 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
731 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
736 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
751 static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
754 u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
758 ret = intel_vgpu_emulate_mmio_write(vgpu,
761 ret = intel_vgpu_emulate_mmio_read(vgpu,
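
Lines 724-736 decode a BAR base address out of the vGPU's virtual config space: mask the flag bits off the low dword and, when the BAR advertises a 64-bit memory type, splice in the next dword as the high half. A sketch using the standard macros from <uapi/linux/pci_regs.h> (the 32-bit and 1M memory types need no high dword):

static u64 sketch_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi = 0, mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
		PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
		PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	if (mem_type == PCI_BASE_ADDRESS_MEM_TYPE_64)
		start_hi = *(u32 *)(vgpu->cfg_space.virtual_cfg_space +
				    bar + 4);

	return ((u64)start_hi << 32) | start_lo;
}

intel_vgpu_bar_rw (lines 751-761) then subtracts this base from the access offset and forwards to the MMIO read/write emulators.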
766 static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
768 return off >= vgpu_aperture_offset(vgpu) &&
769 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
772 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
777 if (!intel_vgpu_in_aperture(vgpu, off) ||
778 !intel_vgpu_in_aperture(vgpu, off + count)) {
783 aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
799 static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf,
807 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) {
815 ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
818 ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
822 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
826 ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
836 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
840 return vgpu->region[index].ops->rw(vgpu, buf, count,
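
Lines 799-840 are the central rw dispatcher: VFIO's fixed offset encoding packs a region index into the high bits of *ppos, so one switch routes config-space, BAR, and aperture accesses, and any index past VFIO_PCI_NUM_REGIONS goes to the device-specific region ops registered above. A collapsed sketch (ROM/VGA and unsupported BARs omitted; the return-value normalization mirrors the fragments):

static ssize_t sketch_vgpu_rw(struct intel_vgpu *vgpu, char *buf,
			      size_t count, loff_t *ppos, bool is_write)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		ret = is_write ?
			intel_vgpu_emulate_cfg_write(vgpu, pos, buf, count) :
			intel_vgpu_emulate_cfg_read(vgpu, pos, buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:	/* MMIO, incl. virtual GGTT */
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:	/* graphics aperture */
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	default:
		/* Device-specific regions (OpRegion, EDID, ...). */
		return vgpu->region[index - VFIO_PCI_NUM_REGIONS].ops->rw(
				vgpu, buf, count, ppos, is_write);
	}
	return ret == 0 ? count : ret;
}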
847 static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos)
850 struct intel_gvt *gvt = vgpu->gvt;
858 intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
868 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
877 gtt_entry(vgpu, ppos)) {
880 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
892 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
904 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
916 ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos,
943 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
952 gtt_entry(vgpu, ppos)) {
958 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
970 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
982 ret = intel_vgpu_rw(vgpu, (char *)&val,
994 ret = intel_vgpu_rw(vgpu, &val, sizeof(val),
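
The read/write fops (lines 868-994) split each user access into the largest naturally aligned chunks, and gtt_entry() (line 847) gates the only case where a full 8-byte access is allowed: offsets landing in the virtual GGTT inside BAR0, where a 64-bit PTE must not be torn. A sketch of the read side; writes mirror it with copy_from_user(). Reading through a u64 scratch and copying the low len bytes assumes little-endian, which holds for this x86-only driver:

static ssize_t sketch_read(struct intel_vgpu *vgpu, char __user *ubuf,
			   size_t count, loff_t *ppos)
{
	ssize_t done = 0;

	while (count) {
		u64 val = 0;
		size_t len;

		/* Largest aligned chunk; 8 bytes only for GTT entries. */
		if (count >= 8 && !(*ppos % 8) && gtt_entry(vgpu, ppos))
			len = 8;
		else if (count >= 4 && !(*ppos % 4))
			len = 4;
		else if (count >= 2 && !(*ppos % 2))
			len = 2;
		else
			len = 1;

		if (intel_vgpu_rw(vgpu, (char *)&val, len, ppos, false) != len)
			return -EFAULT;
		if (copy_to_user(ubuf, &val, len))
			return -EFAULT;

		count -= len;
		done += len;
		*ppos += len;
		ubuf += len;
	}
	return done;
}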
1016 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1040 if (!intel_vgpu_in_aperture(vgpu, req_start))
1043 vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1046 pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1051 static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1059 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1067 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1074 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1081 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1095 vgpu->msi_trigger = trigger;
1097 intel_vgpu_release_msi_eventfd_ctx(vgpu);
1102 static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
1106 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1140 return func(vgpu, index, start, count, flags, data);
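
Lines 1051-1140 implement the VFIO_DEVICE_SET_IRQS plumbing: intel_vgpu_set_irqs picks a handler by index/flags, and the MSI handler resolves the userspace eventfd once, so injecting an interrupt later is a single eventfd_signal() on vgpu->msi_trigger. A hedged sketch of that pairing (eventfd_signal() dropped its count argument in v6.8; the two-argument form below is the older one):

static int sketch_set_msi_trigger(struct intel_vgpu *vgpu, int fd)
{
	struct eventfd_ctx *trigger;

	if (fd < 0) {	/* tear-down, cf. line 1097 */
		intel_vgpu_release_msi_eventfd_ctx(vgpu);
		return 0;
	}

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	vgpu->msi_trigger = trigger;	/* line 1095 */
	return 0;
}

static void sketch_inject_msi(struct intel_vgpu *vgpu)
{
	if (vgpu->msi_trigger)
		eventfd_signal(vgpu->msi_trigger, 1);
}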
1146 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1149 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1165 vgpu->num_regions;
1191 info.size = vgpu->gvt->device_info.cfg_space_size;
1197 info.size = vgpu->cfg_space.bar[info.index].size;
1217 info.size = gvt_aperture_sz(vgpu->gvt);
1229 PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1230 sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1256 vgpu->num_regions)
1261 vgpu->num_regions);
1267 info.size = vgpu->region[i].size;
1268 info.flags = vgpu->region[i].flags;
1270 cap_type.type = vgpu->region[i].type;
1271 cap_type.subtype = vgpu->region[i].subtype;
1343 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1365 int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1381 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1387 intel_gvt_reset_vgpu(vgpu);
1400 ret = intel_vgpu_query_plane(vgpu, &dmabuf);
1411 return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
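
The long ioctl switch (lines 1146-1411) answers the standard VFIO queries: device info, per-region info (including a sparse-mmap capability for the aperture, lines 1229-1230, and type/subtype capabilities for the device-specific regions, lines 1270-1271), IRQ info/set, reset, and the GVT-g dmabuf calls. From userspace, probing a region needs nothing but uapi; a self-contained example:

#include <linux/vfio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Query one region of an already-open VFIO device fd.  Returns 0 and
 * fills *info on success; callers re-issue with a larger argsz when a
 * capability chain does not fit in the base struct. */
static int query_region(int device_fd, unsigned int index,
			struct vfio_region_info *info)
{
	memset(info, 0, sizeof(*info));
	info->argsz = sizeof(*info);
	info->index = index;	/* e.g. VFIO_PCI_CONFIG_REGION_INDEX */
	return ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info);
}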
1421 struct intel_vgpu *vgpu = dev_get_drvdata(dev);
1423 return sprintf(buf, "%d\n", vgpu->id);
1446 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1451 vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
1452 ret = intel_gvt_create_vgpu(vgpu, type->conf);
1456 kvmgt_protect_table_init(vgpu);
1457 gvt_cache_init(vgpu);
1464 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1466 intel_gvt_destroy_vgpu(vgpu);
1487 struct intel_vgpu *vgpu;
1490 vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
1492 if (IS_ERR(vgpu)) {
1493 gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
1494 return PTR_ERR(vgpu);
1497 dev_set_drvdata(&mdev->dev, vgpu);
1498 ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
1507 vfio_put_device(&vgpu->vfio_device);
1513 struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
1515 vfio_unregister_group_dev(&vgpu->vfio_device);
1516 vfio_put_device(&vgpu->vfio_device);
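
Lines 1446-1516 pair the mdev probe and remove callbacks around the vfio core lifetime helpers: allocate the combined intel_vgpu/vfio_device object, register it as an emulated-IOMMU device, and on the error path or at removal drop the reference so the vfio core frees it. A sketch (intel_vgpu_dev_ops is the ops table assumed from context; per-type vGPU creation is elided):

static int sketch_probe(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu;
	int ret;

	vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
				 &intel_vgpu_dev_ops);
	if (IS_ERR(vgpu))
		return PTR_ERR(vgpu);

	dev_set_drvdata(&mdev->dev, vgpu);
	ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
	if (ret)
		vfio_put_device(&vgpu->vfio_device);
	return ret;
}

static void sketch_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&vgpu->vfio_device);
	vfio_put_device(&vgpu->vfio_device);
}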
1621 void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
1625 if (!vgpu->region)
1628 for (i = 0; i < vgpu->num_regions; i++)
1629 if (vgpu->region[i].ops->release)
1630 vgpu->region[i].ops->release(vgpu,
1631 &vgpu->region[i]);
1632 vgpu->num_regions = 0;
1633 kfree(vgpu->region);
1634 vgpu->region = NULL;
1637 int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
1643 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1646 mutex_lock(&vgpu->cache_lock);
1648 entry = __gvt_cache_find_gfn(vgpu, gfn);
1650 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1654 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1659 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1660 __gvt_cache_remove_entry(vgpu, entry);
1662 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1666 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1674 mutex_unlock(&vgpu->cache_lock);
1678 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1680 mutex_unlock(&vgpu->cache_lock);
1684 int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
1689 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1692 mutex_lock(&vgpu->cache_lock);
1693 entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
1698 mutex_unlock(&vgpu->cache_lock);
1707 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
1709 __gvt_cache_remove_entry(entry->vgpu, entry);
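
Lines 1637-1709 layer reference counting on top of the DMA cache: the map path takes a reference on a cache hit (remapping when the size changed), the pin path bumps it again while the GPU holds the address, and a release callback unmaps and removes the entry on the last put. A sketch of the release/pin pair, assuming the kref embedded in gvt_dma as reconstructed earlier:

static void sketch_dma_release(struct kref *ref)
{
	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
			   entry->size);
	__gvt_cache_remove_entry(entry->vgpu, entry);
}

static int sketch_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
	struct gvt_dma *entry;
	int ret = 0;

	mutex_lock(&vgpu->cache_lock);
	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
	if (entry)
		kref_get(&entry->ref);	/* held while the GPU uses it */
	else
		ret = -ENXIO;
	mutex_unlock(&vgpu->cache_lock);
	return ret;
}

The matching unpin would be a kref_put(&entry->ref, sketch_dma_release) under the same lock.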
1712 void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
1717 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1720 mutex_lock(&vgpu->cache_lock);
1721 entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
1724 mutex_unlock(&vgpu->cache_lock);
1746 struct intel_vgpu *vgpu;
1750 idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
1753 if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
1754 intel_vgpu_emulate_vblank(vgpu);
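
Lines 1746-1754 show the display-emulation timer walking every vGPU in the gvt-wide idr and emulating a vblank only for those whose status bit says they are active. A sketch of the walk (the locking around vgpu_idr is elided):

static void sketch_emulate_vblank(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id;

	idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
		if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
			intel_vgpu_emulate_vblank(vgpu);
	}
}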
1854 struct intel_vgpu *vgpu;
1913 vgpu = intel_gvt_create_idle_vgpu(gvt);
1914 if (IS_ERR(vgpu)) {
1915 ret = PTR_ERR(vgpu);
1916 gvt_err("failed to create idle vgpu\n");
1919 gvt->idle_vgpu = vgpu;