Lines matching refs: group (drivers/vfio/group.c)

26 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
31 mutex_lock(&group->device_lock);
32 list_for_each_entry(it, &group->device_list, group_next) {
50 mutex_unlock(&group->device_lock);
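
The fragments at source lines 26-50 are the name-based device lookup behind VFIO_GROUP_GET_DEVICE_FD. A sketch of how the loop plausibly reads in full; the interior lines (the ops->match() path and the registration try-get) do not contain the plain identifier "group" and are reconstructed from context, so treat them as an approximation, not verbatim source:

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = ERR_PTR(-ENODEV);

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		int ret;

		if (it->ops->match) {
			/* driver-specific match, e.g. PCI address aliases */
			ret = it->ops->match(it, buf);
			if (ret < 0) {
				device = ERR_PTR(ret);
				break;
			}
		} else {
			ret = !strcmp(dev_name(it->dev), buf);
		}

		if (ret && vfio_device_try_get_registration(it)) {
			device = it;
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}
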
58 static bool vfio_group_has_iommu(struct vfio_group *group)
60 lockdep_assert_held(&group->group_lock);
65 WARN_ON(!group->container != !group->container_users);
67 return group->container || group->iommufd;
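
Source lines 58-67 pin down almost the whole helper; only the braces and the comment between the lockdep assert and the WARN_ON are missing. Filled in, it plausibly reads:

static bool vfio_group_has_iommu(struct vfio_group *group)
{
	lockdep_assert_held(&group->group_lock);
	/*
	 * There can only be container users if there is a container, and a
	 * container implies users; the WARN_ON checks the two stay in sync.
	 */
	WARN_ON(!group->container != !group->container_users);

	return group->container || group->iommufd;
}
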
73 * the group, we know that still exists, therefore the only valid
76 static int vfio_group_ioctl_unset_container(struct vfio_group *group)
80 mutex_lock(&group->group_lock);
81 if (!vfio_group_has_iommu(group)) {
85 if (group->container) {
86 if (group->container_users != 1) {
90 vfio_group_detach_container(group);
92 if (group->iommufd) {
93 iommufd_ctx_put(group->iommufd);
94 group->iommufd = NULL;
98 mutex_unlock(&group->group_lock);
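
The UNSET_CONTAINER path (source lines 76-98) handles both backends: a legacy container is detached only when this group is its last user, while an iommufd context is simply released. Reconstructed under that reading; the error codes on the unmatched lines are assumptions:

static int vfio_group_ioctl_unset_container(struct vfio_group *group)
{
	int ret = 0;

	mutex_lock(&group->group_lock);
	if (!vfio_group_has_iommu(group)) {
		ret = -EINVAL;
		goto out_unlock;
	}
	if (group->container) {
		if (group->container_users != 1) {
			ret = -EBUSY;
			goto out_unlock;
		}
		vfio_group_detach_container(group);
	}
	if (group->iommufd) {
		iommufd_ctx_put(group->iommufd);
		group->iommufd = NULL;
	}

out_unlock:
	mutex_unlock(&group->group_lock);
	return ret;
}
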
102 static int vfio_group_ioctl_set_container(struct vfio_group *group,
118 mutex_lock(&group->group_lock);
119 if (vfio_group_has_iommu(group)) {
123 if (!group->iommu_group) {
130 ret = vfio_container_attach_group(container, group);
137 group->type == VFIO_NO_IOMMU)
147 group->iommufd = iommufd;
155 mutex_unlock(&group->group_lock);
162 spin_lock(&device->group->kvm_ref_lock);
163 vfio_device_get_kvm_safe(device, device->group->kvm);
164 spin_unlock(&device->group->kvm_ref_lock);
172 mutex_lock(&device->group->group_lock);
173 if (!vfio_group_has_iommu(device->group)) {
182 * associated with the group (if there is one) and obtain a reference
189 df->iommufd = device->group->iommufd;
221 mutex_unlock(&device->group->group_lock);
232 mutex_unlock(&device->group->group_lock);
240 mutex_lock(&device->group->group_lock);
250 mutex_unlock(&device->group->group_lock);
265 df->group = device->group;
296 if (device->group->type == VFIO_NO_IOMMU)
313 static int vfio_group_ioctl_get_device_fd(struct vfio_group *group,
326 device = vfio_device_get_from_name(group, buf);
353 static int vfio_group_ioctl_get_status(struct vfio_group *group,
367 mutex_lock(&group->group_lock);
368 if (!group->iommu_group) {
369 mutex_unlock(&group->group_lock);
380 if (vfio_group_has_iommu(group))
383 else if (!iommu_group_dma_owner_claimed(group->iommu_group))
385 mutex_unlock(&group->group_lock);
395 struct vfio_group *group = filep->private_data;
400 return vfio_group_ioctl_get_device_fd(group, uarg);
402 return vfio_group_ioctl_get_status(group, uarg);
404 return vfio_group_ioctl_set_container(group, uarg);
406 return vfio_group_ioctl_unset_container(group);
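
Source lines 395-406 are the group FD's ioctl dispatcher. The unmatched lines are only the switch scaffolding and the user-pointer cast; with the standard VFIO_GROUP_* uapi commands filled in it reads:

static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_GROUP_GET_DEVICE_FD:
		return vfio_group_ioctl_get_device_fd(group, uarg);
	case VFIO_GROUP_GET_STATUS:
		return vfio_group_ioctl_get_status(group, uarg);
	case VFIO_GROUP_SET_CONTAINER:
		return vfio_group_ioctl_set_container(group, uarg);
	case VFIO_GROUP_UNSET_CONTAINER:
		return vfio_group_ioctl_unset_container(group);
	default:
		return -ENOTTY;
	}
}
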
414 struct vfio_group *group = device->group;
417 mutex_lock(&group->group_lock);
418 if (group->opened_file) {
423 group->cdev_device_open_cnt++;
426 mutex_unlock(&group->group_lock);
432 struct vfio_group *group = device->group;
434 mutex_lock(&group->group_lock);
435 group->cdev_device_open_cnt--;
436 mutex_unlock(&group->group_lock);
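
Source lines 414-436 show the mutual exclusion between the legacy group FD and the cdev path: the cdev open count may only rise while no group file is open. In recent trees these helpers are named vfio_device_block_group()/vfio_device_unblock_group(); the names and the -EBUSY return are assumptions in this sketch:

int vfio_device_block_group(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	int ret = 0;

	mutex_lock(&group->group_lock);
	if (group->opened_file) {
		ret = -EBUSY;
		goto out_unlock;
	}

	group->cdev_device_open_cnt++;

out_unlock:
	mutex_unlock(&group->group_lock);
	return ret;
}

void vfio_device_unblock_group(struct vfio_device *device)
{
	struct vfio_group *group = device->group;

	mutex_lock(&group->group_lock);
	group->cdev_device_open_cnt--;
	mutex_unlock(&group->group_lock);
}
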
441 struct vfio_group *group =
445 mutex_lock(&group->group_lock);
449 * will be stable at 0 under the group rwsem
451 if (refcount_read(&group->drivers) == 0) {
456 if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
461 if (group->cdev_device_open_cnt) {
467 * Do we need multiple instances of the group open? Seems not.
469 if (group->opened_file) {
473 group->opened_file = filep;
474 filep->private_data = group;
477 mutex_unlock(&group->group_lock);
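
The open path (source lines 441-477) stacks four gates before publishing the file: a live driver refcount, the noiommu CAP_SYS_RAWIO check, no cdev users, and single-open semantics. A sketch with the unmatched error codes assumed:

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group =
		container_of(inode->i_cdev, struct vfio_group, cdev);
	int ret;

	mutex_lock(&group->group_lock);

	/*
	 * drivers can be zero if this races with vfio_device_remove_group();
	 * it will be stable at 0 under the group lock.
	 */
	if (refcount_read(&group->drivers) == 0) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
		ret = -EPERM;
		goto out_unlock;
	}

	if (group->cdev_device_open_cnt) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Do we need multiple instances of the group open? Seems not.
	 */
	if (group->opened_file) {
		ret = -EBUSY;
		goto out_unlock;
	}
	group->opened_file = filep;
	filep->private_data = group;
	ret = 0;

out_unlock:
	mutex_unlock(&group->group_lock);
	return ret;
}
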
483 struct vfio_group *group = filep->private_data;
487 mutex_lock(&group->group_lock);
489 * Device FDs hold a group file reference, therefore the group release
492 WARN_ON(group->notifier.head);
493 if (group->container)
494 vfio_group_detach_container(group);
495 if (group->iommufd) {
496 iommufd_ctx_put(group->iommufd);
497 group->iommufd = NULL;
499 group->opened_file = NULL;
500 mutex_unlock(&group->group_lock);
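
Release (source lines 483-500) mirrors open: whichever backend is still attached is torn down before opened_file is cleared. Reconstructed; the private_data reset and the return value are assumptions:

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	mutex_lock(&group->group_lock);
	/*
	 * Device FDs hold a group file reference, therefore the group release
	 * is only called when there are no open devices.
	 */
	WARN_ON(group->notifier.head);
	if (group->container)
		vfio_group_detach_container(group);
	if (group->iommufd) {
		iommufd_ctx_put(group->iommufd);
		group->iommufd = NULL;
	}
	group->opened_file = NULL;
	mutex_unlock(&group->group_lock);
	return 0;
}
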
518 struct vfio_group *group;
523 * group->iommu_group from the vfio.group_list cannot be NULL
526 list_for_each_entry(group, &vfio.group_list, vfio_next) {
527 if (group->iommu_group == iommu_group)
528 return group;
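
Source lines 518-528 are the linear lookup over vfio.group_list; per the comment at 523 it relies on vfio.group_lock keeping the iommu_group pointers non-NULL. Filled in:

static struct vfio_group *
vfio_group_find_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	lockdep_assert_held(&vfio.group_lock);

	/*
	 * group->iommu_group from the vfio.group_list cannot be NULL
	 * under the vfio.group_lock.
	 */
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group)
			return group;
	}
	return NULL;
}
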
535 struct vfio_group *group = container_of(dev, struct vfio_group, dev);
537 mutex_destroy(&group->device_lock);
538 mutex_destroy(&group->group_lock);
539 WARN_ON(group->iommu_group);
540 WARN_ON(group->cdev_device_open_cnt);
541 ida_free(&vfio.group_ida, MINOR(group->dev.devt));
542 kfree(group);
548 struct vfio_group *group;
551 group = kzalloc(sizeof(*group), GFP_KERNEL);
552 if (!group)
557 kfree(group);
561 device_initialize(&group->dev);
562 group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor);
563 group->dev.class = vfio.class;
564 group->dev.release = vfio_group_release;
565 cdev_init(&group->cdev, &vfio_group_fops);
566 group->cdev.owner = THIS_MODULE;
568 refcount_set(&group->drivers, 1);
569 mutex_init(&group->group_lock);
570 spin_lock_init(&group->kvm_ref_lock);
571 INIT_LIST_HEAD(&group->device_list);
572 mutex_init(&group->device_lock);
573 group->iommu_group = iommu_group;
576 group->type = type;
577 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
579 return group;
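
vfio_group_alloc() (source lines 548-579) sets up the chardev identity and every lock before the group is visible anywhere; the ida_free() at line 541 above confirms the minor number comes from vfio.group_ida. The minor allocation and the iommu_group reference between lines 573 and 576 are assumptions in this sketch:

static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
					   enum vfio_group_type type)
{
	struct vfio_group *group;
	int minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	/* assumed: a minor from vfio.group_ida backs group->dev.devt */
	minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		kfree(group);
		return ERR_PTR(minor);
	}

	device_initialize(&group->dev);
	group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor);
	group->dev.class = vfio.class;
	group->dev.release = vfio_group_release;
	cdev_init(&group->cdev, &vfio_group_fops);
	group->cdev.owner = THIS_MODULE;

	refcount_set(&group->drivers, 1);
	mutex_init(&group->group_lock);
	spin_lock_init(&group->kvm_ref_lock);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	group->iommu_group = iommu_group;
	/* assumed: hold an iommu_group reference until the group is removed */
	iommu_group_ref_get(iommu_group);
	group->type = type;
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	return group;
}
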
585 struct vfio_group *group;
591 group = vfio_group_alloc(iommu_group, type);
592 if (IS_ERR(group))
593 return group;
595 err = dev_set_name(&group->dev, "%s%d",
596 group->type == VFIO_NO_IOMMU ? "noiommu-" : "",
603 err = cdev_device_add(&group->cdev, &group->dev);
609 list_add(&group->vfio_next, &vfio.group_list);
611 return group;
614 put_device(&group->dev);
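
vfio_create_group() (source lines 585-614) names the device "noiommu-N" or plain "N" after the iommu group id, registers the cdev, and links the group onto vfio.group_list; the put_device() at 614 is the error unwind. A sketch, with the iommu_group_id() argument and the unwind labels assumed:

static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
					    enum vfio_group_type type)
{
	struct vfio_group *group;
	int err;

	lockdep_assert_held(&vfio.group_lock);

	group = vfio_group_alloc(iommu_group, type);
	if (IS_ERR(group))
		return group;

	err = dev_set_name(&group->dev, "%s%d",
			   group->type == VFIO_NO_IOMMU ? "noiommu-" : "",
			   iommu_group_id(iommu_group));
	if (err)
		goto err_put;

	err = cdev_device_add(&group->cdev, &group->dev);
	if (err)
		goto err_put;

	list_add(&group->vfio_next, &vfio.group_list);

	return group;

err_put:
	put_device(&group->dev);
	return ERR_PTR(err);
}
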
622 struct vfio_group *group;
637 group = vfio_create_group(iommu_group, type);
639 if (IS_ERR(group)) {
640 ret = PTR_ERR(group);
644 return group;
653 static bool vfio_group_has_device(struct vfio_group *group, struct device *dev)
657 mutex_lock(&group->device_lock);
658 list_for_each_entry(device, &group->device_list, group_next) {
660 mutex_unlock(&group->device_lock);
664 mutex_unlock(&group->device_lock);
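
vfio_group_has_device() (source lines 653-664) is the duplicate-probe guard used by vfio_group_find_or_alloc() below; only the comparison and the two return statements are missing from the matches:

static bool vfio_group_has_device(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			mutex_unlock(&group->device_lock);
			return true;
		}
	}
	mutex_unlock(&group->device_lock);
	return false;
}
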
671 struct vfio_group *group;
676 * With noiommu enabled, create an IOMMU group for devices that
681 group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU);
682 if (!IS_ERR(group)) {
684 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
686 return group;
693 group = vfio_group_find_from_iommu(iommu_group);
694 if (group) {
695 if (WARN_ON(vfio_group_has_device(group, dev)))
696 group = ERR_PTR(-EINVAL);
698 refcount_inc(&group->drivers);
700 group = vfio_create_group(iommu_group, VFIO_IOMMU);
706 return group;
712 struct vfio_group *group;
715 group = vfio_group_find_or_alloc(device->dev);
717 group = vfio_noiommu_group_alloc(device->dev, type);
719 if (IS_ERR(group))
720 return PTR_ERR(group);
722 /* Our reference on group is moved to the device */
723 device->group = group;
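
Source lines 712-723 choose between the real-IOMMU and noiommu/emulated allocation paths and then hand the group reference over to the device. The function name and the type test are assumptions based on recent trees:

static int vfio_device_set_group(struct vfio_device *device,
				 enum vfio_group_type type)
{
	struct vfio_group *group;

	if (type == VFIO_IOMMU)
		group = vfio_group_find_or_alloc(device->dev);
	else
		group = vfio_noiommu_group_alloc(device->dev, type);

	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Our reference on group is moved to the device */
	device->group = group;
	return 0;
}
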
729 struct vfio_group *group = device->group;
732 if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
736 if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
738 list_del(&group->vfio_next);
741 * We could concurrently probe another driver in the group that might
746 cdev_device_del(&group->cdev, &group->dev);
748 mutex_lock(&group->group_lock);
753 * properly hold the group reference.
755 WARN_ON(!list_empty(&group->device_list));
756 WARN_ON(group->notifier.head);
759 * Revoke all users of group->iommu_group. At this point we know there
763 if (group->container)
764 vfio_group_detach_container(group);
765 iommu_group = group->iommu_group;
766 group->iommu_group = NULL;
767 mutex_unlock(&group->group_lock);
771 put_device(&group->dev);
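
Teardown (source lines 729-771) is the inverse of creation: drop the driver refcount, unlist the group and delete its cdev, detach any container, NULL iommu_group to revoke new users, then drop the device reference. A condensed sketch; the iommu_group_remove_device()/iommu_group_put() calls and the vfio.group_lock unlock are assumed from context:

void vfio_device_remove_group(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	struct iommu_group *iommu_group;

	if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
		iommu_group_remove_device(device->dev);

	/* pairs with the refcount taken when the group was found/created */
	if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
		return;
	list_del(&group->vfio_next);

	/*
	 * A concurrent probe in the group cannot retake group->drivers and
	 * blocks on cdev_device_del() until the matching open is gone.
	 */
	cdev_device_del(&group->cdev, &group->dev);

	mutex_lock(&group->group_lock);
	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	/*
	 * Revoke all users of group->iommu_group; setting it to NULL blocks
	 * any new ones.
	 */
	if (group->container)
		vfio_group_detach_container(group);
	iommu_group = group->iommu_group;
	group->iommu_group = NULL;
	mutex_unlock(&group->group_lock);
	mutex_unlock(&vfio.group_lock);

	iommu_group_put(iommu_group);
	put_device(&group->dev);
}
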
776 mutex_lock(&device->group->device_lock);
777 list_add(&device->group_next, &device->group->device_list);
778 mutex_unlock(&device->group->device_lock);
783 mutex_lock(&device->group->device_lock);
785 mutex_unlock(&device->group->device_lock);
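
Source lines 776-785 add and remove a device on its group's device_list; the list_del() at 784 does not reference the plain identifier and is inferred, as are the helper names:

void vfio_device_group_register(struct vfio_device *device)
{
	mutex_lock(&device->group->device_lock);
	list_add(&device->group_next, &device->group->device_list);
	mutex_unlock(&device->group->device_lock);
}

void vfio_device_group_unregister(struct vfio_device *device)
{
	mutex_lock(&device->group->device_lock);
	list_del(&device->group_next);
	mutex_unlock(&device->group->device_lock);
}
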
790 struct vfio_group *group = device->group;
793 lockdep_assert_held(&group->group_lock);
795 if (WARN_ON(!group->container))
798 ret = vfio_group_use_container(group);
807 struct vfio_group *group = device->group;
809 lockdep_assert_held(&group->group_lock);
811 if (WARN_ON(!group->container))
815 vfio_group_unuse_container(group);
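
Source lines 790-815 pair container use/unuse with device container registration, both under group_lock. Sketch; the vfio_device_container_register()/_unregister() calls between the matched lines are assumed:

int vfio_device_group_use_iommu(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	int ret;

	lockdep_assert_held(&group->group_lock);

	if (WARN_ON(!group->container))
		return -EINVAL;

	ret = vfio_group_use_container(group);
	if (ret)
		return ret;
	vfio_device_container_register(device);
	return 0;
}

void vfio_device_group_unuse_iommu(struct vfio_device *device)
{
	struct vfio_group *group = device->group;

	lockdep_assert_held(&group->group_lock);

	if (WARN_ON(!group->container))
		return;

	vfio_device_container_unregister(device);
	vfio_group_unuse_container(group);
}
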
820 return device->group->container;
825 struct vfio_group *group = file->private_data;
829 return group;
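
vfio_group_from_file() (source lines 825-829) validates the file before trusting private_data; the f_op check on the unmatched line is the standard pattern and assumed here:

static struct vfio_group *vfio_group_from_file(struct file *file)
{
	struct vfio_group *group = file->private_data;

	if (file->f_op != &vfio_group_fops)
		return NULL;
	return group;
}
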
833 * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
834 * @file: VFIO group file
837 * returns a reference on the group. This function is deprecated, only the SPAPR
842 struct vfio_group *group = vfio_group_from_file(file);
848 if (!group)
851 mutex_lock(&group->group_lock);
852 if (group->iommu_group) {
853 iommu_group = group->iommu_group;
856 mutex_unlock(&group->group_lock);
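
vfio_file_iommu_group() (source lines 833-856) hands back a referenced iommu_group for the deprecated SPAPR/KVM path only. The CONFIG_SPAPR_TCE_IOMMU gate and the iommu_group_ref_get() on the unmatched lines are assumptions:

struct iommu_group *vfio_file_iommu_group(struct file *file)
{
	struct vfio_group *group = vfio_group_from_file(file);
	struct iommu_group *iommu_group = NULL;

	if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
		return NULL;

	if (!group)
		return NULL;

	mutex_lock(&group->group_lock);
	if (group->iommu_group) {
		iommu_group = group->iommu_group;
		iommu_group_ref_get(iommu_group);
	}
	mutex_unlock(&group->group_lock);
	return iommu_group;
}
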
862 * vfio_file_is_group - True if the file is a vfio group file
863 * @file: VFIO group file
871 bool vfio_group_enforced_coherent(struct vfio_group *group)
879 * is set then the iommu_domain eventually attached to the device/group
882 mutex_lock(&group->device_lock);
883 list_for_each_entry(device, &group->device_list, group_next) {
890 mutex_unlock(&group->device_lock);
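
Source lines 871-890 scan the group's devices and report coherent only if every device can enforce cache coherency, since a single non-capable device limits what any attached domain can do. The capability test on the unmatched lines is assumed to be device_iommu_capable() with IOMMU_CAP_ENFORCE_CACHE_COHERENCY:

bool vfio_group_enforced_coherent(struct vfio_group *group)
{
	struct vfio_device *device;
	bool ret = true;

	/*
	 * If a device lacks IOMMU_CAP_ENFORCE_CACHE_COHERENCY then any
	 * iommu_domain eventually attached to the device/group cannot
	 * enforce coherency either.
	 */
	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (!device_iommu_capable(device->dev,
					  IOMMU_CAP_ENFORCE_CACHE_COHERENCY)) {
			ret = false;
			break;
		}
	}
	mutex_unlock(&group->device_lock);
	return ret;
}
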
894 void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
896 spin_lock(&group->kvm_ref_lock);
897 group->kvm = kvm;
898 spin_unlock(&group->kvm_ref_lock);
910 struct vfio_group *group = vfio_group_from_file(file);
912 if (!group)
915 return group == device->group;
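
vfio_file_has_dev() (source lines 910-915) reduces to pointer equality once the file is confirmed to be a group file; only the false return on the unmatched line is assumed:

bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
	struct vfio_group *group = vfio_group_from_file(file);

	if (!group)
		return false;

	return group == device->group;
}
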