Lines matching refs: group (Linux kernel IOMMU core, drivers/iommu/iommu.c)

72 #define for_each_group_device(group, pos) \  argument
73 list_for_each_entry(pos, &(group)->devices, list)
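The for_each_group_device() iterator above is file-local to the IOMMU core, so only code in iommu.c that already holds group->mutex may use it. A minimal sketch of a hypothetical core-internal helper (iommu_group_count_devices is not a real function):

	static int iommu_group_count_devices(struct iommu_group *group)
	{
		struct group_device *gdev;
		int count = 0;

		/* the iterator gives no locking of its own */
		lockdep_assert_held(&group->mutex);
		for_each_group_device(group, gdev)
			count++;
		return count;
	}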
77 ssize_t (*show)(struct iommu_group *group, char *buf);
78 ssize_t (*store)(struct iommu_group *group,
97 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type);
101 struct iommu_group *group);
107 static int __iommu_device_set_domain(struct iommu_group *group,
111 static int __iommu_group_set_domain_internal(struct iommu_group *group,
114 static int __iommu_group_set_domain(struct iommu_group *group, in __iommu_group_set_domain() argument
117 return __iommu_group_set_domain_internal(group, new_domain, 0); in __iommu_group_set_domain()
119 static void __iommu_group_set_domain_nofail(struct iommu_group *group, in __iommu_group_set_domain_nofail() argument
123 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); in __iommu_group_set_domain_nofail()
126 static int iommu_setup_default_domain(struct iommu_group *group,
130 static ssize_t iommu_group_store_type(struct iommu_group *group,
132 static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
134 static void __iommu_group_free_device(struct iommu_group *group,
405 struct iommu_group *group; in iommu_init_device() local
427 group = ops->device_group(dev); in iommu_init_device()
428 if (WARN_ON_ONCE(group == NULL)) in iommu_init_device()
429 group = ERR_PTR(-EINVAL); in iommu_init_device()
430 if (IS_ERR(group)) { in iommu_init_device()
431 ret = PTR_ERR(group); in iommu_init_device()
434 dev->iommu_group = group; in iommu_init_device()
456 struct iommu_group *group = dev->iommu_group; in iommu_deinit_device() local
459 lockdep_assert_held(&group->mutex); in iommu_deinit_device()
490 if (list_empty(&group->devices)) { in iommu_deinit_device()
491 if (group->default_domain) { in iommu_deinit_device()
492 iommu_domain_free(group->default_domain); in iommu_deinit_device()
493 group->default_domain = NULL; in iommu_deinit_device()
495 if (group->blocking_domain) { in iommu_deinit_device()
496 iommu_domain_free(group->blocking_domain); in iommu_deinit_device()
497 group->blocking_domain = NULL; in iommu_deinit_device()
499 group->domain = NULL; in iommu_deinit_device()
513 struct iommu_group *group; in __iommu_probe_device() local
545 group = dev->iommu_group; in __iommu_probe_device()
546 gdev = iommu_group_alloc_device(group, dev); in __iommu_probe_device()
547 mutex_lock(&group->mutex); in __iommu_probe_device()
557 list_add_tail(&gdev->list, &group->devices); in __iommu_probe_device()
558 WARN_ON(group->default_domain && !group->domain); in __iommu_probe_device()
559 if (group->default_domain) in __iommu_probe_device()
560 iommu_create_device_direct_mappings(group->default_domain, dev); in __iommu_probe_device()
561 if (group->domain) { in __iommu_probe_device()
562 ret = __iommu_device_set_domain(group, dev, group->domain, 0); in __iommu_probe_device()
565 } else if (!group->default_domain && !group_list) { in __iommu_probe_device()
566 ret = iommu_setup_default_domain(group, 0); in __iommu_probe_device()
569 } else if (!group->default_domain) { in __iommu_probe_device()
575 if (list_empty(&group->entry)) in __iommu_probe_device()
576 list_add_tail(&group->entry, group_list); in __iommu_probe_device()
579 if (group->default_domain) in __iommu_probe_device()
582 mutex_unlock(&group->mutex); in __iommu_probe_device()
588 __iommu_group_free_device(group, gdev); in __iommu_probe_device()
591 mutex_unlock(&group->mutex); in __iommu_probe_device()
592 iommu_group_put(group); in __iommu_probe_device()
615 static void __iommu_group_free_device(struct iommu_group *group, in __iommu_group_free_device() argument
620 sysfs_remove_link(group->devices_kobj, grp_dev->name); in __iommu_group_free_device()
623 trace_remove_device_from_group(group->id, dev); in __iommu_group_free_device()
630 if (list_empty(&group->devices)) in __iommu_group_free_device()
631 WARN_ON(group->owner_cnt || in __iommu_group_free_device()
632 group->domain != group->default_domain); in __iommu_group_free_device()
641 struct iommu_group *group = dev->iommu_group; in __iommu_group_remove_device() local
644 mutex_lock(&group->mutex); in __iommu_group_remove_device()
645 for_each_group_device(group, device) { in __iommu_group_remove_device()
650 __iommu_group_free_device(group, device); in __iommu_group_remove_device()
657 mutex_unlock(&group->mutex); in __iommu_group_remove_device()
663 iommu_group_put(group); in __iommu_group_remove_device()
668 struct iommu_group *group = dev->iommu_group; in iommu_release_device() local
670 if (group) in iommu_release_device()
717 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_show() local
721 ret = attr->show(group, buf); in iommu_group_attr_show()
730 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_store() local
734 ret = attr->store(group, buf, count); in iommu_group_attr_store()
743 static int iommu_group_create_file(struct iommu_group *group, in iommu_group_create_file() argument
746 return sysfs_create_file(&group->kobj, &attr->attr); in iommu_group_create_file()
749 static void iommu_group_remove_file(struct iommu_group *group, in iommu_group_remove_file() argument
752 sysfs_remove_file(&group->kobj, &attr->attr); in iommu_group_remove_file()
755 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) in iommu_group_show_name() argument
757 return sysfs_emit(buf, "%s\n", group->name); in iommu_group_show_name()
836 int iommu_get_group_resv_regions(struct iommu_group *group, in iommu_get_group_resv_regions() argument
842 mutex_lock(&group->mutex); in iommu_get_group_resv_regions()
843 for_each_group_device(group, device) { in iommu_get_group_resv_regions()
860 mutex_unlock(&group->mutex); in iommu_get_group_resv_regions()
865 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, in iommu_group_show_resv_regions() argument
873 iommu_get_group_resv_regions(group, &group_resv_regions); in iommu_group_show_resv_regions()
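iommu_get_group_resv_regions() fills a caller-provided list with copies of every member device's reserved regions; the caller frees the entries, exactly as the sysfs attribute here does. A minimal sketch (my_dump_resv_regions is hypothetical):

	#include <linux/iommu.h>
	#include <linux/slab.h>

	static void my_dump_resv_regions(struct iommu_group *group)
	{
		struct iommu_resv_region *region, *next;
		LIST_HEAD(regions);

		iommu_get_group_resv_regions(group, &regions);
		list_for_each_entry_safe(region, next, &regions, list) {
			pr_info("resv: 0x%016llx-0x%016llx\n",
				(unsigned long long)region->start,
				(unsigned long long)(region->start +
						     region->length - 1));
			kfree(region);	/* entries are kmalloc'ed copies */
		}
	}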
887 static ssize_t iommu_group_show_type(struct iommu_group *group, in iommu_group_show_type() argument
892 mutex_lock(&group->mutex); in iommu_group_show_type()
893 if (group->default_domain) { in iommu_group_show_type()
894 switch (group->default_domain->type) { in iommu_group_show_type()
912 mutex_unlock(&group->mutex); in iommu_group_show_type()
927 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_release() local
929 pr_debug("Releasing group %d\n", group->id); in iommu_group_release()
931 if (group->iommu_data_release) in iommu_group_release()
932 group->iommu_data_release(group->iommu_data); in iommu_group_release()
934 ida_free(&iommu_group_ida, group->id); in iommu_group_release()
937 WARN_ON(group->default_domain); in iommu_group_release()
938 WARN_ON(group->blocking_domain); in iommu_group_release()
940 kfree(group->name); in iommu_group_release()
941 kfree(group); in iommu_group_release()
962 struct iommu_group *group; in iommu_group_alloc() local
965 group = kzalloc(sizeof(*group), GFP_KERNEL); in iommu_group_alloc()
966 if (!group) in iommu_group_alloc()
969 group->kobj.kset = iommu_group_kset; in iommu_group_alloc()
970 mutex_init(&group->mutex); in iommu_group_alloc()
971 INIT_LIST_HEAD(&group->devices); in iommu_group_alloc()
972 INIT_LIST_HEAD(&group->entry); in iommu_group_alloc()
973 xa_init(&group->pasid_array); in iommu_group_alloc()
977 kfree(group); in iommu_group_alloc()
980 group->id = ret; in iommu_group_alloc()
982 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, in iommu_group_alloc()
983 NULL, "%d", group->id); in iommu_group_alloc()
985 kobject_put(&group->kobj); in iommu_group_alloc()
989 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); in iommu_group_alloc()
990 if (!group->devices_kobj) { in iommu_group_alloc()
991 kobject_put(&group->kobj); /* triggers .release & free */ in iommu_group_alloc()
1000 kobject_put(&group->kobj); in iommu_group_alloc()
1002 ret = iommu_group_create_file(group, in iommu_group_alloc()
1005 kobject_put(group->devices_kobj); in iommu_group_alloc()
1009 ret = iommu_group_create_file(group, &iommu_group_attr_type); in iommu_group_alloc()
1011 kobject_put(group->devices_kobj); in iommu_group_alloc()
1015 pr_debug("Allocated group %d\n", group->id); in iommu_group_alloc()
1017 return group; in iommu_group_alloc()
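iommu_group_alloc() hands back a refcounted group (ERR_PTR on failure, never NULL) with its sysfs directory already populated. A minimal sketch of a driver's ->device_group() callback giving every device a private group, similar in spirit to generic_device_group() (my_device_group is hypothetical):

	static struct iommu_group *my_device_group(struct device *dev)
	{
		struct iommu_group *group;

		group = iommu_group_alloc();	/* ERR_PTR() on failure */
		if (IS_ERR(group))
			return group;
		/* the caller (the IOMMU core) now owns the initial reference */
		return group;
	}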
1029 void *iommu_group_get_iommudata(struct iommu_group *group) in iommu_group_get_iommudata() argument
1031 return group->iommu_data; in iommu_group_get_iommudata()
1045 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, in iommu_group_set_iommudata() argument
1048 group->iommu_data = iommu_data; in iommu_group_set_iommudata()
1049 group->iommu_data_release = release; in iommu_group_set_iommudata()
1061 int iommu_group_set_name(struct iommu_group *group, const char *name) in iommu_group_set_name() argument
1065 if (group->name) { in iommu_group_set_name()
1066 iommu_group_remove_file(group, &iommu_group_attr_name); in iommu_group_set_name()
1067 kfree(group->name); in iommu_group_set_name()
1068 group->name = NULL; in iommu_group_set_name()
1073 group->name = kstrdup(name, GFP_KERNEL); in iommu_group_set_name()
1074 if (!group->name) in iommu_group_set_name()
1077 ret = iommu_group_create_file(group, &iommu_group_attr_name); in iommu_group_set_name()
1079 kfree(group->name); in iommu_group_set_name()
1080 group->name = NULL; in iommu_group_set_name()
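iommu_group_set_iommudata() and iommu_group_set_name() let bus code hang private data and a sysfs-visible label off a group; the release callback fires from iommu_group_release() when the last reference drops. A minimal sketch, with my_group_data and my_setup_group as hypothetical names:

	#include <linux/iommu.h>
	#include <linux/slab.h>

	struct my_group_data {
		u32 stream_id;
	};

	static void my_group_data_release(void *iommu_data)
	{
		kfree(iommu_data);
	}

	static int my_setup_group(struct iommu_group *group, u32 stream_id)
	{
		struct my_group_data *data;

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		data->stream_id = stream_id;
		iommu_group_set_iommudata(group, data, my_group_data_release);
		return iommu_group_set_name(group, "my-bus-group");
	}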
1155 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, in iommu_group_alloc_device() argument
1167 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); in iommu_group_alloc_device()
1178 ret = sysfs_create_link_nowarn(group->devices_kobj, in iommu_group_alloc_device()
1194 trace_add_device_to_group(group->id, dev); in iommu_group_alloc_device()
1196 dev_info(dev, "Adding to iommu group %d\n", group->id); in iommu_group_alloc_device()
1206 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); in iommu_group_alloc_device()
1218 int iommu_group_add_device(struct iommu_group *group, struct device *dev) in iommu_group_add_device() argument
1222 gdev = iommu_group_alloc_device(group, dev); in iommu_group_add_device()
1226 iommu_group_ref_get(group); in iommu_group_add_device()
1227 dev->iommu_group = group; in iommu_group_add_device()
1229 mutex_lock(&group->mutex); in iommu_group_add_device()
1230 list_add_tail(&gdev->list, &group->devices); in iommu_group_add_device()
1231 mutex_unlock(&group->mutex); in iommu_group_add_device()
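iommu_group_add_device() takes its own group reference (the iommu_group_ref_get() call above), so the classic caller pattern drops the local reference once the device is linked. A minimal sketch (my_attach_dev_to_group is hypothetical):

	static int my_attach_dev_to_group(struct device *dev)
	{
		struct iommu_group *group;
		int ret;

		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);

		ret = iommu_group_add_device(group, dev);
		iommu_group_put(group);	/* on success the group holds its own ref */
		return ret;
	}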
1245 struct iommu_group *group = dev->iommu_group; in iommu_group_remove_device() local
1247 if (!group) in iommu_group_remove_device()
1250 dev_info(dev, "Removing from iommu group %d\n", group->id); in iommu_group_remove_device()
1268 struct iommu_group *group = dev->iommu_group; in iommu_group_mutex_assert() local
1270 lockdep_assert_held(&group->mutex); in iommu_group_mutex_assert()
1275 static struct device *iommu_group_first_dev(struct iommu_group *group) in iommu_group_first_dev() argument
1277 lockdep_assert_held(&group->mutex); in iommu_group_first_dev()
1278 return list_first_entry(&group->devices, struct group_device, list)->dev; in iommu_group_first_dev()
1292 int iommu_group_for_each_dev(struct iommu_group *group, void *data, in iommu_group_for_each_dev() argument
1298 mutex_lock(&group->mutex); in iommu_group_for_each_dev()
1299 for_each_group_device(group, device) { in iommu_group_for_each_dev()
1304 mutex_unlock(&group->mutex); in iommu_group_for_each_dev()
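iommu_group_for_each_dev() is the exported counterpart of the file-local iterator: it takes group->mutex itself and stops early if the callback returns non-zero. A minimal sketch (count_one and my_count_group_devices are hypothetical):

	static int count_one(struct device *dev, void *data)
	{
		(*(int *)data)++;
		return 0;	/* non-zero would abort the walk */
	}

	static int my_count_group_devices(struct iommu_group *group)
	{
		int count = 0;

		iommu_group_for_each_dev(group, &count, count_one);
		return count;
	}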
1320 struct iommu_group *group = dev->iommu_group; in iommu_group_get() local
1322 if (group) in iommu_group_get()
1323 kobject_get(group->devices_kobj); in iommu_group_get()
1325 return group; in iommu_group_get()
1336 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) in iommu_group_ref_get() argument
1338 kobject_get(group->devices_kobj); in iommu_group_ref_get()
1339 return group; in iommu_group_ref_get()
1350 void iommu_group_put(struct iommu_group *group) in iommu_group_put() argument
1352 if (group) in iommu_group_put()
1353 kobject_put(group->devices_kobj); in iommu_group_put()
1363 int iommu_group_id(struct iommu_group *group) in iommu_group_id() argument
1365 return group->id; in iommu_group_id()
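iommu_group_get()/iommu_group_put() pin the group through its devices_kobj, so the group cannot disappear while a reference is held. A minimal sketch of the usual lookup pattern (my_log_group is hypothetical):

	#include <linux/device.h>
	#include <linux/iommu.h>

	static void my_log_group(struct device *dev)
	{
		struct iommu_group *group;

		group = iommu_group_get(dev);	/* NULL if dev has no group */
		if (!group)
			return;
		dev_info(dev, "in iommu group %d\n", iommu_group_id(group));
		iommu_group_put(group);		/* balance the reference */
	}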
1392 struct iommu_group *group; in get_pci_function_alias_group() local
1403 group = get_pci_alias_group(tmp, devfns); in get_pci_function_alias_group()
1404 if (group) { in get_pci_function_alias_group()
1406 return group; in get_pci_function_alias_group()
1426 struct iommu_group *group; in get_pci_alias_group() local
1431 group = iommu_group_get(&pdev->dev); in get_pci_alias_group()
1432 if (group) in get_pci_alias_group()
1433 return group; in get_pci_alias_group()
1441 group = get_pci_alias_group(tmp, devfns); in get_pci_alias_group()
1442 if (group) { in get_pci_alias_group()
1444 return group; in get_pci_alias_group()
1447 group = get_pci_function_alias_group(tmp, devfns); in get_pci_alias_group()
1448 if (group) { in get_pci_alias_group()
1450 return group; in get_pci_alias_group()
1460 struct iommu_group *group; member
1472 data->group = iommu_group_get(&pdev->dev); in get_pci_alias_or_group()
1474 return data->group != NULL; in get_pci_alias_or_group()
1497 struct iommu_group *group; in generic_single_device_group() local
1499 group = iommu_group_alloc(); in generic_single_device_group()
1500 if (IS_ERR(group)) in generic_single_device_group()
1501 return group; in generic_single_device_group()
1502 iommu->singleton_group = group; in generic_single_device_group()
1517 struct iommu_group *group = NULL; in pci_device_group() local
1530 return data.group; in pci_device_group()
1549 group = iommu_group_get(&pdev->dev); in pci_device_group()
1550 if (group) in pci_device_group()
1551 return group; in pci_device_group()
1558 group = get_pci_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
1559 if (group) in pci_device_group()
1560 return group; in pci_device_group()
1567 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
1568 if (group) in pci_device_group()
1569 return group; in pci_device_group()
1580 struct iommu_group *group; in fsl_mc_device_group() local
1582 group = iommu_group_get(cont_dev); in fsl_mc_device_group()
1583 if (!group) in fsl_mc_device_group()
1584 group = iommu_group_alloc(); in fsl_mc_device_group()
1585 return group; in fsl_mc_device_group()
1590 __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) in __iommu_group_alloc_default_domain() argument
1592 if (group->default_domain && group->default_domain->type == req_type) in __iommu_group_alloc_default_domain()
1593 return group->default_domain; in __iommu_group_alloc_default_domain()
1594 return __iommu_group_domain_alloc(group, req_type); in __iommu_group_alloc_default_domain()
1602 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) in iommu_group_alloc_default_domain() argument
1604 const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group)); in iommu_group_alloc_default_domain()
1607 lockdep_assert_held(&group->mutex); in iommu_group_alloc_default_domain()
1621 return __iommu_group_alloc_default_domain(group, req_type); in iommu_group_alloc_default_domain()
1624 dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); in iommu_group_alloc_default_domain()
1631 dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); in iommu_group_alloc_default_domain()
1636 iommu_def_domain_type, group->name); in iommu_group_alloc_default_domain()
1640 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) in iommu_group_default_domain() argument
1642 return group->default_domain; in iommu_group_default_domain()
1681 static int iommu_get_def_domain_type(struct iommu_group *group, in iommu_get_def_domain_type() argument
1708 group->id); in iommu_get_def_domain_type()
1723 static int iommu_get_default_domain_type(struct iommu_group *group, in iommu_get_default_domain_type() argument
1730 lockdep_assert_held(&group->mutex); in iommu_get_default_domain_type()
1744 for_each_group_device(group, gdev) { in iommu_get_default_domain_type()
1745 driver_type = iommu_get_def_domain_type(group, gdev->dev, in iommu_get_default_domain_type()
1776 group->id, iommu_domain_type_str(driver_type)); in iommu_get_default_domain_type()
1800 struct iommu_group *group, *next; in bus_iommu_probe() local
1808 list_for_each_entry_safe(group, next, &group_list, entry) { in bus_iommu_probe()
1811 mutex_lock(&group->mutex); in bus_iommu_probe()
1814 list_del_init(&group->entry); in bus_iommu_probe()
1821 ret = iommu_setup_default_domain(group, 0); in bus_iommu_probe()
1823 mutex_unlock(&group->mutex); in bus_iommu_probe()
1826 for_each_group_device(group, gdev) in bus_iommu_probe()
1828 mutex_unlock(&group->mutex); in bus_iommu_probe()
1836 for_each_group_device(group, gdev) in bus_iommu_probe()
1901 bool iommu_group_has_isolated_msi(struct iommu_group *group) in iommu_group_has_isolated_msi() argument
1906 mutex_lock(&group->mutex); in iommu_group_has_isolated_msi()
1907 for_each_group_device(group, group_dev) in iommu_group_has_isolated_msi()
1909 mutex_unlock(&group->mutex); in iommu_group_has_isolated_msi()
1990 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) in __iommu_group_domain_alloc() argument
1992 struct device *dev = iommu_group_first_dev(group); in __iommu_group_domain_alloc()
2063 static void __iommu_group_set_core_domain(struct iommu_group *group) in __iommu_group_set_core_domain() argument
2067 if (group->owner) in __iommu_group_set_core_domain()
2068 new_domain = group->blocking_domain; in __iommu_group_set_core_domain()
2070 new_domain = group->default_domain; in __iommu_group_set_core_domain()
2072 __iommu_group_set_domain_nofail(group, new_domain); in __iommu_group_set_core_domain()
2106 struct iommu_group *group = dev->iommu_group; in iommu_attach_device() local
2109 if (!group) in iommu_attach_device()
2116 mutex_lock(&group->mutex); in iommu_attach_device()
2118 if (list_count_nodes(&group->devices) != 1) in iommu_attach_device()
2121 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
2124 mutex_unlock(&group->mutex); in iommu_attach_device()
2140 struct iommu_group *group = dev->iommu_group; in iommu_detach_device() local
2142 if (!group) in iommu_detach_device()
2145 mutex_lock(&group->mutex); in iommu_detach_device()
2146 if (WARN_ON(domain != group->domain) || in iommu_detach_device()
2147 WARN_ON(list_count_nodes(&group->devices) != 1)) in iommu_detach_device()
2149 __iommu_group_set_core_domain(group); in iommu_detach_device()
2152 mutex_unlock(&group->mutex); in iommu_detach_device()
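iommu_attach_device() deliberately refuses multi-device groups (the list_count_nodes() checks above), since attaching one device would silently affect its group siblings. A minimal sketch, assuming the caller allocated the domain elsewhere (my_run_on_private_domain is hypothetical):

	static int my_run_on_private_domain(struct iommu_domain *domain,
					    struct device *dev)
	{
		int ret;

		ret = iommu_attach_device(domain, dev);
		if (ret)
			return ret;	/* e.g. device shares its group */
		/* ... iommu_map()/DMA work against the private domain ... */
		iommu_detach_device(domain, dev);
		return 0;
	}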
2159 struct iommu_group *group = dev->iommu_group; in iommu_get_domain_for_dev() local
2161 if (!group) in iommu_get_domain_for_dev()
2164 return group->domain; in iommu_get_domain_for_dev()
2178 struct iommu_group *group) in __iommu_attach_group() argument
2182 if (group->domain && group->domain != group->default_domain && in __iommu_attach_group()
2183 group->domain != group->blocking_domain) in __iommu_attach_group()
2186 dev = iommu_group_first_dev(group); in __iommu_attach_group()
2190 return __iommu_group_set_domain(group, domain); in __iommu_attach_group()
2205 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
2209 mutex_lock(&group->mutex); in iommu_attach_group()
2210 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
2211 mutex_unlock(&group->mutex); in iommu_attach_group()
2228 int iommu_group_replace_domain(struct iommu_group *group, in iommu_group_replace_domain() argument
2236 mutex_lock(&group->mutex); in iommu_group_replace_domain()
2237 ret = __iommu_group_set_domain(group, new_domain); in iommu_group_replace_domain()
2238 mutex_unlock(&group->mutex); in iommu_group_replace_domain()
2243 static int __iommu_device_set_domain(struct iommu_group *group, in __iommu_device_set_domain() argument
2259 new_domain == group->blocking_domain)) { in __iommu_device_set_domain()
2266 if (new_domain == group->default_domain) in __iommu_device_set_domain()
2279 group->blocking_domain && in __iommu_device_set_domain()
2280 group->blocking_domain != new_domain) in __iommu_device_set_domain()
2281 __iommu_attach_device(group->blocking_domain, dev); in __iommu_device_set_domain()
2302 static int __iommu_group_set_domain_internal(struct iommu_group *group, in __iommu_group_set_domain_internal() argument
2311 lockdep_assert_held(&group->mutex); in __iommu_group_set_domain_internal()
2313 if (group->domain == new_domain) in __iommu_group_set_domain_internal()
2326 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2327 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, in __iommu_group_set_domain_internal()
2343 group->domain = new_domain; in __iommu_group_set_domain_internal()
2352 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2358 if (group->domain) in __iommu_group_set_domain_internal()
2360 group, gdev->dev, group->domain, in __iommu_group_set_domain_internal()
2368 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
2370 mutex_lock(&group->mutex); in iommu_detach_group()
2371 __iommu_group_set_core_domain(group); in iommu_detach_group()
2372 mutex_unlock(&group->mutex); in iommu_detach_group()
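iommu_attach_group() switches every member device onto the caller's domain, and iommu_detach_group() returns the group to its core domain (default, or blocking while owned). A minimal sketch (my_run_with_group_domain is hypothetical):

	static int my_run_with_group_domain(struct iommu_domain *domain,
					    struct iommu_group *group)
	{
		int ret;

		ret = iommu_attach_group(domain, group);
		if (ret)
			return ret;	/* group already on a non-core domain */
		/* ... DMA through the caller-owned domain ... */
		iommu_detach_group(domain, group);
		return 0;
	}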
2945 static int iommu_setup_default_domain(struct iommu_group *group, in iommu_setup_default_domain() argument
2948 struct iommu_domain *old_dom = group->default_domain; in iommu_setup_default_domain()
2955 lockdep_assert_held(&group->mutex); in iommu_setup_default_domain()
2957 req_type = iommu_get_default_domain_type(group, target_type); in iommu_setup_default_domain()
2961 dom = iommu_group_alloc_default_domain(group, req_type); in iommu_setup_default_domain()
2965 if (group->default_domain == dom) in iommu_setup_default_domain()
2974 for_each_group_device(group, gdev) { in iommu_setup_default_domain()
2984 group->default_domain = dom; in iommu_setup_default_domain()
2985 if (!group->domain) { in iommu_setup_default_domain()
2993 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); in iommu_setup_default_domain()
2997 ret = __iommu_group_set_domain(group, dom); in iommu_setup_default_domain()
3009 for_each_group_device(group, gdev) { in iommu_setup_default_domain()
3024 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); in iommu_setup_default_domain()
3028 group->default_domain = old_dom; in iommu_setup_default_domain()
3042 static ssize_t iommu_group_store_type(struct iommu_group *group, in iommu_group_store_type() argument
3051 if (WARN_ON(!group) || !group->default_domain) in iommu_group_store_type()
3065 mutex_lock(&group->mutex); in iommu_group_store_type()
3068 group->default_domain->type == IOMMU_DOMAIN_DMA) { in iommu_group_store_type()
3069 ret = iommu_dma_init_fq(group->default_domain); in iommu_group_store_type()
3073 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; in iommu_group_store_type()
3079 if (list_empty(&group->devices) || group->owner_cnt) { in iommu_group_store_type()
3084 ret = iommu_setup_default_domain(group, req_type); in iommu_group_store_type()
3089 for_each_group_device(group, gdev) in iommu_group_store_type()
3093 mutex_unlock(&group->mutex); in iommu_group_store_type()
3108 struct iommu_group *group = dev->iommu_group; in iommu_device_use_default_domain() local
3111 if (!group) in iommu_device_use_default_domain()
3114 mutex_lock(&group->mutex); in iommu_device_use_default_domain()
3115 if (group->owner_cnt) { in iommu_device_use_default_domain()
3116 if (group->domain != group->default_domain || group->owner || in iommu_device_use_default_domain()
3117 !xa_empty(&group->pasid_array)) { in iommu_device_use_default_domain()
3123 group->owner_cnt++; in iommu_device_use_default_domain()
3126 mutex_unlock(&group->mutex); in iommu_device_use_default_domain()
3141 struct iommu_group *group = dev->iommu_group; in iommu_device_unuse_default_domain() local
3143 if (!group) in iommu_device_unuse_default_domain()
3146 mutex_lock(&group->mutex); in iommu_device_unuse_default_domain()
3147 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) in iommu_device_unuse_default_domain()
3148 group->owner_cnt--; in iommu_device_unuse_default_domain()
3150 mutex_unlock(&group->mutex); in iommu_device_unuse_default_domain()
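iommu_device_use_default_domain() and iommu_device_unuse_default_domain() bracket kernel-driver use of a device: while the count is elevated, user-space ownership claims are refused, and vice versa. A minimal sketch of how a bind path might pair them (my_driver_bind is hypothetical; in practice the driver core does this around probe):

	static int my_driver_bind(struct device *dev)
	{
		int ret;

		ret = iommu_device_use_default_domain(dev);
		if (ret)
			return ret;	/* -EBUSY: group claimed by an owner */
		/* ... driver uses the DMA API via the default domain ... */
		iommu_device_unuse_default_domain(dev);
		return 0;
	}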
3153 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) in __iommu_group_alloc_blocking_domain() argument
3157 if (group->blocking_domain) in __iommu_group_alloc_blocking_domain()
3160 domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED); in __iommu_group_alloc_blocking_domain()
3166 domain = __iommu_group_domain_alloc(group, in __iommu_group_alloc_blocking_domain()
3171 group->blocking_domain = domain; in __iommu_group_alloc_blocking_domain()
3175 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) in __iommu_take_dma_ownership() argument
3179 if ((group->domain && group->domain != group->default_domain) || in __iommu_take_dma_ownership()
3180 !xa_empty(&group->pasid_array)) in __iommu_take_dma_ownership()
3183 ret = __iommu_group_alloc_blocking_domain(group); in __iommu_take_dma_ownership()
3186 ret = __iommu_group_set_domain(group, group->blocking_domain); in __iommu_take_dma_ownership()
3190 group->owner = owner; in __iommu_take_dma_ownership()
3191 group->owner_cnt++; in __iommu_take_dma_ownership()
3204 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) in iommu_group_claim_dma_owner() argument
3211 mutex_lock(&group->mutex); in iommu_group_claim_dma_owner()
3212 if (group->owner_cnt) { in iommu_group_claim_dma_owner()
3217 ret = __iommu_take_dma_ownership(group, owner); in iommu_group_claim_dma_owner()
3219 mutex_unlock(&group->mutex); in iommu_group_claim_dma_owner()
3237 struct iommu_group *group = dev->iommu_group; in iommu_device_claim_dma_owner() local
3243 if (!group) in iommu_device_claim_dma_owner()
3246 mutex_lock(&group->mutex); in iommu_device_claim_dma_owner()
3247 if (group->owner_cnt) { in iommu_device_claim_dma_owner()
3248 if (group->owner != owner) { in iommu_device_claim_dma_owner()
3252 group->owner_cnt++; in iommu_device_claim_dma_owner()
3256 ret = __iommu_take_dma_ownership(group, owner); in iommu_device_claim_dma_owner()
3258 mutex_unlock(&group->mutex); in iommu_device_claim_dma_owner()
3263 static void __iommu_release_dma_ownership(struct iommu_group *group) in __iommu_release_dma_ownership() argument
3265 if (WARN_ON(!group->owner_cnt || !group->owner || in __iommu_release_dma_ownership()
3266 !xa_empty(&group->pasid_array))) in __iommu_release_dma_ownership()
3269 group->owner_cnt = 0; in __iommu_release_dma_ownership()
3270 group->owner = NULL; in __iommu_release_dma_ownership()
3271 __iommu_group_set_domain_nofail(group, group->default_domain); in __iommu_release_dma_ownership()
3280 void iommu_group_release_dma_owner(struct iommu_group *group) in iommu_group_release_dma_owner() argument
3282 mutex_lock(&group->mutex); in iommu_group_release_dma_owner()
3283 __iommu_release_dma_ownership(group); in iommu_group_release_dma_owner()
3284 mutex_unlock(&group->mutex); in iommu_group_release_dma_owner()
3297 struct iommu_group *group = dev->iommu_group; in iommu_device_release_dma_owner() local
3299 mutex_lock(&group->mutex); in iommu_device_release_dma_owner()
3300 if (group->owner_cnt > 1) in iommu_device_release_dma_owner()
3301 group->owner_cnt--; in iommu_device_release_dma_owner()
3303 __iommu_release_dma_ownership(group); in iommu_device_release_dma_owner()
3304 mutex_unlock(&group->mutex); in iommu_device_release_dma_owner()
3315 bool iommu_group_dma_owner_claimed(struct iommu_group *group) in iommu_group_dma_owner_claimed() argument
3319 mutex_lock(&group->mutex); in iommu_group_dma_owner_claimed()
3320 user = group->owner_cnt; in iommu_group_dma_owner_claimed()
3321 mutex_unlock(&group->mutex); in iommu_group_dma_owner_claimed()
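The ownership API is the VFIO-style handshake: claiming with a non-NULL cookie parks the group on its blocking domain and locks out kernel-driver DMA until released. A minimal sketch (my_claim_group is hypothetical):

	static int my_claim_group(struct iommu_group *group, void *cookie)
	{
		int ret;

		/* cookie identifies the owner and must be non-NULL */
		ret = iommu_group_claim_dma_owner(group, cookie);
		if (ret)
			return ret;	/* fails while the group is otherwise in use */

		WARN_ON(!iommu_group_dma_owner_claimed(group));
		/* ... attach a user-controlled domain via iommu_attach_group() ... */
		iommu_group_release_dma_owner(group);
		return 0;
	}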
3328 struct iommu_group *group, ioasid_t pasid) in __iommu_set_group_pasid() argument
3333 for_each_group_device(group, device) { in __iommu_set_group_pasid()
3343 for_each_group_device(group, device) { in __iommu_set_group_pasid()
3353 static void __iommu_remove_group_pasid(struct iommu_group *group, in __iommu_remove_group_pasid() argument
3360 for_each_group_device(group, device) { in __iommu_remove_group_pasid()
3380 struct iommu_group *group = dev->iommu_group; in iommu_attach_device_pasid() local
3387 if (!group) in iommu_attach_device_pasid()
3394 mutex_lock(&group->mutex); in iommu_attach_device_pasid()
3395 for_each_group_device(group, device) { in iommu_attach_device_pasid()
3405 ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL); in iommu_attach_device_pasid()
3409 ret = __iommu_set_group_pasid(domain, group, pasid); in iommu_attach_device_pasid()
3411 xa_erase(&group->pasid_array, pasid); in iommu_attach_device_pasid()
3413 mutex_unlock(&group->mutex); in iommu_attach_device_pasid()
3431 struct iommu_group *group = dev->iommu_group; in iommu_detach_device_pasid() local
3433 mutex_lock(&group->mutex); in iommu_detach_device_pasid()
3434 __iommu_remove_group_pasid(group, pasid, domain); in iommu_detach_device_pasid()
3435 xa_erase(&group->pasid_array, pasid); in iommu_detach_device_pasid()
3436 mutex_unlock(&group->mutex); in iommu_detach_device_pasid()
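iommu_attach_device_pasid() installs a domain for one PASID of the device and records it in group->pasid_array; detaching erases the entry. The signature has shifted across kernel versions, so this minimal sketch assumes the variant shown here, which takes an optional attach handle, and passes none (my_bind_pasid is hypothetical):

	static int my_bind_pasid(struct iommu_domain *domain, struct device *dev,
				 ioasid_t pasid)
	{
		int ret;

		ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
		if (ret)
			return ret;
		/* ... issue PASID-tagged DMA translated by this domain ... */
		iommu_detach_device_pasid(domain, dev, pasid);
		return 0;
	}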
3482 iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type) in iommu_attach_handle_get() argument
3486 xa_lock(&group->pasid_array); in iommu_attach_handle_get()
3487 handle = xa_load(&group->pasid_array, pasid); in iommu_attach_handle_get()
3492 xa_unlock(&group->pasid_array); in iommu_attach_handle_get()
3511 struct iommu_group *group, in iommu_attach_group_handle() argument
3519 mutex_lock(&group->mutex); in iommu_attach_group_handle()
3520 ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); in iommu_attach_group_handle()
3524 ret = __iommu_attach_group(domain, group); in iommu_attach_group_handle()
3527 mutex_unlock(&group->mutex); in iommu_attach_group_handle()
3531 xa_erase(&group->pasid_array, IOMMU_NO_PASID); in iommu_attach_group_handle()
3533 mutex_unlock(&group->mutex); in iommu_attach_group_handle()
3547 struct iommu_group *group) in iommu_detach_group_handle() argument
3549 mutex_lock(&group->mutex); in iommu_detach_group_handle()
3550 __iommu_group_set_core_domain(group); in iommu_detach_group_handle()
3551 xa_erase(&group->pasid_array, IOMMU_NO_PASID); in iommu_detach_group_handle()
3552 mutex_unlock(&group->mutex); in iommu_detach_group_handle()
3566 int iommu_replace_group_handle(struct iommu_group *group, in iommu_replace_group_handle() argument
3576 mutex_lock(&group->mutex); in iommu_replace_group_handle()
3578 ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL); in iommu_replace_group_handle()
3584 ret = __iommu_group_set_domain(group, new_domain); in iommu_replace_group_handle()
3588 curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); in iommu_replace_group_handle()
3591 mutex_unlock(&group->mutex); in iommu_replace_group_handle()
3595 xa_release(&group->pasid_array, IOMMU_NO_PASID); in iommu_replace_group_handle()
3597 mutex_unlock(&group->mutex); in iommu_replace_group_handle()