/linux/drivers/vfio/

container.c
      20  struct list_head group_list;  [member]
     209  if (!list_empty(&container->group_list) &&  in vfio_container_ioctl_check_extension()
     242  list_for_each_entry(group, &container->group_list, container_next) {  in __vfio_container_attach_groups()
     252  list_for_each_entry_continue_reverse(group, &container->group_list,  in __vfio_container_attach_groups()
     276  if (list_empty(&container->group_list) || container->iommu_driver) {  in vfio_ioctl_set_iommu()
     367  INIT_LIST_HEAD(&container->group_list);  in vfio_fops_open()
     430  if (!list_empty(&container->group_list) &&  in vfio_container_attach_group()
     458  list_add(&group->container_next, &container->group_list);  in vfio_container_attach_group()
     491  if (driver && list_empty(&container->group_list)) {  in vfio_group_detach_container()
vfio_iommu_spapr_tce.c
      71  struct list_head group_list;  [member]
     200  return !list_empty(&container->group_list);  in tce_groups_attached()
     280  tcegrp = list_first_entry(&container->group_list,  in tce_iommu_enable()
     330  INIT_LIST_HEAD_RCU(&container->group_list);  in tce_iommu_open()
     352  tcegrp = list_first_entry(&container->group_list,  in tce_iommu_release()
     657  tcegrp = list_first_entry(&container->group_list,  in tce_iommu_create_window()
     683  list_for_each_entry(tcegrp, &container->group_list, next) {  in tce_iommu_create_window()
     699  list_for_each_entry(tcegrp, &container->group_list, next) {  in tce_iommu_create_window()
     725  list_for_each_entry(tcegrp, &container->group_list, next) {  in tce_iommu_remove_window()
     761  tcegrp = list_first_entry(&container->group_list,  in tce_iommu_create_default_window()
    [all …]
group.c
      20  struct list_head group_list;  [member]
     524  list_for_each_entry(group, &vfio.group_list, vfio_next) {  in vfio_group_find_from_iommu()
     607  list_add(&group->vfio_next, &vfio.group_list);  in vfio_create_group()
     928  INIT_LIST_HEAD(&vfio.group_list);  in vfio_group_init()
     958  WARN_ON(!list_empty(&vfio.group_list));  in vfio_group_cleanup()
vfio_iommu_type1.c
      82  struct list_head group_list;  [member]
    1853  list_for_each_entry(g, &domain->group_list, next) {  in find_iommu_group()
    2238  INIT_LIST_HEAD(&domain->group_list);  in vfio_iommu_type1_attach_group()
    2239  list_add(&group->next, &domain->group_list);  in vfio_iommu_type1_attach_group()
    2273  list_add(&group->next, &d->group_list);  in vfio_iommu_type1_attach_group()
    2414  list_for_each_entry(g, &d->group_list, next) {  in vfio_iommu_resv_refresh()
    2489  if (list_empty(&domain->group_list)) {  in vfio_iommu_type1_detach_group()
    2564  &domain->group_list, next) {  in vfio_release_domain()
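The four vfio files above all use the same embedded-list idiom from <linux/list.h>: a container or domain object owns a group_list head, and each attached group carries the link field. Below is a minimal sketch of that pattern; the demo_* names are invented stand-ins for the real vfio structures, while the list.h calls are the actual kernel API shown in the hits.

/*
 * Illustrative only: demo_* names are hypothetical; INIT_LIST_HEAD,
 * list_add, list_empty and list_for_each_entry are the real kernel API
 * used by the files listed above.
 */
#include <linux/list.h>
#include <linux/types.h>

struct demo_group {
        int                     id;
        struct list_head        container_next; /* link into container->group_list */
};

struct demo_container {
        struct list_head        group_list;     /* list head owned by the container */
};

static void demo_container_init(struct demo_container *container)
{
        INIT_LIST_HEAD(&container->group_list); /* head points at itself: empty */
}

static void demo_attach_group(struct demo_container *container,
                              struct demo_group *group)
{
        list_add(&group->container_next, &container->group_list);
}

static bool demo_container_in_use(struct demo_container *container)
{
        return !list_empty(&container->group_list);
}

static struct demo_group *demo_find_group(struct demo_container *container, int id)
{
        struct demo_group *group;

        /* walk every attached group via its embedded link field */
        list_for_each_entry(group, &container->group_list, container_next)
                if (group->id == id)
                        return group;
        return NULL;
}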
/linux/drivers/infiniband/hw/mlx4/

mcg.c
     134  struct list_head group_list;  [member]
     550  req = list_first_entry(&group->pending_list, struct mcast_req, group_list);  in mlx4_ib_mcg_timeout_handler()
     551  list_del(&req->group_list);  in mlx4_ib_mcg_timeout_handler()
     599  list_del(&req->group_list);  in handle_leave_req()
     621  list_del(&req->group_list);  in handle_join_req()
     630  list_del(&req->group_list);  in handle_join_req()
     675  struct mcast_req, group_list);  in mlx4_ib_mcg_work_handler()
     680  list_del(&req->group_list);  in mlx4_ib_mcg_work_handler()
     711  group_list);  in mlx4_ib_mcg_work_handler()
     767  struct mcast_req, group_list);  in search_relocate_mgid0_group()
    [all …]
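The mcg.c hits show the other common shape: group_list is the link field inside a request, and requests are consumed FIFO-style with list_first_entry() followed by list_del(). A small sketch of that idiom, with invented demo_* names:

/*
 * Sketch of the consume-oldest-request idiom; demo_* names are hypothetical.
 */
#include <linux/list.h>

struct demo_req {
        struct list_head        group_list;     /* link into group->pending_list */
};

struct demo_mcast_group {
        struct list_head        pending_list;   /* queued demo_req entries */
};

static struct demo_req *demo_pop_request(struct demo_mcast_group *group)
{
        struct demo_req *req;

        if (list_empty(&group->pending_list))
                return NULL;

        /* oldest entry first, then unlink it so the caller owns it */
        req = list_first_entry(&group->pending_list, struct demo_req, group_list);
        list_del(&req->group_list);
        return req;
}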
/linux/drivers/net/ethernet/netronome/nfp/flower/

lag_conf.c
     136  list_add_tail(&group->list, &lag->group_list);  in nfp_fl_lag_group_create()
     150  list_for_each_entry(entry, &lag->group_list, list)  in nfp_fl_lag_find_group_for_master_with_lag()
     308  list_for_each_entry_safe(entry, storage, &lag->group_list, list) {  in nfp_fl_lag_do_work()
     486  list_for_each_entry(group_entry, &priv->nfp_lag.group_list,  in nfp_flower_lag_unprocessed_msg()
     700  INIT_LIST_HEAD(&lag->group_list);  in nfp_flower_lag_init()
     720  list_for_each_entry_safe(entry, storage, &lag->group_list, list) {  in nfp_flower_lag_cleanup()
main.h
     236  struct list_head group_list;  [member]
/linux/drivers/dma/ppc4xx/

adma.h
     150  struct list_head group_list;  /* list */  [member]
/linux/security/tomoyo/

memory.c
     111  list = &param->ns->group_list[idx];  in tomoyo_get_group()
gc.c
     563  struct list_head *list = &ns->group_list[i];  in tomoyo_collect_entry()
common.h
     906  struct list_head group_list[TOMOYO_MAX_GROUP];  [member]
common.c
     346  INIT_LIST_HEAD(&ns->group_list[idx]);  in tomoyo_init_policy_namespace()
    1793  struct list_head *list = &ns->group_list[idx];  in tomoyo_read_group()
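In TOMOYO, group_list is an array of list heads, one per group type, indexed by idx (common.h line 906, with the per-index INIT_LIST_HEAD() loop in common.c). A sketch of that layout; DEMO_MAX_GROUP and the demo_* structures are made up for illustration, only the list.h calls are real:

/*
 * Sketch only: the array-of-list-heads layout mirrored from the hits above.
 */
#include <linux/list.h>

#define DEMO_MAX_GROUP 3

struct demo_namespace {
        struct list_head group_list[DEMO_MAX_GROUP];    /* one list per group type */
};

struct demo_group {
        struct list_head head;          /* link into ns->group_list[idx] */
};

static void demo_init_namespace(struct demo_namespace *ns)
{
        int idx;

        for (idx = 0; idx < DEMO_MAX_GROUP; idx++)
                INIT_LIST_HEAD(&ns->group_list[idx]);
}

static void demo_add_group(struct demo_namespace *ns,
                           struct demo_group *group, unsigned int idx)
{
        list_add_tail(&group->head, &ns->group_list[idx]);
}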
/linux/drivers/iommu/

iommu.c
     515  static int __iommu_probe_device(struct device *dev, struct list_head *group_list)  in __iommu_probe_device()  [argument]
     570  } else if (!group->default_domain && !group_list) {  in __iommu_probe_device()
     581  list_add_tail(&group->entry, group_list);  in __iommu_probe_device()
    1695  struct list_head *group_list = data;  in probe_iommu_group()  [local]
    1699  ret = __iommu_probe_device(dev, group_list);  in probe_iommu_group()
    1849  LIST_HEAD(group_list);  in bus_iommu_probe()
    1852  ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);  in bus_iommu_probe()
    1856  list_for_each_entry_safe(group, next, &group_list, entry) {  in bus_iommu_probe()
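Here group_list is not a long-lived member: bus_iommu_probe() collects newly probed groups on a caller-local LIST_HEAD(), passes it to the per-device callback as opaque data, then drains it with list_for_each_entry_safe(). A sketch of that flow; the demo_* names and the simplified iteration are assumptions, the list.h calls are real:

/*
 * Sketch of collecting items on an on-stack list head passed through a
 * callback as void *data.
 */
#include <linux/list.h>

struct demo_group {
        struct list_head entry;         /* link into the caller's group_list */
};

static int demo_collect(struct demo_group *group, void *data)
{
        struct list_head *group_list = data;    /* really the caller's list head */

        list_add_tail(&group->entry, group_list);
        return 0;
}

static void demo_probe_all(struct demo_group *groups, int n)
{
        struct demo_group *group, *next;
        LIST_HEAD(group_list);          /* declared and initialised on the stack */
        int i;

        for (i = 0; i < n; i++)
                demo_collect(&groups[i], &group_list);

        /* _safe variant: each entry is unlinked while the list is walked */
        list_for_each_entry_safe(group, next, &group_list, entry) {
                list_del(&group->entry);
                /* ... per-group finalisation would go here ... */
        }
}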
/linux/drivers/thunderbolt/

tb.c
    1414  list_for_each_entry(in, &group->ports, group_list) {  in tb_recalc_estimated_bandwidth_for_group()
    1532  in = list_first_entry(&group->ports, struct tb_port, group_list);  in __configure_group_sym()
    1573  list_add_tail(&in->group_list, &group->ports);  in tb_bandwidth_group_attach_port()
    1653  list_del_init(&in->group_list);  in tb_detach_bandwidth_group()
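The thunderbolt hits detach with list_del_init() rather than list_del(): plain list_del() poisons the node's pointers, while list_del_init() re-initialises it, so a later list_empty() on the node itself can tell whether the port is still in a bandwidth group. A brief sketch of that detach idiom with hypothetical demo_* types:

/*
 * Sketch of detaching with list_del_init(); demo_* names are hypothetical.
 */
#include <linux/list.h>
#include <linux/types.h>

struct demo_port {
        struct list_head group_list;    /* INIT_LIST_HEAD() this before first use */
};

struct demo_bw_group {
        struct list_head ports;
};

static void demo_attach_port(struct demo_bw_group *group, struct demo_port *port)
{
        list_add_tail(&port->group_list, &group->ports);
}

static void demo_detach_port(struct demo_port *port)
{
        /* re-initialises the node, unlike list_del(), so list_empty() works below */
        list_del_init(&port->group_list);
}

static bool demo_port_grouped(struct demo_port *port)
{
        return !list_empty(&port->group_list);
}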
/linux/Documentation/security/

credentials.rst
     457  groups_sort() must not be called on a ``struct group_list`` which