/linux/drivers/infiniband/hw/mlx4/mcg.c
    50  #define mcg_warn_group(group, format, arg...) \   [argument]
    52  (group)->name, group->demux->port, ## arg)
    54  #define mcg_debug_group(group, format, arg...) \   [argument]
    56  (group)->name, (group)->demux->port, ## arg)
    58  #define mcg_error_group(group, format, arg...) \   [argument]
    59  pr_err(" %16s: " format, (group)->name, ## arg)
   136  struct mcast_group *group;   [member]
   144  mcg_warn_group(group, "did not expect to reach zero\n"); \
   166  struct mcast_group *group;   [in mcast_find() local]
   170  group = rb_entry(node, struct mcast_group, node);   [in mcast_find()]
   [all …]

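The mcg.c macros above wrap pr_warn()/pr_err() so every message is prefixed with the multicast group's name and port, relying on the GNU "## arg" extension to swallow the trailing comma when no varargs are passed. A minimal sketch of that pattern follows; the struct layout is a stand-in, only the name and demux->port fields come from the fragments above.

/* Sketch only: stand-in types; everything beyond name/port is illustrative. */
#include <linux/printk.h>

struct ex_demux {
	int port;
};

struct ex_mcast_group {
	char name[33];
	struct ex_demux *demux;
};

/* "## arg" removes the trailing comma when no varargs are given */
#define ex_warn_group(group, format, arg...) \
	pr_warn("%16s (port %d): " format, (group)->name, (group)->demux->port, ## arg)

#define ex_err_group(group, format, arg...) \
	pr_err("%16s: " format, (group)->name, ## arg)

static void ex_report(struct ex_mcast_group *group, int refcount)
{
	if (refcount < 0)
		ex_err_group(group, "refcount went negative: %d\n", refcount);
	else
		ex_warn_group(group, "did not expect to reach zero\n");
}
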
/linux/fs/notify/notification.c
    50  void fsnotify_destroy_event(struct fsnotify_group *group,   [in fsnotify_destroy_event() argument]
    54  if (!event || event == group->overflow_event)   [in fsnotify_destroy_event()]
    63  spin_lock(&group->notification_lock);   [in fsnotify_destroy_event()]
    65  spin_unlock(&group->notification_lock);   [in fsnotify_destroy_event()]
    67  group->ops->free_event(group, event);   [in fsnotify_destroy_event()]
    81  int fsnotify_insert_event(struct fsnotify_group *group,   [in fsnotify_insert_event() argument]
    89  struct list_head *list = &group->notification_list;   [in fsnotify_insert_event()]
    91  pr_debug("%s: group=%p event=%p\n", __func__, group, event);   [in fsnotify_insert_event()]
    93  spin_lock(&group->notification_lock);   [in fsnotify_insert_event()]
    95  if (group->shutdown) {   [in fsnotify_insert_event()]
   [all …]

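The notification.c hits show fsnotify queueing and destroying events under group->notification_lock and refusing new events once group->shutdown is set. Below is a minimal sketch of that lock, check, append pattern with simplified stand-in types; the merging and overflow-event handling done by the real fsnotify_insert_event() is omitted.

/* Sketch only: simplified stand-ins for the fsnotify types. */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ex_notif_group {
	spinlock_t notification_lock;
	bool shutdown;
	struct list_head notification_list;
};

struct ex_notif_event {
	struct list_head list;
};

static int ex_insert_event(struct ex_notif_group *group, struct ex_notif_event *event)
{
	int ret = 0;

	spin_lock(&group->notification_lock);
	if (group->shutdown) {
		/* group is being torn down: refuse new events */
		ret = -ESHUTDOWN;
	} else {
		list_add_tail(&event->list, &group->notification_list);
	}
	spin_unlock(&group->notification_lock);

	return ret;
}
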
/linux/fs/notify/mark.c
   183  highest_prio = first_mark->group->priority;   [in fsnotify_update_sb_watchers()]
   368  struct fsnotify_group *group = mark->group;   [in fsnotify_final_mark_destroy() local]
   370  if (WARN_ON_ONCE(!group))   [in fsnotify_final_mark_destroy()]
   372  group->ops->free_mark(mark);   [in fsnotify_final_mark_destroy()]
   373  fsnotify_put_group(group);   [in fsnotify_final_mark_destroy()]
   463  atomic_inc(&mark->group->user_waits);   [in fsnotify_get_mark_safe()]
   481  struct fsnotify_group *group = mark->group;   [in fsnotify_put_mark_wake() local]
   488  if (atomic_dec_and_test(&group->user_waits) && group->shutdown)   [in fsnotify_put_mark_wake()]
   489  wake_up(&group->notification_waitq);   [in fsnotify_put_mark_wake()]
   543  fsnotify_group_assert_locked(mark->group);   [in fsnotify_detach_mark()]
   [all …]

/linux/drivers/iommu/iommu.c
    77  #define for_each_group_device(group, pos) \   [argument]
    78  list_for_each_entry(pos, &(group)->devices, list)
    82  ssize_t (*show)(struct iommu_group *group, char *buf);
    83  ssize_t (*store)(struct iommu_group *group,
   105  struct iommu_group *group);
   114  static int __iommu_device_set_domain(struct iommu_group *group,
   119  static int __iommu_group_set_domain_internal(struct iommu_group *group,
   122  static int __iommu_group_set_domain(struct iommu_group *group,   [in __iommu_group_set_domain() argument]
   125  return __iommu_group_set_domain_internal(group, new_domain, 0);   [in __iommu_group_set_domain()]
   127  static void __iommu_group_set_domain_nofail(struct iommu_group *group,   [in __iommu_group_set_domain_nofail() argument]
   [all …]

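for_each_group_device() above is a thin iterator macro over the group's device list. A sketch of the same convenience-macro idiom, with stand-in types instead of the real struct iommu_group and its per-device list entries:

/* Sketch only: stand-in types. */
#include <linux/list.h>

struct ex_group_device {
	struct list_head list;
	const char *name;
};

struct ex_iommu_group {
	struct list_head devices;
};

/* named iterator hiding the list_for_each_entry() plumbing */
#define for_each_ex_group_device(group, pos) \
	list_for_each_entry(pos, &(group)->devices, list)

static int ex_count_devices(struct ex_iommu_group *group)
{
	struct ex_group_device *dev;
	int n = 0;

	for_each_ex_group_device(group, dev)
		n++;
	return n;
}
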
/linux/drivers/iommu/io-pgfault.c
    42  static void __iopf_free_group(struct iopf_group *group)   [in __iopf_free_group() argument]
    46  list_for_each_entry_safe(iopf, next, &group->faults, list) {   [in __iopf_free_group()]
    52  iopf_put_dev_fault_param(group->fault_param);   [in __iopf_free_group()]
    55  void iopf_free_group(struct iopf_group *group)   [in iopf_free_group() argument]
    57  __iopf_free_group(group);   [in iopf_free_group()]
    58  kfree(group);   [in iopf_free_group()]
    86  struct iopf_group *group;   [in iopf_group_alloc() local]
    88  group = kzalloc(sizeof(*group), GFP_KERNEL);   [in iopf_group_alloc()]
    89  if (!group) {   [in iopf_group_alloc()]
    94  group = abort_group;   [in iopf_group_alloc()]
   [all …]

/linux/drivers/infiniband/core/multicast.c
   117  struct mcast_group *group;   [member]
   133  struct mcast_group *group;   [in mcast_find() local]
   137  group = rb_entry(node, struct mcast_group, node);   [in mcast_find()]
   138  ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);   [in mcast_find()]
   140  return group;   [in mcast_find()]
   151  struct mcast_group *group,   [in mcast_insert() argument]
   163  ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,   [in mcast_insert()]
   164  sizeof group->rec.mgid);   [in mcast_insert()]
   174  rb_link_node(&group->node, parent, link);   [in mcast_insert()]
   175  rb_insert_color(&group->node, &port->table);   [in mcast_insert()]
   [all …]

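mcast_find() and mcast_insert() above are the canonical kernel rbtree walk: descend comparing keys with memcmp(), return on a match, otherwise remember the parent and link slot, then rb_link_node() and rb_insert_color() the new node. A sketch of that pattern with a simplified node type standing in for struct mcast_group:

/* Sketch only: struct ex_mcg is a reduced stand-in keyed by a 16-byte GID. */
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/types.h>

struct ex_mcg {
	struct rb_node node;
	u8 mgid[16];		/* lookup key */
};

static struct ex_mcg *ex_find(struct rb_root *root, const u8 *mgid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ex_mcg *group = rb_entry(n, struct ex_mcg, node);
		int ret = memcmp(mgid, group->mgid, sizeof(group->mgid));

		if (!ret)
			return group;
		n = ret < 0 ? n->rb_left : n->rb_right;
	}
	return NULL;
}

/* Returns the existing node on a duplicate key, NULL when inserted. */
static struct ex_mcg *ex_insert(struct rb_root *root, struct ex_mcg *group)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct ex_mcg *cur = rb_entry(*link, struct ex_mcg, node);
		int ret = memcmp(group->mgid, cur->mgid, sizeof(group->mgid));

		parent = *link;
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur;
	}

	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, root);
	return NULL;
}
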
/linux/drivers/infiniband/core/cma_configfs.c
    47  struct config_group group;   [member]
    59  struct config_group *group;   [in to_dev_port_group() local]
    64  group = container_of(item, struct config_group, cg_item);   [in to_dev_port_group()]
    65  return container_of(group, struct cma_dev_port_group, group);   [in to_dev_port_group()]
    77  struct cma_dev_port_group *group = to_dev_port_group(item);   [in cma_configfs_params_get() local]
    80  if (!group)   [in cma_configfs_params_get()]
    84  group->cma_dev_group->name);   [in cma_configfs_params_get()]
    89  *pgroup = group;   [in cma_configfs_params_get()]
   103  struct cma_dev_port_group *group;   [in default_roce_mode_show() local]
   107  ret = cma_configfs_params_get(item, &cma_dev, &group);   [in default_roce_mode_show()]
   [all …]

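to_dev_port_group() above recovers the driver-private structure from a configfs item with two container_of() steps: config_item to the enclosing config_group (via cg_item), then config_group to the enclosing private struct. A sketch of the same helper, where struct ex_port_group is a hypothetical stand-in for cma_dev_port_group:

/* Sketch only: ex_port_group is invented for illustration. */
#include <linux/configfs.h>
#include <linux/kernel.h>

struct ex_port_group {
	struct config_group group;
	unsigned int port_num;
};

static struct ex_port_group *to_ex_port_group(struct config_item *item)
{
	struct config_group *group;

	if (!item)
		return NULL;

	/* config_item -> enclosing config_group -> enclosing private struct */
	group = container_of(item, struct config_group, cg_item);
	return container_of(group, struct ex_port_group, group);
}
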
/linux/drivers/gpu/drm/xe/xe_hw_engine_group.c
    19  struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work);   [in hw_engine_group_resume_lr_jobs_func() local]
    23  err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);   [in hw_engine_group_resume_lr_jobs_func()]
    30  list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {   [in hw_engine_group_resume_lr_jobs_func()]
    38  xe_hw_engine_group_put(group);   [in hw_engine_group_resume_lr_jobs_func()]
    44  struct xe_hw_engine_group *group;   [in hw_engine_group_alloc() local]
    47  group = drmm_kzalloc(&xe->drm, sizeof(*group), GFP_KERNEL);   [in hw_engine_group_alloc()]
    48  if (!group)   [in hw_engine_group_alloc()]
    51  group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0);   [in hw_engine_group_alloc()]
    52  if (!group->resume_wq)   [in hw_engine_group_alloc()]
    55  err = drmm_add_action_or_reset(&xe->drm, __drmm_workqueue_release, group->resume_wq);   [in hw_engine_group_alloc()]
   [all …]

/linux/arch/sparc/kernel/hvapi.c
    19  unsigned long group;   [member]
    28  { .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API },
    29  { .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
    30  { .group = HV_GRP_INTR, },
    31  { .group = HV_GRP_SOFT_STATE, },
    32  { .group = HV_GRP_TM, },
    33  { .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
    34  { .group = HV_GRP_LDOM, },
    35  { .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
    36  { .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
   [all …]

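hvapi.c registers hypervisor API groups in a static table of { .group, .flags } initializers. A sketch of that table-plus-lookup idiom; the group numbers and the flag value below are invented for illustration, only the table shape comes from the hits above:

/* Sketch only: invented constants, illustrative lookup. */
#include <stddef.h>

#define EX_FLAG_PRE_API	0x1

struct ex_api_info {
	unsigned long group;
	unsigned int flags;
};

static const struct ex_api_info ex_api_table[] = {
	{ .group = 0x0000, .flags = EX_FLAG_PRE_API },
	{ .group = 0x0001, .flags = EX_FLAG_PRE_API },
	{ .group = 0x0002, },
	{ .group = 0x0100, },
};

static const struct ex_api_info *ex_api_lookup(unsigned long group)
{
	size_t i;

	for (i = 0; i < sizeof(ex_api_table) / sizeof(ex_api_table[0]); i++)
		if (ex_api_table[i].group == group)
			return &ex_api_table[i];
	return NULL;
}
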
/linux/drivers/gpu/drm/panthor/panthor_sched.c
   107  struct panthor_group *group;   [member]
   739  #define group_queue_work(group, wname) \   [argument]
   741  group_get(group); \
   742  if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
   743  group_put(group); \
   809  struct panthor_group *group;   [member]
   874  panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)   [in panthor_queue_get_syncwait_obj() argument]
   876  struct panthor_device *ptdev = group->ptdev;   [in panthor_queue_get_syncwait_obj()]
   884  bo = panthor_vm_get_bo_for_va(group->vm,   [in panthor_queue_get_syncwait_obj()]
   906  static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)   [in group_free_queue() argument]
   [all …]

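group_queue_work() above takes a reference on the group before queue_work() and drops it again when queue_work() returns false, i.e. when the work item was already pending and no extra reference is needed. A sketch of that pattern, using kref as a stand-in for the driver's group_get()/group_put():

/* Sketch only: simplified group with a single work item. */
#include <linux/kref.h>
#include <linux/workqueue.h>

struct ex_sched_group {
	struct kref refcount;
	struct workqueue_struct *wq;
	struct work_struct term_work;
};

static void ex_sched_group_release(struct kref *kref)
{
	/* final teardown would go here */
}

/* take a ref for the work; give it back if the work was already queued */
#define ex_group_queue_work(group, wname)				\
	do {								\
		kref_get(&(group)->refcount);				\
		if (!queue_work((group)->wq, &(group)->wname ## _work))\
			kref_put(&(group)->refcount, ex_sched_group_release); \
	} while (0)

/* usage: ex_group_queue_work(group, term) queues group->term_work */
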
/linux/kernel/sched/psi.c
   205  static void group_init(struct psi_group *group)   [in group_init() argument]
   207  group->enabled = true;   [in group_init()]
   208  group->avg_last_update = sched_clock();   [in group_init()]
   209  group->avg_next_update = group->avg_last_update + psi_period;   [in group_init()]
   210  mutex_init(&group->avgs_lock);   [in group_init()]
   213  INIT_LIST_HEAD(&group->avg_triggers);   [in group_init()]
   214  memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));   [in group_init()]
   215  INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);   [in group_init()]
   218  atomic_set(&group->rtpoll_scheduled, 0);   [in group_init()]
   219  mutex_init(&group->rtpoll_trigger_lock);   [in group_init()]
   [all …]

/linux/drivers/gpio/gpio-lpc32xx.c
   168  static inline u32 gpreg_read(struct lpc32xx_gpio_chip *group, unsigned long offset)   [in gpreg_read() argument]
   170  return __raw_readl(group->reg_base + offset);   [in gpreg_read()]
   173  static inline void gpreg_write(struct lpc32xx_gpio_chip *group, u32 val, unsigned long offset)   [in gpreg_write() argument]
   175  __raw_writel(val, group->reg_base + offset);   [in gpreg_write()]
   178  static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group,   [in __set_gpio_dir_p012() argument]
   182  gpreg_write(group, GPIO012_PIN_TO_BIT(pin),   [in __set_gpio_dir_p012()]
   183  group->gpio_grp->dir_clr);   [in __set_gpio_dir_p012()]
   185  gpreg_write(group, GPIO012_PIN_TO_BIT(pin),   [in __set_gpio_dir_p012()]
   186  group->gpio_grp->dir_set);   [in __set_gpio_dir_p012()]
   189  static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group,   [in __set_gpio_dir_p3() argument]
   [all …]

/linux/drivers/clk/renesas/clk-mstp.c
    59  struct mstp_clock_group *group;   [member]
    64  static inline u32 cpg_mstp_read(struct mstp_clock_group *group,   [in cpg_mstp_read() argument]
    67  return group->width_8bit ? readb(reg) : readl(reg);   [in cpg_mstp_read()]
    70  static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,   [in cpg_mstp_write() argument]
    73  group->width_8bit ? writeb(val, reg) : writel(val, reg);   [in cpg_mstp_write()]
    79  struct mstp_clock_group *group = clock->group;   [in cpg_mstp_clock_endisable() local]
    85  spin_lock_irqsave(&group->lock, flags);   [in cpg_mstp_clock_endisable()]
    87  value = cpg_mstp_read(group, group->smstpcr);   [in cpg_mstp_clock_endisable()]
    92  cpg_mstp_write(group, value, group->smstpcr);   [in cpg_mstp_clock_endisable()]
    94  if (!group->mstpsr) {   [in cpg_mstp_clock_endisable()]
   [all …]

/linux/fs/ext2/ialloc.c
    66  static void ext2_release_inode(struct super_block *sb, int group, int dir)   [in ext2_release_inode() argument]
    71  desc = ext2_get_group_desc(sb, group, &bh);   [in ext2_release_inode()]
    74  "can't get descriptor for group %d", group);   [in ext2_release_inode()]
    78  spin_lock(sb_bgl_lock(EXT2_SB(sb), group));   [in ext2_release_inode()]
    82  spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));   [in ext2_release_inode()]
   204  int group, best_group = -1;   [in find_group_dir() local]
   206  for (group = 0; group < ngroups; group++) {   [in find_group_dir()]
   207  desc = ext2_get_group_desc (sb, group, NULL);   [in find_group_dir()]
   215  best_group = group;   [in find_group_dir()]
   265  int group = -1, i;   [in find_group_orlov() local]
   [all …]

/linux/drivers/vfio/container.c
   167  device->group->container->iommu_driver;   [in vfio_device_container_register()]
   171  device->group->container->iommu_data, device);   [in vfio_device_container_register()]
   177  device->group->container->iommu_driver;   [in vfio_device_container_unregister()]
   181  device->group->container->iommu_data, device);   [in vfio_device_container_unregister()]
   239  struct vfio_group *group;   [in __vfio_container_attach_groups() local]
   242  list_for_each_entry(group, &container->group_list, container_next) {   [in __vfio_container_attach_groups()]
   243  ret = driver->ops->attach_group(data, group->iommu_group,   [in __vfio_container_attach_groups()]
   244  group->type);   [in __vfio_container_attach_groups()]
   252  list_for_each_entry_continue_reverse(group, &container->group_list,   [in __vfio_container_attach_groups()]
   254  driver->ops->detach_group(data, group->iommu_group);   [in __vfio_container_attach_groups()]
   [all …]

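__vfio_container_attach_groups() attaches each group in the container's list and, when one attach fails, uses list_for_each_entry_continue_reverse() to detach only the groups that were already attached. A sketch of that attach-with-rollback idiom with simplified stand-in types and callbacks:

/* Sketch only: reduced container/group types, callbacks passed in. */
#include <linux/list.h>

struct ex_vfio_group {
	struct list_head container_next;
	int id;
};

struct ex_vfio_container {
	struct list_head group_list;
};

static int ex_attach_all(struct ex_vfio_container *container,
			 int (*attach)(struct ex_vfio_group *),
			 void (*detach)(struct ex_vfio_group *))
{
	struct ex_vfio_group *group;
	int ret;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = attach(group);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* walk backwards from the failing entry, undoing earlier attaches only */
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next)
		detach(group);
	return ret;
}
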
/linux/drivers/pinctrl/aspeed/pinmux-aspeed.h
   513  #define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group   [argument]
   514  #define SIG_DESC_LIST_DECL(sig, group, ...) \   [argument]
   515  static const struct aspeed_sig_desc SIG_DESC_LIST_SYM(sig, group)[] = \
   518  #define SIG_EXPR_SYM(sig, group) sig_expr_ ## sig ## _ ## group   [argument]
   519  #define SIG_EXPR_DECL_(sig, group, func) \   [argument]
   520  static const struct aspeed_sig_expr SIG_EXPR_SYM(sig, group) = \
   524  .ndescs = ARRAY_SIZE(SIG_DESC_LIST_SYM(sig, group)), \
   525  .descs = &(SIG_DESC_LIST_SYM(sig, group))[0], \
   545  #define SIG_EXPR_DECL(sig, group, func, ...) \   [argument]
   546  SIG_DESC_LIST_DECL(sig, group, __VA_ARGS__); \
   [all …]

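The pinmux macros above paste the signal and group names into unique symbol names so a single declaration macro can emit a descriptor array and an expression struct that references it via ARRAY_SIZE(). A sketch of the same token-pasting scheme; the descriptor fields here are invented and much simpler than aspeed_sig_desc:

/* Sketch only: illustrative descriptor and expression types. */
#include <linux/kernel.h>

struct ex_sig_desc {
	unsigned int reg;
	unsigned int bit;
};

struct ex_sig_expr {
	const char *signal;
	int ndescs;
	const struct ex_sig_desc *descs;
};

#define EX_DESC_SYM(sig, group)	ex_descs_ ## sig ## _ ## group

#define EX_DESC_DECL(sig, group, ...) \
	static const struct ex_sig_desc EX_DESC_SYM(sig, group)[] = \
		{ __VA_ARGS__ }

#define EX_EXPR_DECL(sig, group, ...)					\
	EX_DESC_DECL(sig, group, __VA_ARGS__);				\
	static const struct ex_sig_expr ex_expr_ ## sig ## _ ## group = { \
		.signal = #sig,						\
		.ndescs = ARRAY_SIZE(EX_DESC_SYM(sig, group)),		\
		.descs = &(EX_DESC_SYM(sig, group))[0],			\
	}

/* Expands to ex_descs_SD1_SD1[] plus ex_expr_SD1_SD1 referencing it. */
EX_EXPR_DECL(SD1, SD1, { 0x80, 0 }, { 0x80, 1 });
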
/linux/net/psample/psample.c
    41  struct psample_group *group,   [in psample_group_nl_fill() argument]
    52  ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);   [in psample_group_nl_fill()]
    56  ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);   [in psample_group_nl_fill()]
    60  ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);   [in psample_group_nl_fill()]
    75  struct psample_group *group;   [in psample_nl_cmd_get_group_dumpit() local]
    81  list_for_each_entry(group, &psample_groups_list, list) {   [in psample_nl_cmd_get_group_dumpit()]
    82  if (!net_eq(group->net, sock_net(msg->sk)))   [in psample_nl_cmd_get_group_dumpit()]
    88  err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,   [in psample_nl_cmd_get_group_dumpit()]
   123  static void psample_group_notify(struct psample_group *group,   [in psample_group_notify() argument]
   133  err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);   [in psample_group_notify()]
   [all …]

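psample_group_nl_fill() above emits the group number, reference count and sequence number as u32 netlink attributes, bailing out on the first nla_put_u32() failure. A sketch of that fill pattern with made-up attribute IDs and a reduced group struct; the genlmsg header setup and cancellation done by the real function are omitted:

/* Sketch only: invented attribute enum, reduced group. */
#include <linux/skbuff.h>
#include <net/netlink.h>

enum {
	EX_ATTR_GROUP_NUM = 1,
	EX_ATTR_REFCOUNT,
	EX_ATTR_SEQ,
};

struct ex_psample_group {
	u32 group_num;
	u32 refcount;
	u32 seq;
};

static int ex_group_fill(struct sk_buff *msg, const struct ex_psample_group *group)
{
	int ret;

	ret = nla_put_u32(msg, EX_ATTR_GROUP_NUM, group->group_num);
	if (ret)
		return ret;

	ret = nla_put_u32(msg, EX_ATTR_REFCOUNT, group->refcount);
	if (ret)
		return ret;

	return nla_put_u32(msg, EX_ATTR_SEQ, group->seq);
}
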
/linux/fs/notify/inotify/inotify_fsnotify.c
    49  static int inotify_merge(struct fsnotify_group *group,   [in inotify_merge() argument]
    52  struct list_head *list = &group->notification_list;   [in inotify_merge()]
    66  struct fsnotify_group *group = inode_mark->group;   [in inotify_handle_inode_event() local]
    77  pr_debug("%s: group=%p mark=%p mask=%x\n", __func__, group, inode_mark,   [in inotify_handle_inode_event()]
    95  old_memcg = set_active_memcg(group->memcg);   [in inotify_handle_inode_event()]
   104  fsnotify_queue_overflow(group);   [in inotify_handle_inode_event()]
   126  ret = fsnotify_add_event(group, fsn_event, inotify_merge);   [in inotify_handle_inode_event()]
   129  fsnotify_destroy_event(group, fsn_event);   [in inotify_handle_inode_event()]
   133  fsnotify_destroy_mark(inode_mark, group);   [in inotify_handle_inode_event()]
   138  static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)   [in inotify_freeing_mark() argument]
   [all …]

/linux/drivers/iommu/iommufd/eventq.c
    23  struct iopf_group *group, *next;   [in iommufd_auto_response_faults() local]
    33  list_for_each_entry_safe(group, next, &fault->common.deliver, node) {   [in iommufd_auto_response_faults()]
    34  if (group->attach_handle != &handle->handle)   [in iommufd_auto_response_faults()]
    36  list_move(&group->node, &free_list);   [in iommufd_auto_response_faults()]
    40  list_for_each_entry_safe(group, next, &free_list, node) {   [in iommufd_auto_response_faults()]
    41  list_del(&group->node);   [in iommufd_auto_response_faults()]
    42  iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);   [in iommufd_auto_response_faults()]
    43  iopf_free_group(group);   [in iommufd_auto_response_faults()]
    46  xa_for_each(&fault->response, index, group) {   [in iommufd_auto_response_faults()]
    47  if (group->attach_handle != &handle->handle)   [in iommufd_auto_response_faults()]
   [all …]

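iommufd_auto_response_faults() first moves every fault group that belongs to the detached handle onto a private free_list, then responds to and frees them in a second pass. A sketch of that collect-then-dispose pattern with simplified types; respond() and free_group() are hypothetical callbacks standing in for iopf_group_response() and iopf_free_group():

/* Sketch only: reduced fault-group type, callbacks passed in. */
#include <linux/list.h>

struct ex_fault_group {
	struct list_head node;
	void *attach_handle;
};

static void ex_auto_respond(struct list_head *deliver, void *handle,
			    void (*respond)(struct ex_fault_group *),
			    void (*free_group)(struct ex_fault_group *))
{
	struct ex_fault_group *group, *next;
	LIST_HEAD(free_list);

	/* pass 1: pull out every group that belongs to this handle */
	list_for_each_entry_safe(group, next, deliver, node) {
		if (group->attach_handle != handle)
			continue;
		list_move(&group->node, &free_list);
	}

	/* pass 2: answer and release them away from the shared list */
	list_for_each_entry_safe(group, next, &free_list, node) {
		list_del(&group->node);
		respond(group);
		free_group(group);
	}
}
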
/linux/arch/s390/tools/gen_opcode_table.c
    46  struct insn_group *group;   [member]
   263  struct insn_group *group;   [in add_to_group() local]
   265  group = desc->group ? &desc->group[desc->nr_groups - 1] : NULL;   [in add_to_group()]
   266  if (group && (!strncmp(group->opcode, insn->opcode, 2) || group->type->byte == 0)) {   [in add_to_group()]
   267  group->count++;   [in add_to_group()]
   271  desc->group = realloc(desc->group, desc->nr_groups * sizeof(*desc->group));   [in add_to_group()]
   272  if (!desc->group)   [in add_to_group()]
   274  group = &desc->group[desc->nr_groups - 1];   [in add_to_group()]
   275  memcpy(group->opcode, insn->opcode, 2);   [in add_to_group()]
   276  group->type = insn->type;   [in add_to_group()]
   [all …]

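add_to_group() in this host-side tool either extends the current group when the two-byte opcode prefix matches or realloc()s the group array and starts a new one. A sketch of that grow-or-extend logic in plain userspace C, with the fields reduced to what the hits show, error handling collapsed to exit(), and the type->byte check omitted:

/* Sketch only: reduced fields and error handling. */
#include <stdlib.h>
#include <string.h>

struct ex_opcode_group {
	char opcode[2];
	int count;
};

struct ex_desc {
	struct ex_opcode_group *group;
	int nr_groups;
};

static void ex_add_to_group(struct ex_desc *desc, const char *opcode)
{
	struct ex_opcode_group *group;

	/* reuse the last group when the opcode prefix matches */
	group = desc->nr_groups ? &desc->group[desc->nr_groups - 1] : NULL;
	if (group && !strncmp(group->opcode, opcode, 2)) {
		group->count++;
		return;
	}

	/* otherwise grow the array and start a new group */
	desc->nr_groups++;
	desc->group = realloc(desc->group, desc->nr_groups * sizeof(*desc->group));
	if (!desc->group)
		exit(EXIT_FAILURE);

	group = &desc->group[desc->nr_groups - 1];
	memcpy(group->opcode, opcode, 2);
	group->count = 1;
}
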
/linux/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
   108  struct nfp_fl_lag_group *group;   [in nfp_fl_lag_group_create() local]
   122  group = kmalloc(sizeof(*group), GFP_KERNEL);   [in nfp_fl_lag_group_create()]
   123  if (!group) {   [in nfp_fl_lag_group_create()]
   128  group->group_id = id;   [in nfp_fl_lag_group_create()]
   129  group->master_ndev = master;   [in nfp_fl_lag_group_create()]
   130  group->dirty = true;   [in nfp_fl_lag_group_create()]
   131  group->offloaded = false;   [in nfp_fl_lag_group_create()]
   132  group->to_remove = false;   [in nfp_fl_lag_group_create()]
   133  group->to_destroy = false;   [in nfp_fl_lag_group_create()]
   134  group->slave_cnt = 0;   [in nfp_fl_lag_group_create()]
   [all …]

/linux/drivers/pci/endpoint/pci-ep-cfs.c
    23  struct config_group group;   [member]
    32  struct config_group group;   [member]
    39  return container_of(to_config_group(item), struct pci_epf_group, group);   [in to_pci_epf_group()]
    44  return container_of(to_config_group(item), struct pci_epc_group, group);   [in to_pci_epc_group()]
   106  configfs_register_group(&epf_group->group, secondary_epc_group);   [in pci_ep_cfs_add_secondary_group()]
   169  configfs_register_group(&epf_group->group, primary_epc_group);   [in pci_ep_cfs_add_primary_group()]
   275  struct config_group *group;   [in pci_ep_cfs_add_epc_group() local]
   284  group = &epc_group->group;   [in pci_ep_cfs_add_epc_group()]
   286  config_group_init_type_name(group, name, &pci_epc_type);   [in pci_ep_cfs_add_epc_group()]
   287  ret = configfs_register_group(controllers_group, group);   [in pci_ep_cfs_add_epc_group()]
   [all …]

/linux/drivers/soundwire/generic_bandwidth_allocation.c
   228  struct sdw_group *group)   [in sdw_compute_group_params() argument]
   237  for (i = 0; i < group->count; i++) {   [in sdw_compute_group_params()]
   238  params[i].rate = group->rates[i];   [in sdw_compute_group_params()]
   239  params[i].lane = group->lanes[i];   [in sdw_compute_group_params()]
   263  for (i = 0; i < group->count; i++) {   [in sdw_compute_group_params()]
   275  for (i = 0; i < group->count; i++) {   [in sdw_compute_group_params()]
   296  static int sdw_add_element_group_count(struct sdw_group *group,   [in sdw_add_element_group_count() argument]
   299  int num = group->count;   [in sdw_add_element_group_count()]
   303  if (rate == group->rates[i] && lane == group->lanes[i])   [in sdw_add_element_group_count()]
   309  if (group->count >= group->max_size) {   [in sdw_add_element_group_count()]
   [all …]

/linux/sound/core/control_led.c
    40  unsigned int group;   [member]
    60  .group = (SNDRV_CTL_ELEM_ACCESS_SPK_LED >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) - 1,
    66  .group = (SNDRV_CTL_ELEM_ACCESS_MIC_LED >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) - 1,
    88  static inline unsigned int group_to_access(unsigned int group)   [in group_to_access() argument]
    90  return (group + 1) << SNDRV_CTL_ELEM_ACCESS_LED_SHIFT;   [in group_to_access()]
    95  unsigned int group = access_to_group(access);   [in snd_ctl_led_get_by_access() local]
    96  if (group >= MAX_LED)   [in snd_ctl_led_get_by_access()]
    98  return &snd_ctl_leds[group];   [in snd_ctl_led_get_by_access()]
   189  unsigned int group;   [in snd_ctl_led_find() local]
   191  for (group = 0; group < MAX_LED; group++) {   [in snd_ctl_led_find()]
   [all …]

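group_to_access() and access_to_group() above map an LED group index to a control-access bitfield with a shift of (group + 1) and back. A sketch of that round trip; EX_LED_SHIFT and the two-bit mask are assumptions, not the real SNDRV_CTL_ELEM_ACCESS_LED_SHIFT/MASK values:

/* Sketch only: made-up shift and mask width. */
#define EX_LED_SHIFT	24	/* assumed shift, not the real constant */

static inline unsigned int ex_group_to_access(unsigned int group)
{
	/* group 0 encodes as bit pattern 1 so "no LED bits set" stays distinct */
	return (group + 1) << EX_LED_SHIFT;
}

static inline unsigned int ex_access_to_group(unsigned int access)
{
	return ((access >> EX_LED_SHIFT) & 0x3) - 1;	/* two LED bits assumed */
}

/* ex_access_to_group(ex_group_to_access(g)) == g for small g */
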
/linux/arch/mips/rb532/irq.c
    86  static inline int group_to_ip(unsigned int group)   [in group_to_ip() argument]
    88  return group + 2;   [in group_to_ip()]
   114  unsigned int group, intr_bit, irq_nr = d->irq;   [in rb532_enable_irq() local]
   121  group = ip >> 5;   [in rb532_enable_irq()]
   126  enable_local_irq(group_to_ip(group));   [in rb532_enable_irq()]
   128  addr = intr_group[group].base_addr;   [in rb532_enable_irq()]
   135  unsigned int group, intr_bit, mask, irq_nr = d->irq;   [in rb532_disable_irq() local]
   142  group = ip >> 5;   [in rb532_disable_irq()]
   146  addr = intr_group[group].base_addr;   [in rb532_disable_irq()]
   152  if (group == GPIO_MAPPED_IRQ_GROUP && irq_nr <= (GROUP4_IRQ_BASE + 13))   [in rb532_disable_irq()]
   [all …]