
Searched full:group (Results 1 – 25 of 2970) sorted by relevance


/linux/drivers/vfio/
group.c
26 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, in vfio_device_get_from_name() argument
31 mutex_lock(&group->device_lock); in vfio_device_get_from_name()
32 list_for_each_entry(it, &group->device_list, group_next) { in vfio_device_get_from_name()
50 mutex_unlock(&group->device_lock); in vfio_device_get_from_name()
56 * VFIO Group fd, /dev/vfio/$GROUP
58 static bool vfio_group_has_iommu(struct vfio_group *group) in vfio_group_has_iommu() argument
60 lockdep_assert_held(&group->group_lock); in vfio_group_has_iommu()
65 WARN_ON(!group->container != !group->container_users); in vfio_group_has_iommu()
67 return group->container || group->iommufd; in vfio_group_has_iommu()
73 * the group, we know that still exists, therefore the only valid
[all …]
/linux/drivers/infiniband/hw/mlx4/
mcg.c
50 #define mcg_warn_group(group, format, arg...) \ argument
52 (group)->name, group->demux->port, ## arg)
54 #define mcg_debug_group(group, format, arg...) \ argument
56 (group)->name, (group)->demux->port, ## arg)
58 #define mcg_error_group(group, format, arg...) \ argument
59 pr_err(" %16s: " format, (group)->name, ## arg)
136 struct mcast_group *group; member
144 mcg_warn_group(group, "did not expect to reach zero\n"); \
166 struct mcast_group *group; in mcast_find() local
170 group = rb_entry(node, struct mcast_group, node); in mcast_find()
[all …]
/linux/drivers/pinctrl/meson/
pinctrl-meson-g12a.c
533 GROUP(emmc_nand_d0, 1),
534 GROUP(emmc_nand_d1, 1),
535 GROUP(emmc_nand_d2, 1),
536 GROUP(emmc_nand_d3, 1),
537 GROUP(emmc_nand_d4, 1),
538 GROUP(emmc_nand_d5, 1),
539 GROUP(emmc_nand_d6, 1),
540 GROUP(emmc_nand_d7, 1),
541 GROUP(emmc_clk, 1),
542 GROUP(emmc_cmd, 1),
[all …]
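
These Meson/Amlogic pinctrl hits are entries in macro-built pin-group tables: each GROUP() line declares one named pin group, and in the newer drivers (g12a, c3, s4, t7, axg, a1) the second argument appears to be the mux function value programmed for that group, while the meson8/meson8b/gxl/gxbb tables further down use a three-argument GROUP(name, reg, bit) form instead. As a rough illustration of the pattern only, here is a minimal, self-contained sketch of such a macro-built table; the struct layout and field names are assumptions, not the kernel's struct meson_pmx_group:

/*
 * Illustrative sketch only: a simplified analogue of how a GROUP()
 * macro can expand into entries of a pin-group table. Hypothetical
 * types, not the kernel's actual pinctrl structures.
 */
#include <stdio.h>

struct pin_group {
        const char *name;   /* group name, e.g. "emmc_nand_d0" */
        unsigned int func;  /* mux function value for this group */
};

/* Two-argument form, as in the G12A/AXG-style tables above. */
#define GROUP(grp, f)   { .name = #grp, .func = (f) }

static const struct pin_group emmc_groups[] = {
        GROUP(emmc_nand_d0, 1),
        GROUP(emmc_clk, 1),
        GROUP(emmc_cmd, 1),
};

int main(void)
{
        for (size_t i = 0; i < sizeof(emmc_groups) / sizeof(emmc_groups[0]); i++)
                printf("%-14s -> mux function %u\n",
                       emmc_groups[i].name, emmc_groups[i].func);
        return 0;
}
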
pinctrl-amlogic-c3.c
436 GROUP(pwm_a, 1),
437 GROUP(pwm_b, 1),
438 GROUP(i2c2_sda, 1),
439 GROUP(i2c2_scl, 1),
440 GROUP(gen_clk_e, 1),
443 GROUP(i2c0_sda_e, 2),
444 GROUP(i2c0_scl_e, 2),
445 GROUP(clk_32k_in, 2),
448 GROUP(i2c_slave_scl, 3),
449 GROUP(i2c_slave_sda, 3),
[all …]
pinctrl-meson-s4.c
506 GROUP(i2c0_sda, 1),
507 GROUP(i2c0_scl, 1),
510 GROUP(uart_b_tx_e, 2),
511 GROUP(uart_b_rx_e, 2),
514 GROUP(pwm_h, 3),
515 GROUP(pwm_j, 3),
518 GROUP(emmc_nand_d0, 1),
519 GROUP(emmc_nand_d1, 1),
520 GROUP(emmc_nand_d2, 1),
521 GROUP(emmc_nand_d3, 1),
[all …]
pinctrl-amlogic-t7.c
708 GROUP(emmc_nand_d0, 1),
709 GROUP(emmc_nand_d1, 1),
710 GROUP(emmc_nand_d2, 1),
711 GROUP(emmc_nand_d3, 1),
712 GROUP(emmc_nand_d4, 1),
713 GROUP(emmc_nand_d5, 1),
714 GROUP(emmc_nand_d6, 1),
715 GROUP(emmc_nand_d7, 1),
716 GROUP(emmc_clk, 1),
717 GROUP(emmc_cmd, 1),
[all …]
pinctrl-meson-axg.c
448 GROUP(emmc_nand_d0, 1),
449 GROUP(emmc_nand_d1, 1),
450 GROUP(emmc_nand_d2, 1),
451 GROUP(emmc_nand_d3, 1),
452 GROUP(emmc_nand_d4, 1),
453 GROUP(emmc_nand_d5, 1),
454 GROUP(emmc_nand_d6, 1),
455 GROUP(emmc_nand_d7, 1),
456 GROUP(emmc_clk, 1),
457 GROUP(emmc_cmd, 1),
[all …]
pinctrl-meson-a1.c
407 GROUP(psram_clkn, 1),
408 GROUP(psram_clkp, 1),
409 GROUP(psram_ce_n, 1),
410 GROUP(psram_rst_n, 1),
411 GROUP(psram_adq0, 1),
412 GROUP(psram_adq1, 1),
413 GROUP(psram_adq2, 1),
414 GROUP(psram_adq3, 1),
415 GROUP(psram_adq4, 1),
416 GROUP(psram_adq5, 1),
[all …]
pinctrl-meson8.c
531 GROUP(sd_d0_a, 8, 5),
532 GROUP(sd_d1_a, 8, 4),
533 GROUP(sd_d2_a, 8, 3),
534 GROUP(sd_d3_a, 8, 2),
535 GROUP(sd_clk_a, 8, 1),
536 GROUP(sd_cmd_a, 8, 0),
538 GROUP(sdxc_d0_a, 5, 14),
539 GROUP(sdxc_d13_a, 5, 13),
540 GROUP(sdxc_d47_a, 5, 12),
541 GROUP(sdxc_clk_a, 5, 11),
[all …]
pinctrl-meson8b.c
444 GROUP(sd_d0_a, 8, 5),
445 GROUP(sd_d1_a, 8, 4),
446 GROUP(sd_d2_a, 8, 3),
447 GROUP(sd_d3_a, 8, 2),
448 GROUP(sdxc_d0_0_a, 5, 29),
449 GROUP(sdxc_d47_a, 5, 12),
450 GROUP(sdxc_d13_0_a, 5, 28),
451 GROUP(sd_clk_a, 8, 1),
452 GROUP(sd_cmd_a, 8, 0),
453 GROUP(xtal_32k_out, 3, 22),
[all …]
pinctrl-meson-gxl.c
414 GROUP(sdio_d0, 5, 31),
415 GROUP(sdio_d1, 5, 30),
416 GROUP(sdio_d2, 5, 29),
417 GROUP(sdio_d3, 5, 28),
418 GROUP(sdio_clk, 5, 27),
419 GROUP(sdio_cmd, 5, 26),
420 GROUP(sdio_irq, 5, 24),
421 GROUP(uart_tx_a, 5, 19),
422 GROUP(uart_rx_a, 5, 18),
423 GROUP(uart_cts_a, 5, 17),
[all …]
pinctrl-meson-gxbb.c
440 GROUP(sdio_d0, 8, 5),
441 GROUP(sdio_d1, 8, 4),
442 GROUP(sdio_d2, 8, 3),
443 GROUP(sdio_d3, 8, 2),
444 GROUP(sdio_cmd, 8, 1),
445 GROUP(sdio_clk, 8, 0),
446 GROUP(sdio_irq, 8, 11),
447 GROUP(uart_tx_a, 4, 13),
448 GROUP(uart_rx_a, 4, 12),
449 GROUP(uart_cts_a, 4, 11),
[all …]
/linux/drivers/gpu/drm/xe/
xe_hw_engine_group.c
19 struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work); in hw_engine_group_resume_lr_jobs_func() local
23 err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode); in hw_engine_group_resume_lr_jobs_func()
30 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { in hw_engine_group_resume_lr_jobs_func()
38 xe_hw_engine_group_put(group); in hw_engine_group_resume_lr_jobs_func()
44 struct xe_hw_engine_group *group; in hw_engine_group_alloc() local
47 group = drmm_kzalloc(&xe->drm, sizeof(*group), GFP_KERNEL); in hw_engine_group_alloc()
48 if (!group) in hw_engine_group_alloc()
51 group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0); in hw_engine_group_alloc()
52 if (!group->resume_wq) in hw_engine_group_alloc()
55 err = drmm_add_action_or_reset(&xe->drm, __drmm_workqueue_release, group->resume_wq); in hw_engine_group_alloc()
[all …]
/linux/drivers/iommu/
iommu.c
49 /* Tags used with xa_tag_pointer() in group->pasid_array */
77 #define for_each_group_device(group, pos) \ argument
78 list_for_each_entry(pos, &(group)->devices, list)
82 ssize_t (*show)(struct iommu_group *group, char *buf);
83 ssize_t (*store)(struct iommu_group *group,
105 struct iommu_group *group);
114 static int __iommu_device_set_domain(struct iommu_group *group,
118 static int __iommu_group_set_domain_internal(struct iommu_group *group,
121 static int __iommu_group_set_domain(struct iommu_group *group, in __iommu_group_set_domain() argument
124 return __iommu_group_set_domain_internal(group, new_domain, 0); in __iommu_group_set_domain()
[all …]
/linux/drivers/infiniband/core/
multicast.c
117 struct mcast_group *group; member
133 struct mcast_group *group; in mcast_find() local
137 group = rb_entry(node, struct mcast_group, node); in mcast_find()
138 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); in mcast_find()
140 return group; in mcast_find()
151 struct mcast_group *group, in mcast_insert() argument
163 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, in mcast_insert()
164 sizeof group->rec.mgid); in mcast_insert()
174 rb_link_node(&group->node, parent, link); in mcast_insert()
175 rb_insert_color(&group->node, &port->table); in mcast_insert()
[all …]
/linux/fs/notify/
notification.c
7 * Basic idea behind the notification queue: An fsnotify group (like inotify)
10 * event to the group notify queue. Since a single event might need to be on
11 * multiple group's notification queues we can't add the event directly to each
17 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
50 void fsnotify_destroy_event(struct fsnotify_group *group, in fsnotify_destroy_event() argument
53 /* Overflow events are per-group and we don't want to free them */ in fsnotify_destroy_event()
54 if (!event || event == group->overflow_event) in fsnotify_destroy_event()
63 spin_lock(&group->notification_lock); in fsnotify_destroy_event()
65 spin_unlock(&group->notification_lock); in fsnotify_destroy_event()
67 group->ops->free_event(group, event); in fsnotify_destroy_event()
[all …]
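
The notification.c comment above sketches the core design: a single fsnotify event may need to appear on several groups' notification queues at once, so each queue links a small per-group holder that points at the shared event rather than linking the event itself into one list. A minimal userspace sketch of that idea follows; all types and the queue_event() helper are hypothetical and are not the fsnotify API:

/*
 * Illustrative sketch of the "one event, many group queues" idea from
 * the comment above. Hypothetical names; fsnotify's real structures
 * and locking differ.
 */
#include <stdio.h>
#include <stdlib.h>

struct event {
        const char *path;
        int refcount;            /* freed only when no queue references it */
};

struct holder {                  /* one per (event, group) pair */
        struct event *event;
        struct holder *next;
};

struct group {
        const char *name;
        struct holder *queue_head;
};

static void queue_event(struct group *g, struct event *e)
{
        struct holder *h = malloc(sizeof(*h));

        if (!h)
                return;
        h->event = e;
        h->next = g->queue_head;
        g->queue_head = h;
        e->refcount++;
}

int main(void)
{
        struct group inotify_like = { "inotify", NULL };
        struct group fanotify_like = { "fanotify", NULL };
        struct event ev = { "/tmp/file", 0 };

        /* The same event lands on both groups' queues via two holders. */
        queue_event(&inotify_like, &ev);
        queue_event(&fanotify_like, &ev);

        printf("event %s referenced by %d group queues\n", ev.path, ev.refcount);
        return 0;
}
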
/linux/drivers/gpu/drm/panthor/
panthor_sched.c
42 * The scheduling happens at the scheduling group level, each group
51 * rotating the groups passed to the firmware so every group gets
67 * queue ring-buffer, and the group is scheduled for execution if it
70 * Kernel-side group scheduling is timeslice-based. When we have less
73 * groups than slots, we let each group a chance to execute stuff for
75 * to schedule. The group selection algorithm is based on
80 * group/queue state that would be based on information we wouldn't have
82 * reason we don't do 'cooperative' scheduling (encoding FW group slot
84 * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
100 * struct panthor_csg_slot - Command stream group slot
[all …]
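
The panthor_sched.c comment describes timeslice-based group scheduling: with more groups than firmware command-stream-group slots, the driver rotates groups through the slots so every runnable group eventually gets a turn, choosing groups from kernel-side state rather than firmware feedback. A toy round-robin rotation over a fixed slot count, just to make the rotation idea concrete (hypothetical code, not the driver's selection algorithm):

/*
 * Toy illustration of rotating more groups than slots, as described in
 * the panthor_sched.c comment above. Purely hypothetical.
 */
#include <stdio.h>

#define NUM_GROUPS 5
#define NUM_SLOTS  3

int main(void)
{
        const char *groups[NUM_GROUPS] = { "g0", "g1", "g2", "g3", "g4" };
        unsigned int next = 0;  /* index of the next group to schedule */

        for (int tick = 0; tick < 4; tick++) {
                printf("timeslice %d:", tick);
                /* Fill every slot, wrapping around the group list. */
                for (int slot = 0; slot < NUM_SLOTS; slot++) {
                        printf(" slot%d=%s", slot, groups[next]);
                        next = (next + 1) % NUM_GROUPS;
                }
                printf("\n");
        }
        return 0;
}
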
/linux/arch/sparc/kernel/
hvapi.c
19 unsigned long group; member
28 { .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API },
29 { .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
30 { .group = HV_GRP_INTR, },
31 { .group = HV_GRP_SOFT_STATE, },
32 { .group = HV_GRP_TM, },
33 { .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
34 { .group = HV_GRP_LDOM, },
35 { .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
36 { .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
[all …]
/linux/tools/testing/selftests/drivers/net/netdevsim/
nexthop.sh
158 $IP nexthop add id 10 group 1/2
159 nexthop_check "id 10" "id 10 group 1/2 trap"
160 check_err $? "Unexpected nexthop group entry"
169 $IP nexthop add id 10 group 1,20/2,39
170 nexthop_check "id 10" "id 10 group 1,20/2,39 trap"
171 check_err $? "Unexpected weighted nexthop group entry"
180 log_test "Nexthop group add and delete"
194 $IP nexthop add id 10 group 1/2 &> /dev/null
195 check_fail $? "Nexthop group addition succeeded when should fail"
200 log_test "Nexthop group add failure"
[all …]
/linux/drivers/clk/renesas/
clk-mstp.c
32 * struct mstp_clock_group - MSTP gating clocks group
34 * @data: clock specifier translation for clocks in this group
39 * @clks: clocks in this group
54 * @group: MSTP clocks group
59 struct mstp_clock_group *group; member
64 static inline u32 cpg_mstp_read(struct mstp_clock_group *group, in cpg_mstp_read() argument
67 return group->width_8bit ? readb(reg) : readl(reg); in cpg_mstp_read()
70 static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val, in cpg_mstp_write() argument
73 group->width_8bit ? writeb(val, reg) : writel(val, reg); in cpg_mstp_write()
79 struct mstp_clock_group *group = clock->group; in cpg_mstp_clock_endisable() local
[all …]
/linux/drivers/gpio/
gpio-lpc32xx.c
168 static inline u32 gpreg_read(struct lpc32xx_gpio_chip *group, unsigned long offset) in gpreg_read() argument
170 return __raw_readl(group->reg_base + offset); in gpreg_read()
173 static inline void gpreg_write(struct lpc32xx_gpio_chip *group, u32 val, unsigned long offset) in gpreg_write() argument
175 __raw_writel(val, group->reg_base + offset); in gpreg_write()
178 static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group, in __set_gpio_dir_p012() argument
182 gpreg_write(group, GPIO012_PIN_TO_BIT(pin), in __set_gpio_dir_p012()
183 group->gpio_grp->dir_clr); in __set_gpio_dir_p012()
185 gpreg_write(group, GPIO012_PIN_TO_BIT(pin), in __set_gpio_dir_p012()
186 group->gpio_grp->dir_set); in __set_gpio_dir_p012()
189 static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group, in __set_gpio_dir_p3() argument
[all …]
/linux/tools/testing/selftests/net/
fib_nexthops.sh
349 run_cmd "$IP nexthop add id $grpidstr group $grpstr"
350 check_nexthop "id $grpidstr" "id $grpidstr group $grpstr"
371 # create a resilient group with $buckets buckets and dump them
373 run_cmd "$IP nexthop add id 1000 group 100 type resilient buckets $buckets"
439 echo "SKIP: iproute2 too old, missing resilient nexthop group support"
457 # create group with multiple nexthops
460 run_cmd "$IP nexthop add id 102 group 61/62 fdb"
461 check_nexthop "id 102" "id 102 group 61/62 fdb"
462 log_test $? 0 "Fdb Nexthop group with multiple nexthops"
464 ## get nexthop group
[all …]
/linux/kernel/time/
timer_migration.c
22 * lowest level group contains CPUs, the next level groups of CPU groups
43 * Each group has a designated migrator CPU/group as long as a CPU/group is
44 * active in the group. This designated role is necessary to avoid that all
45 * active CPUs in a group try to migrate expired timers from other CPUs,
48 * When a CPU is awake, it checks in it's own timer tick the group
53 * If it finds expired timers in one of the group queues it pulls them over
55 * group and the parent groups if required.
60 * CPU does not queue an event in the LVL0 group. If the next migratable
62 * in the LVL0 group. In both cases the CPU marks itself idle in the LVL0
63 * group.
[all …]
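
The timer_migration.c comment describes a hierarchy: level-0 groups contain CPUs, higher levels contain groups of groups, and each group with at least one active member designates a single migrator so that not every active CPU tries to pull expired timers from idle ones. A compact sketch of propagating an active CPU up such a hierarchy as the designated migrator; the structures and the mark_cpu_active() helper are hypothetical, not the kernel's tmigr implementation:

/*
 * Illustrative sketch of a per-level "designated migrator" in a group
 * hierarchy, as described in the timer_migration.c comment above.
 * Hypothetical structure and field names.
 */
#include <stdio.h>

struct tm_group {
        struct tm_group *parent;
        int migrator_cpu;       /* -1 if no active CPU in this group */
};

/* Propagate an active CPU upwards: each empty ancestor adopts it as migrator. */
static void mark_cpu_active(struct tm_group *g, int cpu)
{
        for (; g; g = g->parent)
                if (g->migrator_cpu < 0)
                        g->migrator_cpu = cpu;
}

int main(void)
{
        struct tm_group top = { NULL, -1 };
        struct tm_group lvl0_a = { &top, -1 };
        struct tm_group lvl0_b = { &top, -1 };

        mark_cpu_active(&lvl0_a, 2);    /* CPU 2 wakes up in group A */
        mark_cpu_active(&lvl0_b, 5);    /* CPU 5 wakes up in group B */

        printf("group A migrator: CPU %d\n", lvl0_a.migrator_cpu);
        printf("group B migrator: CPU %d\n", lvl0_b.migrator_cpu);
        printf("top-level migrator: CPU %d\n", top.migrator_cpu);
        return 0;
}
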
/linux/include/trace/events/
timer_migration.h
11 /* Group events */
14 TP_PROTO(struct tmigr_group *group),
16 TP_ARGS(group),
19 __field( void *, group )
25 __entry->group = group;
26 __entry->lvl = group->level;
27 __entry->numa_node = group->numa_node;
30 TP_printk("group=%p lvl=%d numa=%d",
31 __entry->group, __entry->lvl, __entry->numa_node)
58 TP_printk("group=%p groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
[all …]
/linux/Documentation/devicetree/bindings/pinctrl/
marvell,armada-37xx-pinctrl.txt
32 group: jtag
36 group sdio0
40 group emmc_nb
44 group pwm0
48 group pwm1
52 group pwm2
56 group pwm3
60 group pmic1
64 group pmic0
68 group i2c2
[all …]
