/linux/drivers/vfio/

  group.c
     26  static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
     31  mutex_lock(&group->device_lock);
     32  list_for_each_entry(it, &group->device_list, group_next) {
     50  mutex_unlock(&group->device_lock);
     56  * VFIO Group fd, /dev/vfio/$GROUP
     58  static bool vfio_group_has_iommu(struct vfio_group *group)
     60  lockdep_assert_held(&group->group_lock);
     65  WARN_ON(!group->container != !group->container_users);
     67  return group->container || group->iommufd;
     73  * the group, we know that still exists, therefore the only valid
  [all …]

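The group.c hits above show a recurring kernel pattern: look a device up by name while holding the group's device_lock, and take a reference before the lock is dropped. Below is a minimal user-space sketch of that pattern only; a pthread mutex and a hand-rolled list stand in for the kernel primitives, and every name (demo_group, demo_device, ...) is hypothetical.

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct demo_device {
	const char *name;
	int refcount;
	struct demo_device *next;
};

struct demo_group {
	pthread_mutex_t device_lock;
	struct demo_device *device_list;
};

/* Walk the group's device list under the lock, match by name, and take a
 * reference before unlocking so the caller can safely use the device. */
static struct demo_device *demo_device_get_from_name(struct demo_group *group,
						     const char *buf)
{
	struct demo_device *it, *device = NULL;

	pthread_mutex_lock(&group->device_lock);
	for (it = group->device_list; it; it = it->next) {
		if (strcmp(it->name, buf) == 0) {
			it->refcount++;	/* hold a reference while unlocked */
			device = it;
			break;
		}
	}
	pthread_mutex_unlock(&group->device_lock);
	return device;
}

int main(void)
{
	struct demo_device d1 = { .name = "0000:00:1f.2", .refcount = 1 };
	struct demo_device d0 = { .name = "0000:00:02.0", .refcount = 1, .next = &d1 };
	struct demo_group group = {
		.device_lock = PTHREAD_MUTEX_INITIALIZER,
		.device_list = &d0,
	};
	struct demo_device *dev = demo_device_get_from_name(&group, "0000:00:1f.2");

	printf("found: %s (refcount now %d)\n", dev ? dev->name : "none",
	       dev ? dev->refcount : 0);
	return 0;
}
```
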
/linux/drivers/infiniband/hw/mlx4/

  mcg.c
     50  #define mcg_warn_group(group, format, arg...) \
     52  (group)->name, group->demux->port, ## arg)
     54  #define mcg_debug_group(group, format, arg...) \
     56  (group)->name, (group)->demux->port, ## arg)
     58  #define mcg_error_group(group, format, arg...) \
     59  pr_err(" %16s: " format, (group)->name, ## arg)
    136  struct mcast_group *group;
    144  mcg_warn_group(group, "did not expect to reach zero\n"); \
    166  struct mcast_group *group;
    170  group = rb_entry(node, struct mcast_group, node);
  [all …]

/linux/drivers/iommu/

  iommu.c
     73  #define for_each_group_device(group, pos) \
     74  list_for_each_entry(pos, &(group)->devices, list)
     78  ssize_t (*show)(struct iommu_group *group, char *buf);
     79  ssize_t (*store)(struct iommu_group *group,
    101  struct iommu_group *group);
    110  static int __iommu_device_set_domain(struct iommu_group *group,
    114  static int __iommu_group_set_domain_internal(struct iommu_group *group,
    117  static int __iommu_group_set_domain(struct iommu_group *group,
    120  return __iommu_group_set_domain_internal(group, new_domain, 0);
    122  static void __iommu_group_set_domain_nofail(struct iommu_group *group,
  [all …]

/linux/drivers/gpu/drm/xe/

  xe_hw_engine_group.c
     18  struct xe_hw_engine_group *group = arg;
     20  destroy_workqueue(group->resume_wq);
     21  kfree(group);
     28  struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work);
     32  err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
     39  list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
     47  xe_hw_engine_group_put(group);
     53  struct xe_hw_engine_group *group;
     56  group = kzalloc(sizeof(*group), GFP_KERNEL);
     57  if (!group)
  [all …]

/linux/drivers/pinctrl/meson/

  pinctrl-meson-g12a.c
    527  GROUP(emmc_nand_d0, 1),
    528  GROUP(emmc_nand_d1, 1),
    529  GROUP(emmc_nand_d2, 1),
    530  GROUP(emmc_nand_d3, 1),
    531  GROUP(emmc_nand_d4, 1),
    532  GROUP(emmc_nand_d5, 1),
    533  GROUP(emmc_nand_d6, 1),
    534  GROUP(emmc_nand_d7, 1),
    535  GROUP(emmc_clk, 1),
    536  GROUP(emmc_cmd, 1),
  [all …]

  pinctrl-amlogic-c3.c
    436  GROUP(pwm_a, 1),
    437  GROUP(pwm_b, 1),
    438  GROUP(i2c2_sda, 1),
    439  GROUP(i2c2_scl, 1),
    440  GROUP(gen_clk_e, 1),
    443  GROUP(i2c0_sda_e, 2),
    444  GROUP(i2c0_scl_e, 2),
    445  GROUP(clk_32k_in, 2),
    448  GROUP(i2c_slave_scl, 3),
    449  GROUP(i2c_slave_sda, 3),
  [all …]

  pinctrl-meson-s4.c
    506  GROUP(i2c0_sda, 1),
    507  GROUP(i2c0_scl, 1),
    510  GROUP(uart_b_tx_e, 2),
    511  GROUP(uart_b_rx_e, 2),
    514  GROUP(pwm_h, 3),
    515  GROUP(pwm_j, 3),
    518  GROUP(emmc_nand_d0, 1),
    519  GROUP(emmc_nand_d1, 1),
    520  GROUP(emmc_nand_d2, 1),
    521  GROUP(emmc_nand_d3, 1),
  [all …]

  pinctrl-amlogic-t7.c
    708  GROUP(emmc_nand_d0, 1),
    709  GROUP(emmc_nand_d1, 1),
    710  GROUP(emmc_nand_d2, 1),
    711  GROUP(emmc_nand_d3, 1),
    712  GROUP(emmc_nand_d4, 1),
    713  GROUP(emmc_nand_d5, 1),
    714  GROUP(emmc_nand_d6, 1),
    715  GROUP(emmc_nand_d7, 1),
    716  GROUP(emmc_clk, 1),
    717  GROUP(emmc_cmd, 1),
  [all …]

  pinctrl-meson-axg.c
    448  GROUP(emmc_nand_d0, 1),
    449  GROUP(emmc_nand_d1, 1),
    450  GROUP(emmc_nand_d2, 1),
    451  GROUP(emmc_nand_d3, 1),
    452  GROUP(emmc_nand_d4, 1),
    453  GROUP(emmc_nand_d5, 1),
    454  GROUP(emmc_nand_d6, 1),
    455  GROUP(emmc_nand_d7, 1),
    456  GROUP(emmc_clk, 1),
    457  GROUP(emmc_cmd, 1),
  [all …]

  pinctrl-meson-a1.c
    407  GROUP(psram_clkn, 1),
    408  GROUP(psram_clkp, 1),
    409  GROUP(psram_ce_n, 1),
    410  GROUP(psram_rst_n, 1),
    411  GROUP(psram_adq0, 1),
    412  GROUP(psram_adq1, 1),
    413  GROUP(psram_adq2, 1),
    414  GROUP(psram_adq3, 1),
    415  GROUP(psram_adq4, 1),
    416  GROUP(psram_adq5, 1),
  [all …]

  pinctrl-meson8.c
    531  GROUP(sd_d0_a, 8, 5),
    532  GROUP(sd_d1_a, 8, 4),
    533  GROUP(sd_d2_a, 8, 3),
    534  GROUP(sd_d3_a, 8, 2),
    535  GROUP(sd_clk_a, 8, 1),
    536  GROUP(sd_cmd_a, 8, 0),
    538  GROUP(sdxc_d0_a, 5, 14),
    539  GROUP(sdxc_d13_a, 5, 13),
    540  GROUP(sdxc_d47_a, 5, 12),
    541  GROUP(sdxc_clk_a, 5, 11),
  [all …]

  pinctrl-meson8b.c
    444  GROUP(sd_d0_a, 8, 5),
    445  GROUP(sd_d1_a, 8, 4),
    446  GROUP(sd_d2_a, 8, 3),
    447  GROUP(sd_d3_a, 8, 2),
    448  GROUP(sdxc_d0_0_a, 5, 29),
    449  GROUP(sdxc_d47_a, 5, 12),
    450  GROUP(sdxc_d13_0_a, 5, 28),
    451  GROUP(sd_clk_a, 8, 1),
    452  GROUP(sd_cmd_a, 8, 0),
    453  GROUP(xtal_32k_out, 3, 22),
  [all …]

  pinctrl-meson-gxl.c
    414  GROUP(sdio_d0, 5, 31),
    415  GROUP(sdio_d1, 5, 30),
    416  GROUP(sdio_d2, 5, 29),
    417  GROUP(sdio_d3, 5, 28),
    418  GROUP(sdio_clk, 5, 27),
    419  GROUP(sdio_cmd, 5, 26),
    420  GROUP(sdio_irq, 5, 24),
    421  GROUP(uart_tx_a, 5, 19),
    422  GROUP(uart_rx_a, 5, 18),
    423  GROUP(uart_cts_a, 5, 17),
  [all …]

  pinctrl-meson-gxbb.c
    440  GROUP(sdio_d0, 8, 5),
    441  GROUP(sdio_d1, 8, 4),
    442  GROUP(sdio_d2, 8, 3),
    443  GROUP(sdio_d3, 8, 2),
    444  GROUP(sdio_cmd, 8, 1),
    445  GROUP(sdio_clk, 8, 0),
    446  GROUP(sdio_irq, 8, 11),
    447  GROUP(uart_tx_a, 4, 13),
    448  GROUP(uart_rx_a, 4, 12),
    449  GROUP(uart_cts_a, 4, 11),
  [all …]

/linux/drivers/infiniband/core/

  multicast.c
    117  struct mcast_group *group;
    133  struct mcast_group *group;
    137  group = rb_entry(node, struct mcast_group, node);
    138  ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
    140  return group;
    151  struct mcast_group *group,
    163  ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
    164  sizeof group->rec.mgid);
    174  rb_link_node(&group->node, parent, link);
    175  rb_insert_color(&group->node, &port->table);
  [all …]

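The multicast.c hits show the other common "group" lookup structure in these results: a tree keyed by memcmp() of the 128-bit MGID, with mcast_find() walking it and mcast_insert() linking a new node. The sketch below shows that comparator-driven find/insert pattern using a plain unbalanced binary search tree in place of the kernel's red-black tree; the names are illustrative, not the mlx4/ib_core API.

```c
#include <stdio.h>
#include <string.h>

struct demo_group {
	unsigned char mgid[16];		/* 128-bit multicast GID used as the key */
	struct demo_group *left, *right;
};

/* Descend the tree, branching on the sign of memcmp(), as mcast_find() does. */
static struct demo_group *demo_find(struct demo_group *root,
				    const unsigned char *mgid)
{
	while (root) {
		int ret = memcmp(mgid, root->mgid, sizeof(root->mgid));

		if (ret == 0)
			return root;
		root = ret < 0 ? root->left : root->right;
	}
	return NULL;
}

/* Link a new group into place; return the existing node if the key is taken. */
static struct demo_group *demo_insert(struct demo_group **root,
				      struct demo_group *group)
{
	while (*root) {
		int ret = memcmp(group->mgid, (*root)->mgid, sizeof(group->mgid));

		if (ret == 0)
			return *root;
		root = ret < 0 ? &(*root)->left : &(*root)->right;
	}
	*root = group;
	return NULL;
}

int main(void)
{
	struct demo_group a = { .mgid = { 0xff, 0x12 } };
	struct demo_group b = { .mgid = { 0xff, 0x34 } };
	struct demo_group *root = NULL;

	demo_insert(&root, &a);
	demo_insert(&root, &b);
	printf("found b: %s\n", demo_find(root, b.mgid) ? "yes" : "no");
	return 0;
}
```
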
/linux/fs/notify/

  notification.c
      7  * Basic idea behind the notification queue: An fsnotify group (like inotify)
     10  * event to the group notify queue. Since a single event might need to be on
     11  * multiple group's notification queues we can't add the event directly to each
     17  * another group a new event_holder (from fsnotify_event_holder_cachep) will be
     50  void fsnotify_destroy_event(struct fsnotify_group *group,
     53  /* Overflow events are per-group and we don't want to free them */
     54  if (!event || event == group->overflow_event)
     63  spin_lock(&group->notification_lock);
     65  spin_unlock(&group->notification_lock);
     67  group->ops->free_event(group, event);
  [all …]

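The notification.c comment explains why the destroy path special-cases the group's overflow event: it is allocated once per group and must never be handed to free_event(). A small illustrative sketch of that ownership rule follows; the types and names are made up, not the fsnotify API.

```c
#include <stdio.h>
#include <stdlib.h>

struct demo_event {
	int mask;
};

struct demo_group {
	struct demo_event *overflow_event;	/* owned by the group itself */
};

static void demo_destroy_event(struct demo_group *group, struct demo_event *event)
{
	/* Overflow events are per-group and we don't want to free them */
	if (!event || event == group->overflow_event)
		return;
	free(event);
}

int main(void)
{
	struct demo_event overflow = { .mask = 0 };
	struct demo_group group = { .overflow_event = &overflow };
	struct demo_event *ev = malloc(sizeof(*ev));

	demo_destroy_event(&group, ev);			  /* freed */
	demo_destroy_event(&group, group.overflow_event); /* skipped, still valid */
	printf("overflow event survives: mask=%d\n", overflow.mask);
	return 0;
}
```
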
/linux/drivers/gpu/drm/panthor/

  panthor_sched.c
     41  * The scheduling happens at the scheduling group level, each group
     50  * rotating the groups passed to the firmware so every group gets
     66  * queue ring-buffer, and the group is scheduled for execution if it
     69  * Kernel-side group scheduling is timeslice-based. When we have less
     72  * groups than slots, we let each group a chance to execute stuff for
     74  * to schedule. The group selection algorithm is based on
     79  * group/queue state that would be based on information we wouldn't have
     81  * reason we don't do 'cooperative' scheduling (encoding FW group slot
     83  * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
    102  * struct panthor_csg_slot - Command stream group slot
  [all …]

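The panthor_sched.c comment describes timeslice-based kernel-side scheduling: when there are more groups than firmware slots, the groups occupying the slots are rotated so every group eventually gets to run. The snippet below is only a toy illustration of that rotation idea, not Panthor's actual selection algorithm.

```c
#include <stdio.h>

#define NUM_GROUPS	5	/* more groups than slots */
#define NUM_SLOTS	3

int main(void)
{
	int tick, slot;

	/* Each "tick" (timeslice) a different window of groups fills the slots. */
	for (tick = 0; tick < 4; tick++) {
		printf("tick %d:", tick);
		for (slot = 0; slot < NUM_SLOTS; slot++)
			printf(" slot%d=group%d", slot,
			       (tick * NUM_SLOTS + slot) % NUM_GROUPS);
		printf("\n");
	}
	return 0;
}
```
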
/linux/arch/sparc/kernel/

  hvapi.c
     19  unsigned long group;
     28  { .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API },
     29  { .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
     30  { .group = HV_GRP_INTR, },
     31  { .group = HV_GRP_SOFT_STATE, },
     32  { .group = HV_GRP_TM, },
     33  { .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
     34  { .group = HV_GRP_LDOM, },
     35  { .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
     36  { .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
  [all …]

/linux/Documentation/filesystems/ext4/

  blockgroup.rst
      6  The layout of a standard block group is approximately as follows (each
     13  * - Group 0 Padding
     15  - Group Descriptors
     30  For the special case of block group 0, the first 1024 bytes are unused,
     37  The ext4 driver primarily works with the superblock and the group
     38  descriptors that are found in block group 0. Redundant copies of the
     39  superblock and group descriptors are written to some of the block groups
     42  paragraph for more details). If the group does not have a redundant
     43  copy, the block group begins with the data block bitmap. Note also that
     45  GDT block” space after the block group descriptors and before the start
  [all …]

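The blockgroup.rst excerpt describes how ext4 carves the disk into block groups, with the superblock and group descriptors living in group 0 after 1024 bytes of padding. Below is a short worked example of the usual block-group arithmetic, assuming the common 4 KiB block size, where one block bitmap block covers 8 * block_size blocks.

```c
#include <stdio.h>

int main(void)
{
	unsigned long block_size = 4096;			/* assumed block size */
	unsigned long blocks_per_group = 8 * block_size;	/* bits in one bitmap block */
	unsigned long first_data_block = 0;	/* 0 for 4 KiB blocks, 1 for 1 KiB */
	unsigned long block = 1000000;		/* example block number */

	printf("group size: %lu MiB\n", (blocks_per_group * block_size) >> 20);
	printf("block %lu lives in group %lu\n", block,
	       (block - first_data_block) / blocks_per_group);
	return 0;
}
```
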
/linux/tools/testing/selftests/drivers/net/netdevsim/

  nexthop.sh
    158  $IP nexthop add id 10 group 1/2
    159  nexthop_check "id 10" "id 10 group 1/2 trap"
    160  check_err $? "Unexpected nexthop group entry"
    169  $IP nexthop add id 10 group 1,20/2,39
    170  nexthop_check "id 10" "id 10 group 1,20/2,39 trap"
    171  check_err $? "Unexpected weighted nexthop group entry"
    180  log_test "Nexthop group add and delete"
    194  $IP nexthop add id 10 group 1/2 &> /dev/null
    195  check_fail $? "Nexthop group addition succeeded when should fail"
    200  log_test "Nexthop group add failure"
  [all …]

/linux/drivers/clk/renesas/

  clk-mstp.c
     32  * struct mstp_clock_group - MSTP gating clocks group
     34  * @data: clock specifier translation for clocks in this group
     39  * @clks: clocks in this group
     54  * @group: MSTP clocks group
     59  struct mstp_clock_group *group;
     64  static inline u32 cpg_mstp_read(struct mstp_clock_group *group,
     67  return group->width_8bit ? readb(reg) : readl(reg);
     70  static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,
     73  group->width_8bit ? writeb(val, reg) : writel(val, reg);
     79  struct mstp_clock_group *group = clock->group;
  [all …]

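The clk-mstp.c hits show a clocks group that records whether its MSTP registers are 8-bit or 32-bit wide, with cpg_mstp_read()/cpg_mstp_write() choosing the access size per group. A user-space sketch of that width-dependent accessor pattern follows; plain memory stands in for MMIO and all names are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

struct demo_clock_group {
	int width_8bit;		/* registers are 8-bit wide instead of 32-bit */
};

static uint32_t demo_mstp_read(struct demo_clock_group *group, void *reg)
{
	return group->width_8bit ? *(volatile uint8_t *)reg
				 : *(volatile uint32_t *)reg;
}

static void demo_mstp_write(struct demo_clock_group *group, uint32_t val, void *reg)
{
	if (group->width_8bit)
		*(volatile uint8_t *)reg = (uint8_t)val;
	else
		*(volatile uint32_t *)reg = val;
}

int main(void)
{
	uint32_t fake_reg = 0;	/* stand-in for a memory-mapped register */
	struct demo_clock_group wide = { .width_8bit = 0 };
	struct demo_clock_group narrow = { .width_8bit = 1 };

	demo_mstp_write(&wide, 0xa5a5a5a5, &fake_reg);
	printf("32-bit read: 0x%08x\n", demo_mstp_read(&wide, &fake_reg));
	demo_mstp_write(&narrow, 0xff, &fake_reg);
	printf("8-bit read:  0x%02x\n", demo_mstp_read(&narrow, &fake_reg));
	return 0;
}
```
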
/linux/drivers/gpio/

  gpio-lpc32xx.c
    168  static inline u32 gpreg_read(struct lpc32xx_gpio_chip *group, unsigned long offset)
    170  return __raw_readl(group->reg_base + offset);
    173  static inline void gpreg_write(struct lpc32xx_gpio_chip *group, u32 val, unsigned long offset)
    175  __raw_writel(val, group->reg_base + offset);
    178  static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group,
    182  gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
    183  group->gpio_grp->dir_clr);
    185  gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
    186  group->gpio_grp->dir_set);
    189  static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group,
  [all …]

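The gpio-lpc32xx.c hits show direction changes done through separate set and clear registers (dir_set/dir_clr) rather than a read-modify-write of a single register: the driver writes only the affected pin's bitmask to one of the two registers. The sketch below models that idea with two write-only "registers" updating one emulated state word; the names are made up and no claim is made about the real LPC32xx register polarity.

```c
#include <stdint.h>
#include <stdio.h>

struct demo_gpio_group {
	uint32_t dir_state;	/* emulated hardware direction state */
};

enum demo_reg { DEMO_DIR_SET, DEMO_DIR_CLR };

/* A write to the "set" register sets bits; a write to "clear" clears them. */
static void demo_gpreg_write(struct demo_gpio_group *group, uint32_t mask,
			     enum demo_reg reg)
{
	if (reg == DEMO_DIR_SET)
		group->dir_state |= mask;
	else
		group->dir_state &= ~mask;
}

static void demo_set_dir(struct demo_gpio_group *group, unsigned int pin, int set)
{
	demo_gpreg_write(group, 1u << pin, set ? DEMO_DIR_SET : DEMO_DIR_CLR);
}

int main(void)
{
	struct demo_gpio_group group = { 0 };

	demo_set_dir(&group, 3, 1);
	demo_set_dir(&group, 5, 1);
	demo_set_dir(&group, 3, 0);
	printf("direction state: 0x%08x\n", group.dir_state);	/* only bit 5 set */
	return 0;
}
```
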
/linux/tools/testing/selftests/net/

  fib_nexthops.sh
    347  run_cmd "$IP nexthop add id $grpidstr group $grpstr"
    348  check_nexthop "id $grpidstr" "id $grpidstr group $grpstr"
    369  # create a resilient group with $buckets buckets and dump them
    371  run_cmd "$IP nexthop add id 1000 group 100 type resilient buckets $buckets"
    437  echo "SKIP: iproute2 too old, missing resilient nexthop group support"
    455  # create group with multiple nexthops
    458  run_cmd "$IP nexthop add id 102 group 61/62 fdb"
    459  check_nexthop "id 102" "id 102 group 61/62 fdb"
    460  log_test $? 0 "Fdb Nexthop group with multiple nexthops"
    462  ## get nexthop group
  [all …]

/linux/kernel/time/

  timer_migration.c
     22  * lowest level group contains CPUs, the next level groups of CPU groups
     43  * Each group has a designated migrator CPU/group as long as a CPU/group is
     44  * active in the group. This designated role is necessary to avoid that all
     45  * active CPUs in a group try to migrate expired timers from other CPUs,
     48  * When a CPU is awake, it checks in it's own timer tick the group
     53  * If it finds expired timers in one of the group queues it pulls them over
     55  * group and the parent groups if required.
     60  * CPU does not queue an event in the LVL0 group. If the next migratable
     62  * in the LVL0 group. In both cases the CPU marks itself idle in the LVL0
     63  * group.
  [all …]

/linux/include/trace/events/

  timer_migration.h
     11  /* Group events */
     14  TP_PROTO(struct tmigr_group *group),
     16  TP_ARGS(group),
     19  __field( void *, group )
     25  __entry->group = group;
     26  __entry->lvl = group->level;
     27  __entry->numa_node = group->numa_node;
     30  TP_printk("group=%p lvl=%d numa=%d",
     31  __entry->group, __entry->lvl, __entry->numa_node)
     58  TP_printk("group=%p groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
  [all …]
