/linux/drivers/soc/ti/
knav_qmss_acc.c
    283  cmd->command, cmd->queue_mask, cmd->list_dma,  in knav_acc_write()
    289  writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);  in knav_acc_write()
    308  u32 queue_mask;  in knav_acc_setup_cmd() (local)
    313  queue_mask = BIT(range->num_queues) - 1;  in knav_acc_setup_cmd()
    317  queue_mask = 0;  in knav_acc_setup_cmd()
    322  cmd->queue_mask = queue_mask;  in knav_acc_setup_cmd()
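Lines 313 and 317 show the standard contiguous-range idiom: BIT(n) - 1 sets the low n bits. A minimal userspace sketch of the same idiom; BIT() is a local stand-in for the kernel macro from linux/bits.h, and num_queues is assumed to be below 32:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1U << (n))  /* local stand-in for the kernel's BIT() */

    /* Contiguous-range mask, as knav_acc_setup_cmd() builds it: the low
     * num_queues bits set.  Assumes num_queues < 32. */
    static uint32_t all_queues_mask(unsigned int num_queues)
    {
        return BIT(num_queues) - 1;  /* e.g. 4 queues -> 0xf */
    }

    int main(void)
    {
        printf("0x%x\n", all_queues_mask(4));  /* prints 0xf */
        return 0;
    }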
|
knav_qmss.h
     89  u32 queue_mask;  (member)
|
/linux/Documentation/networking/device_drivers/ethernet/intel/
idpf.rst
    121  # ethtool --per-queue <ethX> queue_mask 0xa --coalesce adaptive-rx off
    126  # ethtool --per-queue <ethX> queue_mask 0xa --show-coalesce
|
ice.rst
   1159  # ethtool --per-queue <ethX> queue_mask 0xa --coalesce adaptive-rx off
   1164  # ethtool --per-queue <ethX> queue_mask 0xa --show-coalesce
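Both Intel documents use the same --per-queue syntax: queue_mask is a hexadecimal bitmap of queue indices, so 0xa (binary 1010) addresses queues 1 and 3. A minimal C sketch of how such a mask decodes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mask = 0xa;  /* 0b1010: bits 1 and 3 set */

        for (unsigned int q = 0; mask; q++, mask >>= 1)
            if (mask & 1)
                printf("queue %u selected\n", q);  /* prints 1, then 3 */
        return 0;
    }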
|
/linux/drivers/net/ethernet/marvell/
mv643xx_eth.c
   2255  u8 queue_mask;  in mv643xx_eth_poll() (local)
   2266  queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;  in mv643xx_eth_poll()
   2268  queue_mask |= mp->work_rx_refill;  in mv643xx_eth_poll()
   2270  if (!queue_mask) {  in mv643xx_eth_poll()
   2276  queue = fls(queue_mask) - 1;  in mv643xx_eth_poll()
   2277  queue_mask = 1 << queue;  in mv643xx_eth_poll()
   2283  if (mp->work_tx_end & queue_mask) {  in mv643xx_eth_poll()
   2285  } else if (mp->work_tx & queue_mask) {  in mv643xx_eth_poll()
   2288  } else if (mp->work_rx & queue_mask) {  in mv643xx_eth_poll()
   2290  } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {  in mv643xx_eth_poll()
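mv643xx_eth_poll() services the highest-numbered queue with pending work first: fls() finds the most significant set bit and the index is turned back into a single-bit mask (lines 2276-2277). A userspace sketch of that selection loop, with fls_() standing in for the kernel's fls() from linux/bitops.h:

    #include <stdio.h>

    /* 1-based index of the most significant set bit; 0 if none set. */
    static int fls_(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        unsigned int queue_mask = 0x0b;       /* work pending on queues 0, 1, 3 */

        while (queue_mask) {
            int queue = fls_(queue_mask) - 1; /* highest-numbered queue first */
            printf("servicing queue %d\n", queue);
            queue_mask &= ~(1u << queue);
        }
        return 0;
    }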
|
/linux/drivers/gpu/drm/panthor/
panthor_sched.c
   1423  u32 queue_mask = 0, i;  in csg_slot_prog_locked() (local)
   1444  queue_mask |= BIT(i);  in csg_slot_prog_locked()
   1472  panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);  in csg_slot_prog_locked()
   2672  static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)  in group_schedule_locked() (argument)
   2683  if ((queue_mask & group->blocked_queues) == queue_mask)  in group_schedule_locked()
   2687  group->idle_queues &= ~queue_mask;  in group_schedule_locked()
   3361  u32 queue_mask = BIT(job->queue_idx);  in queue_run_job() (local)
   3363  (group->idle_queues & queue_mask) &&  in queue_run_job()
   3364  !(group->blocked_queues & queue_mask) &&  in queue_run_job()
   3368  group->idle_queues &= ~queue_mask;  in queue_run_job()
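The condition at line 2683 is a subset test: scheduling is skipped only when every queue in queue_mask is also in blocked_queues. A small sketch of the predicate:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True when every bit of queue_mask is also set in blocked_queues. */
    static bool all_blocked(uint32_t queue_mask, uint32_t blocked_queues)
    {
        return (queue_mask & blocked_queues) == queue_mask;
    }

    int main(void)
    {
        printf("%d\n", all_blocked(0x3, 0x1)); /* 0: queue 1 can still run */
        printf("%d\n", all_blocked(0x3, 0x7)); /* 1: both queues blocked */
        return 0;
    }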
|
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_gfx.c
    676  uint64_t queue_mask = ~0ULL;  in amdgpu_gfx_mes_enable_kcq() (local)
    690  kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);  in amdgpu_gfx_mes_enable_kcq()
    715  uint64_t queue_mask = 0;  in amdgpu_gfx_enable_kcq() (local)
    731  if (WARN_ON(i > (sizeof(queue_mask)*8))) {  in amdgpu_gfx_enable_kcq()
    736  queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));  in amdgpu_gfx_enable_kcq()
    754  kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);  in amdgpu_gfx_enable_kcq()
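amdgpu_gfx_enable_kcq() accumulates the 64-bit mask one queue at a time, warning if an index would shift past the width of the mask; the driver also remaps each index through amdgpu_queue_mask_bit_to_set_resource_bit(), which this sketch omits. A minimal sketch, assuming eight queues and an identity mapping:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t queue_mask = 0;
        unsigned int num_queues = 8;  /* assumed queue count */

        for (unsigned int i = 0; i < num_queues; i++) {
            if (i >= sizeof(queue_mask) * 8)
                break;                /* would shift past the 64-bit mask */
            queue_mask |= 1ull << i;  /* identity mapping for illustration */
        }
        printf("0x%llx\n", (unsigned long long)queue_mask);  /* 0xff */
        return 0;
    }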
|
gfx_v12_1.c
     98  uint64_t queue_mask)  in gfx_v12_1_kiq_set_resources() (argument)
    103  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */  in gfx_v12_1_kiq_set_resources()
    104  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */  in gfx_v12_1_kiq_set_resources()
|
gfx_v12_0.c
    293  uint64_t queue_mask)  in gfx_v12_0_kiq_set_resources() (argument)
    298  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */  in gfx_v12_0_kiq_set_resources()
    299  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */  in gfx_v12_0_kiq_set_resources()
|
gfx_v11_0.c
    356  static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)  in gfx11_kiq_set_resources() (argument)
    368  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */  in gfx11_kiq_set_resources()
    369  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */  in gfx11_kiq_set_resources()
|
gfx_v9_0.c
    928  uint64_t queue_mask)  in gfx_v9_0_kiq_set_resources() (argument)
    942  lower_32_bits(queue_mask)); /* queue mask lo */  in gfx_v9_0_kiq_set_resources()
    944  upper_32_bits(queue_mask)); /* queue mask hi */  in gfx_v9_0_kiq_set_resources()
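All four kiq_set_resources() variants above emit the 64-bit queue mask as two 32-bit ring words. A userspace sketch of that split, with local stand-ins for the kernel's lower_32_bits()/upper_32_bits():

    #include <stdio.h>
    #include <stdint.h>

    #define lower_32_bits(v) ((uint32_t)((v) & 0xffffffffu))
    #define upper_32_bits(v) ((uint32_t)((uint64_t)(v) >> 32))

    int main(void)
    {
        uint64_t queue_mask = 0x0000000300000001ull;

        /* The two words written to the KIQ ring, low word first. */
        printf("lo=0x%08x hi=0x%08x\n",
               lower_32_bits(queue_mask), upper_32_bits(queue_mask));
        return 0;
    }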
|
/linux/drivers/net/ethernet/cadence/
macb_main.c
   4313  u32 queue_mask;  in macb_taprio_setup_replace() (local)
   4430  queue_mask = BIT_U32(bp->num_queues) - 1;  in macb_taprio_setup_replace()
   4432  queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);  in macb_taprio_setup_replace()
   4461  u32 queue_mask;  in macb_taprio_destroy() (local)
   4465  queue_mask = BIT_U32(bp->num_queues) - 1;  in macb_taprio_destroy()
   4470  queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);  in macb_taprio_destroy()
   4595  unsigned int queue_mask = 0x1;  in macb_probe_queues() (local)
   4600  queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF;  in macb_probe_queues()
   4602  queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF;  in macb_probe_queues()
   4604  if (fls(queue_mask) != ffz(queue_mask)) {  in macb_probe_queues()
   [all …]
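The check at line 4604 rejects non-contiguous queue masks: when the set bits run unbroken from bit 0, the 1-based index of the highest set bit (fls) coincides with the 0-based index of the first zero bit (ffz), and any hole makes them differ. A sketch with userspace stand-ins for fls() and ffz(), assuming the mask is not all-ones (ffz of ~0 is undefined here):

    #include <stdbool.h>
    #include <stdio.h>

    static int fls_(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }
    static int ffz_(unsigned int x) { return __builtin_ctz(~x); }

    /* True when mask has the form 0b0...01...1 (bits 0..n-1 set). */
    static bool contiguous_from_bit0(unsigned int mask)
    {
        return fls_(mask) == ffz_(mask);
    }

    int main(void)
    {
        printf("%d\n", contiguous_from_bit0(0x0f)); /* 1: queues 0-3 */
        printf("%d\n", contiguous_from_bit0(0x0b)); /* 0: hole at queue 2 */
        return 0;
    }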
|
/linux/net/sched/
sch_taprio.c
   1441  u32 i, queue_mask = 0;  in tc_map_to_queue_mask() (local)
   1452  queue_mask |= GENMASK(offset + count - 1, offset);  in tc_map_to_queue_mask()
   1455  return queue_mask;  in tc_map_to_queue_mask()
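tc_map_to_queue_mask() converts each traffic class's (offset, count) pair into a run of set bits. A userspace sketch of the same computation, with GENMASK_ as a local stand-in for the kernel's GENMASK() from linux/bits.h:

    #include <stdio.h>
    #include <stdint.h>

    /* Bits l..h set, inclusive; assumes 0 <= l <= h <= 31. */
    #define GENMASK_(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
        unsigned int offset = 4, count = 2;  /* TC owns queues 4 and 5 */
        uint32_t queue_mask = GENMASK_(offset + count - 1, offset);

        printf("0x%x\n", queue_mask);  /* prints 0x30 */
        return 0;
    }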
|
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
   1709  res.queue_mask = 0;  in set_sched_resources()
   1725  if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {  in set_sched_resources()
   1730  res.queue_mask |= 1ull  in set_sched_resources()
   1740  res.vmid_mask, res.queue_mask);  in set_sched_resources()
|
kfd_priv.h
    649  uint64_t queue_mask;  (member)
|