
Searched full:queues (Results 1 – 25 of 1470) sorted by relevance


/linux/tools/testing/selftests/drivers/net/
queues.py
12 folders = glob.glob(f'/sys/class/net/{ifname}/queues/rx-*')
17 queues = nl.queue_get({'ifindex': cfg.ifindex}, dump=True)
18 if queues:
19 return len([q for q in queues if q['type'] == 'rx'])
24 queues = nl_get_queues(cfg, nl)
25 if not queues:
29 ksft_eq(queues, expected)
33 queues = nl_get_queues(cfg, nl)
34 if not queues:
50 queues = nl_get_queues(cfg, nl)
[all …]
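The selftest above counts receive queues either by globbing sysfs or by dumping queue objects over netlink. A minimal standalone sketch of the sysfs half (the netlink path needs the kernel's YNL bindings, so it is omitted; 'eth0' below is just an example interface name):

```python
import glob

def count_rx_queues(ifname: str) -> int:
    """Count rx queues by listing /sys/class/net/<ifname>/queues/rx-*."""
    return len(glob.glob(f'/sys/class/net/{ifname}/queues/rx-*'))

if __name__ == '__main__':
    print(count_rx_queues('eth0'))
```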
/linux/Documentation/networking/
multi-pf-netdev.rst
63 Each combined channel works against one specific PF, creating all its datapath queues against it. We
126 that is capable of pointing to the receive queues of a different PF.
142 - /sys/class/net/eth2/queues/tx-0/xps_cpus:000001
143 - /sys/class/net/eth2/queues/tx-1/xps_cpus:001000
144 - /sys/class/net/eth2/queues/tx-2/xps_cpus:000002
145 - /sys/class/net/eth2/queues/tx-3/xps_cpus:002000
146 - /sys/class/net/eth2/queues/tx-4/xps_cpus:000004
147 - /sys/class/net/eth2/queues/tx-5/xps_cpus:004000
148 - /sys/class/net/eth2/queues/tx-6/xps_cpus:000008
149 - /sys/class/net/eth2/queues/tx-7/xps_cpus:008000
[all …]
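The xps_cpus values above are hexadecimal CPU bitmasks: tx-0 is pinned to CPU 0 (0x000001), tx-1 to CPU 12 (0x001000), tx-2 to CPU 1, and so on, alternating between the CPUs local to each PF. A small sketch that decodes such a mask into a CPU list (masks wider than 32 bits use comma-separated 8-hex-digit words, which joining handles):

```python
def xps_cpus_to_list(mask: str) -> list[int]:
    """Decode an xps_cpus hex bitmask (e.g. '001000') into CPU ids."""
    value = int(mask.replace(',', ''), 16)
    return [cpu for cpu in range(value.bit_length()) if value >> cpu & 1]

assert xps_cpus_to_list('000001') == [0]   # tx-0 -> CPU 0
assert xps_cpus_to_list('001000') == [12]  # tx-1 -> CPU 12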
scaling.rst
27 Contemporary NICs support multiple receive and transmit descriptor queues
29 queues to distribute processing among CPUs. The NIC distributes packets by
62 Some advanced NICs allow steering packets to queues based on
72 module parameter for specifying the number of hardware queues to
75 for each CPU if the device supports enough queues, or otherwise at least
81 default mapping is to distribute the queues evenly in the table, but the
84 indirection table could be done to give different queues different
95 of queues to IRQs can be determined from /proc/interrupts. By default,
110 is to allocate as many queues as there are CPUs in the system (or the
112 is likely the one with the smallest number of receive queues where no
[all …]
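scaling.rst describes RSS: the NIC hashes each flow and uses the low-order bits of the hash to index an indirection table whose entries name receive queues. A toy model of that lookup, with illustrative sizes (real tables are commonly 128 entries; with a power-of-two table the modulo below is the same as masking the low-order bits):

```python
def rss_queue(flow_hash: int, indirection_table: list[int]) -> int:
    """Select the rx queue for a flow via the RSS indirection table."""
    return indirection_table[flow_hash % len(indirection_table)]

# Default mapping distributes the queues evenly across the table.
num_queues, table_size = 4, 128  # illustrative sizes
table = [i % num_queues for i in range(table_size)]
print(rss_queue(0xdeadbeef, table))
```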
/linux/Documentation/networking/device_drivers/ethernet/huawei/
hinic.rst
55 Asynchronous Event Queues (AEQs) - The event queues for receiving messages from
69 Completion Event Queues (CEQs) - The completion Event Queues that describe IO
72 Work Queues (WQ) - Contain the memory and operations for use by CMD queues and
77 Command Queues (CMDQ) - The queues for sending commands for IO management and is
82 Queue Pairs (QPs) - The HW Receive and Send queues for Receiving and Transmitting
104 Tx Queues - Logical Tx Queues that use the HW Send Queues for transmit.
108 Rx Queues - Logical Rx Queues that use the HW Receive Queues for receive.
112 hinic_dev - de/constructs the Logical Tx and Rx Queues.
/linux/Documentation/ABI/testing/
sysfs-class-net-queues
1 What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
11 What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
19 What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
27 What: /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
35 What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
45 What: /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
56 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
65 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
73 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
82 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
[all …]
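These ABI entries are all plain sysfs attributes, so they can be inspected or tuned with ordinary file I/O. A hedged sketch reading a few of the documented files for one tx queue (paths come from the ABI document above; which attributes exist varies by driver, and 'eth0' is only an example):

```python
from pathlib import Path

def read_tx_queue_attrs(iface: str, queue: int) -> dict[str, str]:
    """Read a few documented tx-queue sysfs attributes for one queue."""
    qdir = Path(f'/sys/class/net/{iface}/queues/tx-{queue}')
    attrs = {}
    for name in ('tx_maxrate', 'xps_cpus', 'byte_queue_limits/limit'):
        try:
            attrs[name] = (qdir / name).read_text().strip()
        except OSError:
            attrs[name] = 'unavailable'  # the driver may not expose it
    return attrs

print(read_tx_queue_attrs('eth0', 0))
```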
/linux/net/sched/
sch_multiq.c
25 struct Qdisc **queues; member
54 return q->queues[0]; in multiq_classify()
56 return q->queues[band]; in multiq_classify()
105 qdisc = q->queues[q->curband]; in multiq_dequeue()
137 qdisc = q->queues[curband]; in multiq_peek()
154 qdisc_reset(q->queues[band]); in multiq_reset()
166 qdisc_put(q->queues[band]); in multiq_destroy()
168 kfree(q->queues); in multiq_destroy()
196 if (q->queues[i] != &noop_qdisc) { in multiq_tune()
197 struct Qdisc *child = q->queues[ in multiq_tune()
[all …]
sch_prio.c
26 struct Qdisc *queues[TCQ_PRIO_BANDS]; member
57 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
63 return q->queues[q->prio2band[0]]; in prio_classify()
65 return q->queues[band]; in prio_classify()
103 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
117 struct Qdisc *qdisc = q->queues[prio]; in prio_dequeue()
137 qdisc_reset(q->queues[prio]); in prio_reset()
173 qdisc_put(q->queues[prio]); in prio_destroy()
180 struct Qdisc *queues[TCQ_PRIO_BANDS]; in prio_tune() local
198 queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in prio_tune()
[all …]
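The classify path above maps a packet's priority through the qdisc's prio2band table (masked with TC_PRIO_MAX) to pick one of the per-band child queues. A Python model of that selection; TC_PRIO_MAX is from include/uapi/linux/pkt_sched.h and the priomap shown is the well-known default shared with pfifo_fast:

```python
TC_PRIO_MAX = 15  # from include/uapi/linux/pkt_sched.h

def prio_classify(skb_priority: int, prio2band: list[int]) -> int:
    """Model of prio_classify(): packet priority -> band index."""
    return prio2band[skb_priority & TC_PRIO_MAX]

# Default priomap (band 0 is dequeued first, so it is highest priority).
default_priomap = [1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
assert prio_classify(6, default_priomap) == 0  # TC_PRIO_INTERACTIVE -> band 0
```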
/linux/Documentation/devicetree/bindings/soc/ti/
keystone-navigator-qmss.txt
9 management of the packet queues. Packets are queued/de-queued by writing or
32 -- managed-queues : the actual queues managed by each queue manager
33 instance, specified as <"base queue #" "# of queues">.
51 - qpend : pool of qpend(interruptible) queues
52 - general-purpose : pool of general queues, primarily used
53 as free descriptor queues or the
54 transmit DMA queues.
55 - accumulator : pool of queues on PDSP accumulator channel
57 -- qrange : number of queues to use per queue range, specified as
58 <"base queue #" "# of queues">.
[all …]
/linux/tools/testing/selftests/net/
ncdevmem.c
161 struct netdev_queue_id *queues, in bind_rx_queue() argument
177 __netdev_bind_rx_req_set_queues(req, queues, n_queue_index); in bind_rx_queue()
243 struct netdev_queue_id *queues; in do_server() local
269 /* Configure RSS to divert all traffic from our devmem queues */ in do_server()
279 queues = malloc(sizeof(*queues) * num_queues); in do_server()
282 queues[i]._present.type = 1; in do_server()
283 queues[i]._present.id = 1; in do_server()
284 queues[i].type = NETDEV_QUEUE_TYPE_RX; in do_server()
285 queues[i].id = start_queue + i; in do_server()
288 if (bind_rx_queue(ifindex, buf, queues, num_queues, &ys)) in do_server()
[all …]
/linux/drivers/net/wireless/silabs/wfx/
queue.c
233 struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)]; in wfx_tx_queues_get_skb() local
239 /* sort the queues */ in wfx_tx_queues_get_skb()
243 WARN_ON(num_queues >= ARRAY_SIZE(queues)); in wfx_tx_queues_get_skb()
244 queues[num_queues] = &wvif->tx_queue[i]; in wfx_tx_queues_get_skb()
246 if (wfx_tx_queue_get_weight(queues[j]) < in wfx_tx_queues_get_skb()
247 wfx_tx_queue_get_weight(queues[j - 1])) in wfx_tx_queues_get_skb()
248 swap(queues[j - 1], queues[j]); in wfx_tx_queues_get_skb()
256 skb = skb_dequeue(&queues[i]->offchan); in wfx_tx_queues_get_skb()
264 atomic_inc(&queues[i]->pending_frames); in wfx_tx_queues_get_skb()
265 trace_queues_stats(wdev, queues[i]); in wfx_tx_queues_get_skb()
[all …]
/linux/tools/testing/selftests/drivers/net/hw/
rss_ctx.py
60 # Get Rx packet counts for all queues, as a simple list of integers
77 # - "target": required, which queues we expect to get iperf traffic
78 # - "empty": optional, which queues should see no traffic at all
79 # - "noise": optional, which queues we expect to see low traffic;
80 # used for queues of the main context, since some background
81 # OS activity may use those queues while we're testing
93 f"traffic on other queues ({name})':" + str(cnts))
96 f"traffic on inactive queues ({name}): " + str(cnts))
104 KsftSkipEx("Device has fewer than 3 queues (or doesn't support queue stats)")
148 # Check we only get traffic on the first 2 queues
[all …]
/linux/Documentation/block/
blk-mq.rst
37 spawns multiple queues with individual entry points local to the CPU, removing
49 blk-mq has two group of queues: software staging queues and hardware dispatch
50 queues. When the request arrives at the block layer, it will try the shortest
56 Then, after the requests are processed by software queues, they will be placed
62 Software staging queues
65 The block IO subsystem adds requests in the software staging queues
71 the number of queues is defined by a per-CPU or per-node basis.
93 requests from different queues, otherwise there would be cache thrashing and a
99 queue (a.k.a. run the hardware queue), the software queues mapped to that
102 Hardware dispatch queues
[all …]
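blk-mq's two-level design means each per-CPU software staging queue is mapped to one hardware dispatch queue; with fewer hardware queues than CPUs, several software queues share one. A toy sketch of that many-to-one mapping (the real blk_mq_map_queues() also accounts for NUMA topology rather than using a simple modulo):

```python
def map_sw_to_hw(nr_cpus: int, nr_hw_queues: int) -> dict[int, int]:
    """Toy ctx -> hctx mapping: spread per-CPU staging queues over hw queues."""
    return {cpu: cpu % nr_hw_queues for cpu in range(nr_cpus)}

mapping = map_sw_to_hw(nr_cpus=8, nr_hw_queues=2)
print(mapping)  # CPUs 0,2,4,6 -> hctx 0; CPUs 1,3,5,7 -> hctx 1
```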
/linux/drivers/net/ethernet/intel/idpf/
virtchnl2.h
92 * In the split queue model, hardware uses transmit completion queues to post
94 * descriptor queues to post descriptors to hardware.
96 * queue, while software uses receive buffer queues to post buffers to hardware.
457 * @max_rx_q: Maximum number of supported Rx queues.
458 * @max_tx_q: Maximum number of supported Tx queues.
459 * @max_rx_bufq: Maximum number of supported buffer queues.
460 * @max_tx_complq: Maximum number of supported completion queues.
531 * @num_queues: Number of queues in the chunk.
550 * queues.
567 * @num_tx_q: Number of Tx queues.
[all …]
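The comment block sketches idpf's split queue model: software posts Tx work on descriptor queues and reaps acknowledgements from separate completion queues, while on Rx software refills buffer queues and hardware fills the Rx queue proper. A toy illustration of the Tx half; every name here is illustrative, not the driver's API:

```python
from collections import deque

class SplitTxQueue:
    """Toy model of the split Tx descriptor/completion queue pair."""
    def __init__(self):
        self.descq = deque()   # software posts descriptors here
        self.complq = deque()  # hardware posts completions here

    def sw_post(self, desc):
        self.descq.append(desc)

    def hw_transmit_one(self):
        # Hardware consumes one descriptor and posts its completion.
        if self.descq:
            self.complq.append(self.descq.popleft())

    def sw_clean(self):
        # Software reaps completions to release the associated buffers.
        while self.complq:
            yield self.complq.popleft()

q = SplitTxQueue()
q.sw_post('pkt0')
q.hw_transmit_one()
print(list(q.sw_clean()))  # ['pkt0']
```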
idpf.h
173 * @max_rxq: Maximum number of RX queues supported
174 * @max_txq: Maximum number of TX queues supported
175 * @max_bufq: In splitq, maximum number of buffer queues supported
176 * @max_complq: In splitq, maximum number of completion queues supported
226 * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
252 * @num_txq: Number of allocated TX queues
253 * @num_complq: Number of allocated completion queues
262 * @num_rxq: Number of allocated RX queues
263 * @num_bufq: Number of allocated buffer queues
265 * to complete all buffer descriptors for all buffer queues in
[all …]
/linux/drivers/nvme/target/
loop.c
30 struct nvme_loop_queue *queues; member
71 return queue - queue->ctrl->queues; in nvme_loop_queue_idx()
176 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_submit_async_event()
198 iod->queue = &ctrl->queues[queue_idx]; in nvme_loop_init_iod()
222 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_loop_init_hctx()
242 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_init_admin_hctx()
266 if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) in nvme_loop_destroy_admin_queue()
275 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_destroy_admin_queue()
292 kfree(ctrl->queues); in nvme_loop_free_ctrl()
303 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); in nvme_loop_destroy_io_queues()
[all …]
/linux/drivers/scsi/aacraid/
comminit.c
237 * Fill in addresses of the Comm Area Headers and Queues in aac_alloc_comm()
373 struct aac_entry * queues; in aac_comm_init() local
375 struct aac_queue_block * comm = dev->queues; in aac_comm_init()
394 queues = (struct aac_entry *)(((ulong)headers) + hdrsize); in aac_comm_init()
397 comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init()
399 queues += HOST_NORM_CMD_ENTRIES; in aac_comm_init()
403 comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init()
406 queues += HOST_HIGH_CMD_ENTRIES; in aac_comm_init()
410 comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init()
413 queues += ADAP_NORM_CMD_ENTRIES; in aac_comm_init()
[all …]
/linux/drivers/media/platform/nxp/imx8-isi/
imx8-isi-m2m.c
52 /* Protects the m2m vb2 queues */
58 } queues; member
85 return &ctx->queues.out; in mxc_isi_m2m_ctx_qdata()
87 return &ctx->queues.cap; in mxc_isi_m2m_ctx_qdata()
112 src_vbuf->sequence = ctx->queues.out.sequence++; in mxc_isi_m2m_frame_write_done()
113 dst_vbuf->sequence = ctx->queues.cap.sequence++; in mxc_isi_m2m_frame_write_done()
135 .width = ctx->queues.out.format.width, in mxc_isi_m2m_device_run()
136 .height = ctx->queues.out.format.height, in mxc_isi_m2m_device_run()
139 .width = ctx->queues.cap.format.width, in mxc_isi_m2m_device_run()
140 .height = ctx->queues.cap.format.height, in mxc_isi_m2m_device_run()
[all …]
/linux/drivers/scsi/snic/
vnic_resource.h
13 RES_TYPE_WQ, /* Work queues */
14 RES_TYPE_RQ, /* Receive queues */
15 RES_TYPE_CQ, /* Completion queues */
31 RES_TYPE_MQ_WQ, /* MQ Work queues */
32 RES_TYPE_MQ_RQ, /* MQ Receive queues */
33 RES_TYPE_MQ_CQ, /* MQ Completion queues */
/linux/Documentation/arch/arm/keystone/
knav-qmss.rst
15 management of the packet queues. Packets are queued/de-queued by writing or
24 knav qmss driver provides a set of APIs to drivers to open/close qmss queues,
25 allocate descriptor pools, map the descriptors, push/pop to queues etc. For
31 Accumulator QMSS queues using PDSP firmware
34 queue or multiple contiguous queues. drivers/soc/ti/knav_qmss_acc.c is the
37 1 or 32 queues per channel. More description on the firmware is available in
56 Use of accumulated queues requires the firmware image to be present in the
57 file system. The driver doesn't acc queues to the supported queue range if
/linux/drivers/scsi/fnic/
vnic_resource.h
15 RES_TYPE_WQ, /* Work queues */
16 RES_TYPE_RQ, /* Receive queues */
17 RES_TYPE_CQ, /* Completion queues */
33 RES_TYPE_MQ_WQ, /* MQ Work queues */
34 RES_TYPE_MQ_RQ, /* MQ Receive queues */
35 RES_TYPE_MQ_CQ, /* MQ Completion queues */
/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_lib.c
216 /* FCoE uses a linear block of queues so just assigning 1:1 */ in ixgbe_cache_ring_sriov()
237 /* FCoE uses a linear block of queues so just assigning 1:1 */ in ixgbe_cache_ring_sriov()
302 int queues; in ixgbe_xdp_queues() local
304 queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids); in ixgbe_xdp_queues()
305 return adapter->xdp_prog ? queues : 0; in ixgbe_xdp_queues()
317 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
320 When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
321 * and VM pools where appropriate. Also assign queues based on DCB
343 /* limit VMDq instances on the PF by number of Tx queues */ in ixgbe_set_dcb_sriov_queues()
360 /* queues in the remaining pools are available for FCoE */ in ixgbe_set_dcb_sriov_queues()
[all …]
/linux/tools/testing/selftests/tc-testing/tc-tests/qdiscs/
taprio.json
4 "name": "Add taprio Qdisc to multi-queue device (8 queues)",
15 "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 flags 0x1 clockid CLOCK_TAI",
37 "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 sched-entry S 03 300000 sched-entry S 04 400000 flags 0x1 clockid CLOCK_TAI",
59 "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 flags 0x1 txtime-delay 500000 clockid CLOCK_TAI",
80 "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 flags 0x1 clockid CLOCK_TAI"
104 "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 flags 0x1 clockid CLOCK_TAI",
126 "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 flags 0x1 clockid CLOCK_TAI",
148 "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 300 sched-entry S 02 1700 clockid CLOCK_TAI",
170 "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 clockid CLOCK_TAI",
191 "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues
[all …]
/linux/drivers/net/ethernet/cisco/enic/
vnic_resource.h
21 RES_TYPE_WQ, /* Work queues */
22 RES_TYPE_RQ, /* Receive queues */
23 RES_TYPE_CQ, /* Completion queues */
39 RES_TYPE_MQ_WQ, /* MQ Work queues */
40 RES_TYPE_MQ_RQ, /* MQ Receive queues */
41 RES_TYPE_MQ_CQ, /* MQ Completion queues */
/linux/Documentation/networking/device_drivers/ethernet/google/
gve.rst
47 - Transmit and Receive Queues
109 The handler for the management irq simply queues the service task in
115 the queues associated with that interrupt.
118 and poll the queues.
120 GQI Traffic Queues
122 GQI queues are composed of a descriptor ring and a buffer and are assigned to a
145 DQO Traffic Queues
149 - TX and RX buffers queues, which send descriptors to the device, use MMIO
152 - RX and TX completion queues, which receive descriptors from the device, use a
160 queues are not overrun. This can be accomplished by limiting the number of
[all …]
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.h
91 * @execute_queues: Dispatches the queues list to the H/W.
99 * @start: Initializes the resources/modules the device needs for queues
109 * @halt: This routine unmaps queues from runlist and sets halt status to true
110 * so no more queues will be mapped to runlist until unhalt.
112 * @unhalt: This routine unsets halt status to false and maps queues back to
122 * @process_termination: Clears all process queues belongs to that device.
124 * @evict_process_queues: Evict all active queues of a process
126 * @restore_process_queues: Restore all evicted queues of a process
131 * @reset_queues: reset queues which consume RAS poison
231 * This struct is a base class for the kfd queues scheduler in the
[all …]
