
Searched full:rings (Results 1 – 25 of 472) sorted by relevance


/linux/tools/testing/selftests/drivers/net/
hds.py
16 rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
19 if 'tcp-data-split' not in rings:
21 return rings['tcp-data-split']
37 rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
41 if 'tx' not in rings:
45 ethtool(f"--disable-netlink -G {cfg.ifname} tx {rings['tx'] // 2}")
47 ethtool(f"--disable-netlink -G {cfg.ifname} tx {rings['tx'] * 2}")
48 defer(ethtool, f"-G {cfg.ifname} tx {rings['tx']}")
57 rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
60 if 'hds-thresh' not in rings:
[all …]
/linux/io_uring/
register.c
161 /* Restrictions allowed only if rings started disabled */ in io_register_restrictions()
374 struct io_rings *rings;
440 n.rings = io_region_get_ptr(&n.ring_region); in io_register_resize_rings()
443 * At this point n.rings is shared with userspace, just like o.rings in io_register_resize_rings()
450 WRITE_ONCE(n.rings->sq_ring_mask, p.sq_entries - 1); in io_register_resize_rings()
451 WRITE_ONCE(n.rings->cq_ring_mask, p.cq_entries - 1); in io_register_resize_rings()
452 WRITE_ONCE(n.rings->sq_ring_entries, p.sq_entries); in io_register_resize_rings()
453 WRITE_ONCE(n.rings->cq_ring_entries, p.cq_entries); in io_register_resize_rings()
495 * existing rings beyon in io_register_resize_rings()
376 struct io_rings *rings; global() member
[all...]
io_uring.c
202 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); in __io_cqring_events()
207 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head); in __io_cqring_events_user()
599 /* IOPOLL rings only need to wake up if it's also SQPOLL */ in __io_cq_unlock_post()
665 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); in __io_cqring_overflow_flush()
672 if (ctx->rings) in io_cqring_overflow_kill()
725 struct io_rings *r = ctx->rings; in io_cqring_add_overflow()
738 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); in io_cqring_add_overflow()
781 struct io_uring_cqe *cqe = &ctx->rings->cqes[off]; in io_fill_nop_cqe()
799 struct io_rings *rings in io_cqe_cache_refill() local
2368 struct io_rings *rings = ctx->rings; io_commit_sqring() local
2646 struct io_rings *rings = ctx->rings; io_cqring_wait() local
2778 struct io_rings *rings; rings_size() local
3598 struct io_rings *rings; io_allocate_scq_urings() local
[all...]
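
The io_uring.c hits above show the bookkeeping behind both rings: the masks are set to the entry count minus one (the entry counts are powers of two), and the number of pending completions is simply tail minus head. A minimal user-space sketch of that indexing scheme, with invented names rather than the kernel's io_rings structures:

    /* Illustrative only: power-of-two head/tail ring indexing as in the
     * io_uring lines above (sq_ring_mask = sq_entries - 1, pending = tail - head). */
    #include <stdint.h>
    #include <stdbool.h>

    struct sketch_ring {
            uint32_t head;      /* consumer position, monotonically increasing */
            uint32_t tail;      /* producer position, monotonically increasing */
            uint32_t mask;      /* entries - 1, with entries a power of two */
            uint64_t *slots;    /* ring storage, entries elements */
    };

    static inline uint32_t ring_pending(const struct sketch_ring *r)
    {
            /* Correct even after the 32-bit counters wrap around. */
            return r->tail - r->head;
    }

    static inline bool ring_push(struct sketch_ring *r, uint64_t item)
    {
            if (ring_pending(r) > r->mask)          /* ring is full */
                    return false;
            r->slots[r->tail & r->mask] = item;     /* index with tail & (entries - 1) */
            r->tail++;
            return true;
    }
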
/linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
flowring.c
142 if (flow->rings[i] == NULL) in brcmf_flowring_create()
160 flow->rings[i] = ring; in brcmf_flowring_create()
172 ring = flow->rings[flowid]; in brcmf_flowring_tid()
192 ring = flow->rings[flowid]; in brcmf_flowring_block()
201 if ((flow->rings[i]) && (i != flowid)) { in brcmf_flowring_block()
202 ring = flow->rings[i]; in brcmf_flowring_block()
212 flow->rings[flowid]->blocked = blocked; in brcmf_flowring_block()
236 ring = flow->rings[flowid]; in brcmf_flowring_delete()
247 flow->rings[flowid] = NULL; in brcmf_flowring_delete()
264 ring = flow->rings[flowid]; in brcmf_flowring_enqueue()
[all …]
/linux/Documentation/mhi/
mhi.rst
58 Transfer rings: Used by the host to schedule work items for a channel. The
59 transfer rings are organized as a circular queue of Transfer Descriptors (TD).
64 Event rings: Used by the device to send completion and state transition messages
70 Command rings: Used by the host to send MHI commands to the device. The command
71 rings are organized as a circular queue of Command Descriptors (CD).
81 Two unidirectional channels with their associated transfer rings form a
87 Transfer rings
91 Transfer Descriptors (TD). TDs are managed through transfer rings, which are
101 Below is the basic usage of transfer rings:
110 buffer information, increments the WP to the next element and rings the
[all …]
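
The mhi.rst lines above describe transfer rings as circular queues of Transfer Descriptors (TDs): the host writes buffer information into the element at the write pointer (WP), advances WP to the next element and rings the channel doorbell so the device can consume from its read pointer (RP). A hedged sketch of that producer side, with invented names rather than the kernel's MHI types:

    /* Illustrative only: a circular TD ring with a host-owned WP, a device-owned
     * RP and a doorbell callback, mirroring the usage described in mhi.rst. */
    #include <stddef.h>
    #include <stdbool.h>

    struct xfer_desc {                  /* stand-in for a Transfer Descriptor */
            void   *buf;
            size_t  len;
    };

    struct xfer_ring {
            struct xfer_desc *el;       /* ring memory shared with the device */
            unsigned int num_el;        /* number of ring elements */
            unsigned int rp;            /* read pointer, advanced by the device */
            unsigned int wp;            /* write pointer, advanced by the host */
            void (*ring_db)(struct xfer_ring *ring);    /* ring the channel doorbell */
    };

    /* Host side: fill the element at WP, advance WP and ring the doorbell. */
    static bool xfer_ring_queue(struct xfer_ring *ring, void *buf, size_t len)
    {
            unsigned int next = (ring->wp + 1) % ring->num_el;

            if (next == ring->rp)       /* ring full, one element kept free */
                    return false;
            ring->el[ring->wp].buf = buf;
            ring->el[ring->wp].len = len;
            ring->wp = next;
            ring->ring_db(ring);
            return true;
    }
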
/linux/net/9p/
trans_xen.c
57 struct xen_9pfs_dataring *rings; member
134 ring = &priv->rings[num]; in p9_xen_request()
282 struct xen_9pfs_dataring *ring = &priv->rings[i]; in xen_9pfs_front_free()
286 if (!priv->rings[i].intf) in xen_9pfs_front_free()
288 if (priv->rings[i].irq > 0) in xen_9pfs_front_free()
289 unbind_from_irqhandler(priv->rings[i].irq, ring); in xen_9pfs_front_free()
290 if (priv->rings[i].data.in) { in xen_9pfs_front_free()
292 j < (1 << priv->rings[i].intf->ring_order); in xen_9pfs_front_free()
296 ref = priv->rings[i].intf->ref[j]; in xen_9pfs_front_free()
299 free_pages_exact(priv->rings[i].data.in, in xen_9pfs_front_free()
[all …]
/linux/drivers/soc/ti/
k3-ringacc.c
153 * @use_count: Use count for shared rings
187 * struct k3_ringacc - Rings accelerator descriptor
194 * @rm_gp_range: general purpose rings range from tisci
198 * @rings: array of rings descriptors (struct @k3_ring)
200 * @req_lock: protect rings allocation
202 * @tisci_ring_ops: ti-sci rings ops
211 u32 num_rings; /* number of rings in Ringacc module */
219 struct k3_ring *rings; member
221 struct mutex req_lock; /* protect rings allocation */
232 * struct k3_ringacc_soc_data - Rings accelerator SoC data
[all …]
/linux/tools/net/ynl/samples/
ethtool.c
16 struct ethtool_rings_get_list *rings; in main() local
42 rings = ethtool_rings_get_dump(ys, &rreq); in main()
43 if (!rings) in main()
46 printf("Rings:\n"); in main()
47 ynl_dump_foreach(rings, dev) { in main()
55 ethtool_rings_get_list_free(rings); in main()
/linux/Documentation/devicetree/bindings/soc/ti/
k3-ringacc.yaml
56 ti,num-rings:
58 description: Number of rings supported by RA
60 ti,sci-rm-range-gp-rings:
69 - ti,num-rings
70 - ti,sci-rm-range-gp-rings
90 ti,num-rings = <818>;
91 ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
/linux/Documentation/networking/
af_xdp.rst
24 syscall. Associated with each XSK are two rings: the RX ring and the
26 packets on the TX ring. These rings are registered and sized with the
28 to have at least one of these rings for each socket. An RX or TX
37 one of the rings references a frame by referencing its addr. The addr
42 UMEM also has two rings: the FILL ring and the COMPLETION ring. The
50 TX ring. In summary, the RX and FILL rings are used for the RX path
51 and the TX and COMPLETION rings are used for the TX path.
59 corresponding two rings, sets the XDP_SHARED_UMEM flag in the bind
65 process has to create its own socket with associated RX and TX rings,
67 reason that there is only one set of FILL and COMPLETION rings per
[all …]
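
The af_xdp.rst lines above say that each XSK has an RX and a TX ring, each UMEM a FILL and a COMPLETION ring, and that these rings are registered and sized with setsockopt() calls. A minimal sketch of just those calls (UMEM registration via XDP_UMEM_REG, the mmap() of the rings, bind() and error handling are all omitted), assuming a libc and kernel that expose AF_XDP and linux/if_xdp.h:

    /* Illustrative only: size the four AF_XDP rings on a new XSK. */
    #include <sys/socket.h>
    #include <linux/if_xdp.h>

    static int xsk_size_rings(unsigned int ndesc)
    {
            int fd = socket(AF_XDP, SOCK_RAW, 0);

            if (fd < 0)
                    return -1;
            /* Per-socket rings: RX and TX. */
            setsockopt(fd, SOL_XDP, XDP_RX_RING, &ndesc, sizeof(ndesc));
            setsockopt(fd, SOL_XDP, XDP_TX_RING, &ndesc, sizeof(ndesc));
            /* Per-UMEM rings: FILL feeds the RX path, COMPLETION the TX path. */
            setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ndesc, sizeof(ndesc));
            setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ndesc, sizeof(ndesc));
            return fd;
    }
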
/linux/drivers/crypto/intel/qat/qat_common/
adf_transport.c
267 ring = &bank->rings[ring_num]; in adf_create_ring()
338 adf_handle_response(&bank->rings[i]); in adf_ring_response_handler()
404 /* Allocate the rings in the bank */ in adf_init_bank()
406 bank->rings = kzalloc_node(size, GFP_KERNEL, in adf_init_bank()
408 if (!bank->rings) in adf_init_bank()
425 ring = &bank->rings[i]; in adf_init_bank()
436 "Invalid tx rings mask config\n"); in adf_init_bank()
439 tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; in adf_init_bank()
456 ring = &bank->rings[i]; in adf_init_bank()
460 kfree(bank->rings); in adf_init_bank()
[all …]
/linux/tools/testing/selftests/net/
toeplitz.c
104 static struct ring_state rings[RSS_MAX_CPUS]; variable
242 /* simple test: sleep once unconditionally and then process all rings */
250 do {} while (recv_block(&rings[i])); in process_rings()
355 /* block packets until all rings are added to the fanout group: in create_ring()
404 rings[i].cpu = i; in setup_rings()
405 rings[i].fd = create_ring(&rings[i].mmap); in setup_rings()
408 /* accept packets once all rings in the fanout group are up */ in setup_rings()
410 set_filter(rings[i].fd); in setup_rings()
418 if (munmap(rings[i].mmap, ring_block_nr * ring_block_sz)) in cleanup_rings()
420 if (close(rings[i].fd)) in cleanup_rings()
psock_fanout.c
263 static int sock_fanout_read(int fds[], char *rings[], const int expect[]) in sock_fanout_read() argument
267 ret[0] = sock_fanout_read_ring(fds[0], rings[0]); in sock_fanout_read()
268 ret[1] = sock_fanout_read_ring(fds[1], rings[1]); in sock_fanout_read()
482 char *rings[2]; in test_datapath() local
501 rings[0] = sock_fanout_open_ring(fds[0]); in test_datapath()
502 rings[1] = sock_fanout_open_ring(fds[1]); in test_datapath()
505 sock_fanout_read(fds, rings, expect0); in test_datapath()
510 ret = sock_fanout_read(fds, rings, expect1); in test_datapath()
515 ret |= sock_fanout_read(fds, rings, expect2); in test_datapath()
517 if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) || in test_datapath()
[all …]
/linux/drivers/net/ethernet/intel/igc/
igc_dump.c
111 /* igc_rings_dump - Tx-rings and Rx-rings */
133 netdev_info(netdev, "TX Rings Summary\n"); in igc_rings_dump()
149 /* Print TX Rings */ in igc_rings_dump()
153 netdev_info(netdev, "TX Rings Dump\n"); in igc_rings_dump()
209 /* Print RX Rings Summary */ in igc_rings_dump()
211 netdev_info(netdev, "RX Rings Summary\n"); in igc_rings_dump()
219 /* Print RX Rings */ in igc_rings_dump()
223 netdev_info(netdev, "RX Rings Dump\n"); in igc_rings_dump()
/linux/drivers/crypto/caam/
Kconfig
40 Enables the driver module for Job Rings which are part of
55 Select size of Job Rings as a power of 2, within the
123 number of job rings assigned to the kernel. The number of portals
125 job rings.
/linux/drivers/net/ethernet/cavium/liquidio/
octeon_device.c
80 /* Num of desc for rx rings */
83 /* Num of desc for tx rings */
109 /* Num of desc for rx rings */
112 /* Num of desc for tx rings */
188 /* Num of desc for rx rings */
191 /* Num of desc for tx rings */
217 /* Num of desc for rx rings */
220 /* Num of desc for tx rings */
246 /* Num of desc for rx rings */
249 /* Num of desc for tx rings */
[all …]
/linux/include/linux/soc/ti/
k3-ringacc.h
108 * used to access ring memory. Supported only for rings in
129 * k3_ringacc_ring_reset - ring reset for DMA rings
132 * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings
133 * which are read by K3 UDMA, like TX or Free Host PD rings.
259 * struct k3_ringacc_init_data - Initialization data for DMA rings
/linux/drivers/crypto/inside-secure/
safexcel.c
29 MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
36 * Map all interfaces/rings to register index 0 in eip197_trc_cache_setupvirt()
51 for (i = 0; i < priv->config.rings; i++) { in eip197_trc_cache_setupvirt()
510 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_cdesc_rings()
558 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_rdesc_rings()
600 priv->config.pes, priv->config.rings); in safexcel_hw_init()
660 /* enable HIA input interface arbiter and rings */ in safexcel_hw_init()
662 GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
719 /* Command Descriptor Rings prepare */ in safexcel_hw_init()
720 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init()
[all …]
/linux/include/linux/
mhi_ep.h
93 * @event_lock: Lock for protecting event rings
98 * @wq: Dedicated workqueue for handling rings and state changes
101 * @cmd_ring_work: Worker for processing command rings
102 * @ch_ring_work: Worker for processing channel rings
113 * @event_rings: Number of event rings supported by the endpoint controller
114 * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
/linux/drivers/block/xen-blkback/
xenbus.c
84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file) in xen_update_blkif_status()
110 ring = &blkif->rings[i]; in xen_update_blkif_status()
124 ring = &blkif->rings[i]; in xen_update_blkif_status()
134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring), in xen_blkif_alloc_rings()
136 if (!blkif->rings) in xen_blkif_alloc_rings()
140 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_alloc_rings()
274 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_disconnect()
335 * blkif->rings was allocated in connect_ring, so we should free it in in xen_blkif_disconnect()
338 kfree(blkif->rings); in xen_blkif_disconnect()
339 blkif->rings = NULL; in xen_blkif_disconnect()
[all …]
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring_mux.h
34 * struct amdgpu_mux_entry - the entry recording software rings copying information.
74 /*the lock for copy data from different software rings*/
85 * struct amdgpu_mux_chunk - save the location of indirect buffer's package on software rings.
/linux/drivers/net/ethernet/aquantia/atlantic/
aq_vec.h
8 /* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings.
9 * Declaration of functions for Rx and Tx rings.
/linux/tools/perf/pmu-events/arch/x86/broadwellde/
other.json
23 "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
27 …ription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
/linux/tools/perf/pmu-events/arch/x86/broadwellx/
other.json
23 "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
27 …ription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
/linux/tools/perf/pmu-events/arch/x86/broadwell/
other.json
23 "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
27 …ription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
