Lines Matching +full:tx +full:- +full:slots

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
48 * Some fields should be cache-aligned to reduce contention.
56 * --- Netmap data structures ---
68 +---->+---------------+
71 +----------------+ / | other fields |
75 | | / +---------------+
77 | txring_ofs[0] | (rel.to nifp)--' | flags, ptr |
78 | txring_ofs[1] | +---------------+
79 (tx+htx entries) (num_slots entries)
80 | txring_ofs[t] | | buf_idx, len | slot[n-1]
81 +----------------+ | flags, ptr |
82 | rxring_ofs[0] | +---------------+
86 +----------------+
92 * There is one netmap_ring per physical NIC ring, plus at least one tx/rx ring
93 * pair attached to the host stack (these pairs are unused for non-NIC ports).
96 * so that zero-copy can be implemented between them.
99 * The netmap_ring is the userspace-visible replica of the NIC ring.
100 * Each slot has the index of a buffer (MTU-sized and residing in the
101 * mmapped region), its length and some flags. An extra 64-bit pointer
102 * is provided for user-supplied buffers in the tx path.
113 * mapped, nifp->ni_bufs_head will be the index of the first buffer.
132 * netmap:foo^k the k-th host rings pair
134 * netmap:foo-k the k-th NIC rings pair
147 * NIC TX queue in netmap mode.
152 * ifconfig on FreeBSD or ethtool -K on Linux) for an interface that is being
195 * Slot released to the kernel (i.e. before ring->head) with
197 * thus restoring the host-NIC connection for these slots.
209 * (VALE tx rings only) data is in a userspace buffer,
217 * Set on all but the last slot of a multi-segment packet.
222 /* (monitor ports only) the packet comes from the TX
234 #define NS_RFRAGS(_slot) ( ((_slot)->flags >> 8) & 0xff)
246 * Netmap representation of a TX or RX ring (also known as "queue").
247 * This is a queue implemented as a fixed-size circular array.
250 * In TX rings:
257 * [head .. tail-1] can be used for new packets to send;
258 * 'head' and 'cur' must be incremented as slots are filled
261 * for new transmissions. XXX todo (2014-03-12)
270 * [head .. tail-1] contain received packets;
271 * 'head' and 'cur' must be incremented as slots are consumed
277 * The netmap_ring, and all slots and buffers in the range
278 * [head .. tail-1] are owned by the user program;
282 * Other slots and buffers are reserved for use by the kernel
291 const uint32_t num_slots; /* number of slots in the ring. */
294 const uint16_t dir; /* 0: tx, 1: rx */
305 * in the slots used to contain an offset in the buffer.
313 * account when specifying buffer-offsets in TX slots.
324 /* the slots follow. This struct has variable size */
325 struct netmap_slot slot[0]; /* array of slots. */
349 * Check if space is available in the ring. We use ring->head, which
351 * possible that the application moves ring->cur ahead of ring->tail
352 * (e.g., by setting ring->cur <== ring->tail), if it wants more slots
359 return (ring->head == ring->tail); in nm_ring_empty()
381 * Physical NICs can have different numbers of tx and rx rings.
386 const uint32_t ni_tx_rings; /* number of HW tx rings */
390 const uint32_t ni_host_tx_rings; /* number of SW tx rings */
396 * - NIC tx rings (ni_tx_rings);
397 * - host tx rings (ni_host_tx_rings);
398 * - NIC rx rings (ni_rx_rings);
399 * - host rx rings (ni_host_rx_rings);
431 * Pointer to a command-specific struct, described by one
444 * On input, non-zero values may be used to reconfigure the port
454 * If nr_mode == NR_REG_ONE_NIC (only a single couple of TX/RX
455 * rings), indicate which NIC TX and/or RX ring is to be bound
456 * (0..nr_*x_rings-1).
461 * NR_NO_TX_POLL can be OR-ed to make select()/poll() push
462 * packets on tx rings only if POLLOUT is set.
465 * NR_DO_RX_POLL can be OR-ed to make select()/poll() release
468 * Note that this is the opposite of TX because it
482 * If two ports share the same region, zero-copy is possible.
506 * (e.g. because they contain arrays). For fixed-size options this
519 uint64_t nr_options; /* command-specific options */
534 /* Set the port header length (was virtio-net header length). */
536 /* Get the port header length (was virtio-net header length). */
548 /* Start an in-kernel loop that syncs the rings periodically or
552 /* Stops the thread executing the in-kernel loop. The thread
561 * from user-space allocated memory pools (e.g. hugepages).
565 /* On NETMAP_REQ_SYNC_KLOOP_START, ask netmap to use eventfd-based
572 * struct netmap_ring header, but rather using a user-provided
578 * if the TX and/or RX rings are synced in the context of the VM exit.
584 * slots of the registered rings to be used as an offset field
601 uint32_t nr_tx_slots; /* slots in tx rings */
602 uint32_t nr_rx_slots; /* slots in rx rings */
603 uint16_t nr_tx_rings; /* number of tx rings */
605 uint16_t nr_host_tx_rings; /* number of host tx rings */
623 /* Applications set this flag if they are able to deal with virtio-net headers,
624 * that is send/receive frames that start with a virtio-net header.
654 /* The ioctl commands to sync TX/RX netmap rings.
655 * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues,
658 #define NIOCTXSYNC _IO('i', 148) /* sync tx queues */
664 * slots per ring, id of the memory allocator, etc. The netmap
670 uint32_t nr_tx_slots; /* slots in tx rings */
671 uint32_t nr_rx_slots; /* slots in rx rings */
672 uint16_t nr_tx_rings; /* number of tx rings */
674 uint16_t nr_host_tx_rings; /* number of host tx rings */
733 uint32_t nr_tx_slots; /* slots in tx rings */
734 uint32_t nr_rx_slots; /* slots in rx rings */
735 uint16_t nr_tx_rings; /* number of tx rings */
778 * Start an in-kernel loop that syncs the rings periodically or on
792 /* A CSB entry for the application --> kernel direction. */
796 uint32_t appl_need_kick; /* AW+ KR+ kern --> appl notification enable */
797 uint32_t sync_flags; /* AW+ KR+ the flags of the appl [tx|rx]sync() */
801 /* A CSB entry for the application <-- kernel direction. */
805 uint32_t kern_need_kick; /* AR+ KW+ appl-->kern notification enable */
819 * effect of a store-store barrier and a load-store barrier, in nm_stst_barrier()
826 * effect of a load-load barrier and a store-load barrier, in nm_ldld_barrier()
863 /* Application side of sync-kloop: Write ring pointers (cur, head) to the CSB.
869 /* Issue a first store-store barrier to make sure writes to the in nm_sync_kloop_appl_write()
870 * netmap ring do not overcome updates on atok->cur and atok->head. */ in nm_sync_kloop_appl_write()
890 * wmb() <-----------> rmb() in nm_sync_kloop_appl_write()
894 atok->cur = cur; in nm_sync_kloop_appl_write()
896 atok->head = head; in nm_sync_kloop_appl_write()
899 /* Application side of sync-kloop: Read kring pointers (hwcur, hwtail) from
910 *hwtail = ktoa->hwtail; in nm_sync_kloop_appl_read()
912 *hwcur = ktoa->hwcur; in nm_sync_kloop_appl_read()
914 /* Make sure that loads from ktoa->hwtail and ktoa->hwcur are not delayed in nm_sync_kloop_appl_read()
938 /* Notifier for the application --> kernel loop direction. */
940 /* Notifier for the kernel loop --> application direction. */
961 /* Array of CSB entries for application --> kernel communication
965 /* Array of CSB entries for kernel --> application communication
974 * going to put into the offset slot-fields. Any larger value
979 /* optional initial offset value, to be set in all slots. */
988 * (base of the buffer plus offset) in the TX slots.