Lines Matching +full:txrx +full:-

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo
5 * Copyright (C) 2013-2016 Universita` di Pisa
92 #define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */
102 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
103 #define MBUF_TXQ(m) ((m)->m_pkthdr.flowid)
104 #define MBUF_TRANSMIT(na, ifp, m) ((na)->if_transmit(ifp, m))
105 #define GEN_TX_MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
106 #define GEN_TX_MBUF_NA(m) ((struct netmap_adapter *)(m)->m_ext.ext_arg1)
116 #define MBUF_REFCNT(m) ((m)->m_ext.ext_count)
117 #define SET_MBUF_REFCNT(m, x) (m)->m_ext.ext_count = x
144 #define MBUF_LEN(m) ((m)->len)
148 m->priority = NM_MAGIC_PRIORITY_TX; \
149 (((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp)); \
154 #define GEN_TX_MBUF_IFP(m) ((if_t)skb_shinfo(m)->destructor_arg)
158 #define NM_MTX_T struct mutex /* OS-specific sleepable lock */
176 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
184 #define NM_MTX_T KGUARDED_MUTEX /* OS-specific mutex (sleepable) */
196 // Definition of internal driver-to-driver ioctl codes
219 #endif /* end - platform-specific code */
250 nm_prinf_int("%03d.%06d [%4d] %-25s " format "\n",\
259 nm_prerr_int("%03d.%06d [%4d] %-25s " format "\n",\
285 /* os-specific NM_SELINFO_T initialization/destruction functions */
314 /* os specific attach/detach enter/exit-netmap-mode routines */
335 enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX };
338 nm_txrx2str(enum txrx t)
343 static __inline enum txrx
344 nm_txrx_swap(enum txrx t)
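/*
 * Illustrative sketch, not part of the listing above: plausible bodies for
 * the two helpers, plus the usual way netmap code iterates over both
 * directions. The for_rx_tx() loop is an assumption modeled on the enum
 * layout, where NR_TXRX doubles as the iteration bound.
 */
static __inline const char *
nm_txrx2str(enum txrx t)
{
	return (t == NR_RX ? "RX" : "TX");
}

static __inline enum txrx
nm_txrx_swap(enum txrx t)
{
	return (t == NR_RX ? NR_TX : NR_RX);
}

#define for_rx_tx(t)	for ((t) = 0; (t) < NR_TXRX; (t)++)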
363 * It corresponds to ring->head
367 * On RX, hwcur->hwtail are receive buffers
369 * ring->head, hwtail is advanced on incoming packets,
370 * and a wakeup is generated when hwtail passes ring->cur
371 * On TX, hwcur->rcur have been filled by the sender
372 * but not sent yet to the NIC; rcur->hwtail are available
373 * for new transmissions, and hwtail->hwcur-1 are pending
383 * The following fields are used to implement lock-free copy of packets
390 * nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
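/*
 * Illustrative sketch, not from the file: the circular-interval arithmetic
 * behind the comments above. The number of slots in [lo, hi) on a ring of
 * n slots is hi - lo, adjusted when hi has wrapped; invariants such as
 * "nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1" are meant modulo n.
 */
static inline u_int
nm_circ_count(u_int lo, u_int hi, u_int n)
{
	return (hi >= lo) ? hi - lo : hi + n - lo;
}

/* e.g. nm_circ_count(k->nr_hwcur, k->nr_hwtail, k->nkr_num_slots) is the
 * same rx space computed by nm_kr_rxspace() further down. */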
452 * the NIC ring that corresponds to the kernel-owned part of
513 enum txrx tx; /* kind of ring (tx or rx) */
537 /* mask for the offset-related part of the ptr field in the slots */
539 /* maximum user-specified offset, as stipulated at bind time.
551 * the netmap buffers because of non-zero offsets, or because
577 /* circular list of zero-copy monitors */
600 return kring->nr_pending_mode == NKR_NETMAP_ON &&
601 kring->nr_mode == NKR_NETMAP_OFF;
608 return kring->nr_pending_mode == NKR_NETMAP_OFF &&
609 kring->nr_mode == NKR_NETMAP_ON;
624 return unlikely (i == 0) ? lim : i - 1;
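/*
 * Illustrative sketch, an assumption: the increment counterpart of the
 * nm_prev() body above, and the canonical loop that walks a ring with it.
 */
static inline uint32_t
nm_next(uint32_t i, uint32_t lim)
{
	return unlikely (i == lim) ? 0 : i + 1;
}

/* typical consumer walk over the slots between cur and tail:
 *
 *	for (i = cur; i != tail; i = nm_next(i, lim)) {
 *		struct netmap_slot *slot = &ring->slot[i];
 *		... process slot ...
 *	}
 */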
634       +-----------------+            +-----------------+
637       +-----------------+            +-----------------+
638 head->| owned by user   |<-hwcur     | not sent to nic |<-hwcur
640       +-----------------+            |                 |
641  cur->| available to    |            |                 |
642       | user, not read  |            +-----------------+
643       | yet             |       cur->| (being          |
646       +-----------------+            +     ------      +
647 tail->|                 |<-hwtail    |                 |<-hwlease
650       +-----------------+ ...        |                 | ...
651       |                 |<-hwlease   +-----------------+
652       |                 |      tail->|                 |<-hwtail
656       +-----------------+            +-----------------+
720 * is netmap-capable. So we always use the following trick:
757 int active_fds; /* number of user-space descriptors using this
799 /*---- callbacks for this netmap adapter -----*/
817 * the purpose of this callback is to fill the kring->hwbuf_len
818 * (l) and kring->buf_align fields. The l value is most important
825 * If m is known to be <= b - o, the callback may also choose the
828 * will see this value in the ring->buf_align field. Misaligned
847 * but for NIC/host ports attached to a switch (or vice-versa)
850 * kring->nm_notify.
880 * to/from the switch, to perform adapter-specific
935 nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
937 return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
941 nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
944 na->num_tx_desc = v;
946 na->num_rx_desc = v;
950 nma_get_nrings(struct netmap_adapter *na, enum txrx t)
952 return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
956 nma_get_host_nrings(struct netmap_adapter *na, enum txrx t)
958 return (t == NR_TX ? na->num_host_tx_rings : na->num_host_rx_rings);
962 nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
965 na->num_tx_rings = v;
967 na->num_rx_rings = v;
971 nma_set_host_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
974 na->num_host_tx_rings = v;
976 na->num_host_rx_rings = v;
980 NMR(struct netmap_adapter *na, enum txrx t)
982 return (t == NR_TX ? na->tx_rings : na->rx_rings);
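/*
 * Illustrative sketch, not from the file: the accessors above are typically
 * combined to visit every hardware kring in one direction; NMR() returns
 * the kring pointer array for direction t.
 */
static void
example_foreach_kring(struct netmap_adapter *na, enum txrx t)
{
	u_int i;

	for (i = 0; i < nma_get_nrings(na, t); i++) {
		struct netmap_kring *kring = NMR(na, t)[i];
		/* ... operate on kring ... */
		(void)kring;
	}
}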
993 #define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY)
995 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
1049 * - mit implements rx interrupt mitigation;
1059 /* Is the transmission path controlled by a netmap-aware
1066 netmap_real_rings(struct netmap_adapter *na, enum txrx t)
1069 !!(na->na_flags & NAF_HOST_RINGS) * nma_get_host_nrings(na, t);
1074 netmap_all_rings(struct netmap_adapter *na, enum txrx t)
1092 * [diagram: the bwrap tx_rings/rx_rings cross-connected to the hwna rx_rings/tx_rings]
1102 * - packets coming from the bridge go to the bwrap rx rings,
1106 * - packets coming from the outside go to the hwna rx rings,
1115 * cross-links the hwna host rings in the same way as shown above.
1117 * - packets coming from the bridge and directed to the host stack
1121 * - packets coming from the host stack are still handled by the
1140 /* we overwrite the hwna->na_vp pointer, so we save
1193 int space = k->nr_hwtail - k->nr_hwcur;
1195 space += k->nkr_num_slots;
1196 nm_prdis("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
1209 return kring->rhead == kring->nr_hwtail;
1222 return kring->rcur == kring->nr_hwtail;
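/*
 * Illustrative sketch, an assumption: how a poll handler might use the
 * predicate above after running the ring's sync callback, to decide
 * whether to report readiness to user space.
 */
static int
example_rx_ready(struct netmap_kring *kring)
{
	kring->nm_sync(kring, 0);		/* refresh nr_hwtail */
	return !nm_kr_wouldblock(kring);	/* slots ready for the user? */
}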
1237 NM_ATOMIC_CLEAR(&kr->nr_busy);
1250 * - the function returns NM_KR_BUSY
1251 * - the function returns NM_KR_STOPPED and sets the POLLERR bit in *perr
1252 * (if non-null)
1266 stopped = kr->nkr_stopped;
1270 busy = NM_ATOMIC_TEST_AND_SET(&kr->nr_busy);
1275 stopped = kr->nkr_stopped;
1280 if (unlikely(nm_iszombie(kr->na))) {
1292 * Otherwise, any non-zero value will do.
1314 kr->nkr_stopped = stopped;
1315 while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))
1322 kr->nkr_stopped = 0;
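/*
 * Illustrative sketch, not part of the file: the canonical bracketing
 * pattern for the busy/stop protocol above. Datapath code wraps ring
 * access in nm_kr_tryget()/nm_kr_put(); control paths wrap ring
 * reconfiguration in nm_kr_stop()/nm_kr_start().
 */
static int
example_ring_access(struct netmap_kring *kr)
{
	int error = nm_kr_tryget(kr, 1 /* can sleep */, NULL);

	if (error)
		return (error);	/* busy or stopped */
	/* ... safely use the kring here ... */
	nm_kr_put(kr);
	return (0);
}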
1351 enum txrx tx, u_int n, u_int new_cur);
1363 NM_IRQ_COMPLETED = -1,
1368 NM_IRQ_RESCHED = -2,
1379 #define netmap_vp_to_ifp(_vp) ((_vp)->up.ifp)
1380 #define netmap_ifp_to_vp(_ifp) (NA(_ifp)->na_vp)
1381 #define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp)
1382 #define netmap_bdg_idx(_vp) ((_vp)->bdg_port)
1388 #define netmap_bdg_idx(_vp) -1
1394 return na && na->na_flags & NAF_NETMAP_ON;
1400 return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
1404 netmap_kring_on(struct netmap_adapter *na, u_int q, enum txrx t)
1411 if (t == NR_RX && q < na->num_rx_rings)
1412 kring = na->rx_rings[q];
1413 else if (t == NR_TX && q < na->num_tx_rings)
1414 kring = na->tx_rings[q];
1418 return (kring->nr_mode == NKR_NETMAP_ON) ? kring : NULL;
1424 return na == NULL || (na->na_flags & NAF_ZOMBIE);
1437 * The user-space ring pointers (head/cur/tail) are shared through
1460 kring->ring_id, nm_i, slot->buf_idx, len); \
1474 (l_) = NETMAP_BUF_SIZE(na_) - (o_); \
1478 /*---------------------------------------------------------------*/
1487 * - if the na points to an ifp, mark the ifp as netmap capable
1489 * - provide defaults for the setup callbacks and the memory allocator
1492 /* fill priv->np_[tr]xq{first,last} using the ringid and flags information
1505 * leasing-related data structures
1521 void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped);
1572 ((strchr(hdr->nr_name, '{') != NULL || strchr(hdr->nr_name, '}') != NULL) ? EOPNOTSUPP : 0)
1581 …(((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? …
1589 …(((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? …
1631 nm_prinf("getting %p:%s -> %d", __na, (__na)->name, (__na)->na_refcount); \
1642 nm_prinf("putting %p:%s -> %d", __na, (__na)->name, (__na)->na_refcount - 1); \
1658 #define NETMAP_BUF_BASE(_na) ((_na)->na_lut.lut[0].vaddr)
1659 #define NETMAP_BUF_SIZE(_na) ((_na)->na_lut.objsize)
1693 * os-specific and must be defined in glue code.
1713 ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )
1718 NA(ifp)->magic = \
1728 #define NM_IS_NATIVE(ifp) (NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor)
1736 return (-1);
1749 return (-1);
1750 domain = if_getnumadomain(na->ifp);
1752 domain = -1;
1802 #include <linux/dma-mapping.h>
1806 * dma_map_single(&pdev->dev, virt_addr, len, direction)
1807 * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
1810 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l];
1812 buffer_info->time_stamp = jiffies;
1813 buffer_info->mapped_as_page = false;
1814 buffer_info->length = len;
1815 //buffer_info->next_to_watch = l;
1817 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1819 buffer_info->dma = dma_map_single(&adapter->pdev->dev,
1822 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1827 tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX
1836 *map = dma_map_single(na->pdev, buf, size,
1838 if (dma_mapping_error(na->pdev, *map)) {
1851 dma_unmap_single(na->pdev, *map, sz,
1859 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
1862 dma_sync_single_for_cpu(na->pdev, *map, sz,
1869 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
1872 dma_sync_single_for_device(na->pdev, *map, sz,
1884 dma_unmap_single(na->pdev, *map, sz,
1888 *map = dma_map_single(na->pdev, buf, sz,
1905 int n = kr->nkr_num_slots;
1907 if (likely(kr->nkr_hwofs == 0)) {
1911 idx += kr->nkr_hwofs;
1917 return idx - n;
1924 int n = kr->nkr_num_slots;
1926 if (likely(kr->nkr_hwofs == 0)) {
1930 idx -= kr->nkr_hwofs;
1936 return idx - n;
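/*
 * Worked example (illustrative): with nkr_num_slots = 256 and nkr_hwofs = 3,
 * netmap ring index 254 maps to NIC index 254 + 3 - 256 = 1, and
 * netmap_idx_k2n() inverts it: 1 - 3 + 256 = 254. When nkr_hwofs == 0,
 * both functions return the index unchanged, which is the fast path above.
 */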
1940 /* Entries of the look-up table. */
1947 /* dma-mapping in linux can assign a buffer a different address
1949 * physical-address look-up table for each na.
1974 struct lut_entry *lut = na->na_lut.lut;
1975 uint32_t i = slot->buf_idx;
1976 return (unlikely(i >= na->na_lut.objtotal)) ?
1983 uint32_t i = slot->buf_idx;
1984 struct lut_entry *lut = na->na_lut.lut;
1985 struct plut_entry *plut = na->na_lut.plut;
1986 void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;
1989 *pp = (i >= na->na_lut.objtotal) ? (uint64_t)plut[0].paddr.QuadPart : (uint64_t)plut[i].paddr.QuadPart;
1991 *pp = (i >= na->na_lut.objtotal) ? plut[0].paddr : plut[i].paddr;
2000 slot->ptr = (slot->ptr & ~kring->offset_mask) |
2001 (offset & kring->offset_mask);
2007 uint64_t offset = (slot->ptr & kring->offset_mask);
2008 if (unlikely(offset > kring->offset_max))
2009 offset = kring->offset_max;
2016 void *addr = NMB(kring->na, slot);
2023 void *addr = PNMB(kring->na, slot, pp);
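/*
 * Illustrative sketch, not from the file: a kernel-side consumer loop
 * combining the helpers above, reading each frame at its offset-adjusted
 * address. Wraparound uses nm_next(); error handling is elided.
 */
static void
example_consume_rx(struct netmap_kring *kring)
{
	u_int i, lim = kring->nkr_num_slots - 1;

	for (i = kring->nr_hwcur; i != kring->nr_hwtail;
	     i = nm_next(i, lim)) {
		struct netmap_slot *slot = &kring->ring->slot[i];
		char *buf = NMB_O(kring, slot);	/* NMB() + nm_get_offset() */

		/* slot->len valid bytes are available at buf */
		(void)buf;
	}
}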
2076 * Array of CSB entries for application --> kernel communication
2079 /* Array of CSB entries for kernel --> application communication
2093 struct netmap_adapter *na = np->np_na;
2094 enum txrx t;
2098 for (i = np->np_qfirst[t]; i < np->np_qlast[t]; i++) {
2100 if (kring->nr_mode != kring->nr_pending_mode) {
2110 nm_si_user(struct netmap_priv_d *priv, enum txrx t)
2112 return (priv->np_na != NULL &&
2113 (priv->np_qlast[t] - priv->np_qfirst[t] > 1));
2154 * build a queue of descriptors, in an OS-specific way.
2155 * The payload is at addr, if non-null, and the routine should send or queue
2158 * At the end, if head is non-null, there will be an additional call
2159 * to the function with addr = NULL; this should tell the OS-specific
2164 void *m; /* os-specific mbuf-like object */
2165 void *head, *tail; /* tailq, if the OS-specific routine needs to build one */
2180 if (gna->prev)
2181 return gna->prev->ifp;
2183 return gna->up.up.ifp;
2321 struct nm_kctx; /* OS-specific kernel context - opaque */
2392 struct netmap_ring *ring = kring->ring;
2395 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur);
2398 ring->tail = kring->rtail = kring->nr_hwtail;
2407 * by the driver. We also attach a caller-provided external storage,
2417 uma_zfree(zone_clust, m->m_ext.ext_buf);
2421 (m)->m_ext.ext_free = (fn != NULL) ? \
2423 (m)->m_ext.ext_arg1 = na; \
2452 KASSERT((m->m_flags & M_EXT) != 0,
2454 KASSERT(m->m_ext.ext_size == MCLBYTES,
2456 m->m_ext.ext_size));
2458 buf = m->m_ext.ext_buf;
2473 #define CSB_READ(csb, field, r) (get_user(r, &csb->field))
2474 #define CSB_WRITE(csb, field, v) (put_user(v, &csb->field))
2479 v = fuword32(&csb->field); \
2480 KASSERT(v != -1, ("%s: fuword32 failed", __func__)); \
2486 error = suword32(&csb->field, v); \
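/*
 * Illustrative sketch, an assumption: how a sync kloop might use the
 * accessors above to exchange ring state with the application through the
 * shared CSB. struct nm_csb_atok (application-to-kernel) and struct
 * nm_csb_ktoa (kernel-to-application) are the public CSB entry types.
 */
static void
example_csb_publish(struct nm_csb_ktoa *ktoa, struct netmap_kring *kring)
{
	CSB_WRITE(ktoa, hwtail, kring->nr_hwtail);	/* kernel -> app */
}

static void
example_csb_fetch(struct nm_csb_atok *atok, uint32_t *head)
{
	CSB_READ(atok, head, *head);			/* app -> kernel */
}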