1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2011-2014 Matteo Landi
5 * Copyright (C) 2011-2016 Luigi Rizzo
6 * Copyright (C) 2011-2016 Giuseppe Lettieri
7 * Copyright (C) 2011-2016 Vincenzo Maffione
41 * i.e. user-accessible copies of the interface's queues.
77 - a spinlock on each ring, to handle producer/consumer races on
84 - an atomic variable to guarantee that there is at most one
96 - *xsync() should be protected against initializations of the card.
104 - a per-interface core_lock protecting access from the host stack
110 --- VALE SWITCH ---
123 On the rx ring, the per-port lock is grabbed initially to reserve
133 /* --- internals ----
144 * os-specific:
151 * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
156 * In all cases, this action creates/takes-a-reference-to a
163 * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
173 * - netmap_hw_adapter: [netmap.c]
176 * - transmissions (from the network stack) to netmap_transmit()
177 * - receive notifications to the nm_notify() callback for
182 * - netmap_generic_adapter: [netmap_generic.c]
188 * - netmap_vp_adapter [netmap_vale.c]
198 * - netmap_pipe_adapter [netmap_pipe.c]
202 * - netmap_monitor_adapter [netmap_monitor.c]
207 * - netmap_bwrap_adapter [netmap_vale.c]
211 * os-specific:
221 * os-specific:
249 * os-specific:
254 * ---- VALE_CTL -----
256 * VALE switches are controlled by issuing a NIOCREGIF with a non-null
282 * ---- DATAPATHS -----
284 * -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
288 * - tx from netmap userspace:
291 * kring->nm_sync() == DEVICE_netmap_txsync()
293 * na->nm_notify() == netmap_notify()
294 * - rx from netmap userspace:
297 * kring->nm_sync() == DEVICE_netmap_rxsync()
299 * na->nm_notify() == netmap_notify()
300 * - rx from host stack
304 * na->nm_notify == netmap_notify()
306 * kring->nm_sync() == netmap_rxsync_from_host
308 * - tx to host stack
310 * kring->nm_sync() == netmap_txsync_to_host
313 * FreeBSD: na->if_input() == ether_input()
317 * -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
321 * - tx from netmap userspace:
324 * kring->nm_sync() == generic_netmap_txsync()
327 * ifp->ndo_start_xmit == generic_ndo_start_xmit()
328 * gna->save_start_xmit == orig. dev. start_xmit
329 * FreeBSD: na->if_transmit() == orig. dev if_transmit
331 * na->nm_notify() == netmap_notify()
332 * - rx from netmap userspace:
334 * kring->nm_sync() == generic_netmap_rxsync()
339 * na->nm_notify() == netmap_notify()
340 * - rx from host stack
345 * ifp->ndo_start_xmit == generic_ndo_start_xmit()
347 * na->nm_notify() == netmap_notify()
348 * - tx to host stack (same as native):
351 * -= VALE =-
355 * - VALE ports:
357 * kring->nm_sync() == netmap_vp_txsync()
359 * - system device with native support:
362 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
363 * kring->nm_sync() == DEVICE_netmap_rxsync()
365 * kring->nm_sync() == DEVICE_netmap_rxsync()
368 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
369 * kring->nm_sync() == netmap_rxsync_from_host()
372 * - system device with generic support:
375 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
376 * kring->nm_sync() == generic_netmap_rxsync()
378 * kring->nm_sync() == generic_netmap_rxsync()
381 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
382 * kring->nm_sync() == netmap_rxsync_from_host()
385 * (all cases) --> nm_bdg_flush()
386 * dest_na->nm_notify() == (see below)
390 * - VALE ports:
393 * kring->nm_sync() == netmap_vp_rxsync()
395 * na->nm_notify() == netmap_notify()
397 * - system device with native support:
399 * na->nm_notify() == netmap_bwrap_notify()
401 * kring->nm_sync() == DEVICE_netmap_txsync()
405 * kring->nm_sync() == netmap_txsync_to_host
408 * - system device with generic adapter:
410 * na->nm_notify() == netmap_bwrap_notify()
412 * kring->nm_sync() == generic_netmap_txsync()
416 * kring->nm_sync() == netmap_txsync_to_host
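All of the datapath chains above terminate in a kring->nm_sync() call issued from netmap_ioctl() or netmap_poll(). As a reference point for the first chain ("tx from netmap userspace" on a native device), here is a minimal userspace sketch using the public <net/netmap_user.h> helpers; the file descriptor and netmap_if are assumed to come from a NIOCCTRL/NETMAP_REQ_REGISTER plus mmap() sequence, and error handling is omitted.

#include <string.h>
#include <sys/ioctl.h>
#include <net/netmap_user.h>	/* NETMAP_TXRING(), NETMAP_BUF(), nm_ring_next() */

/* Sketch only: push one frame on hw TX ring 0 and kick the kernel.
 * The NIOCTXSYNC ioctl below is what ends up in
 * kring->nm_sync() == DEVICE_netmap_txsync() for a native driver. */
static void
tx_one(int fd, struct netmap_if *nifp, const void *frame, unsigned int len)
{
	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
	struct netmap_slot *slot;

	if (nm_ring_empty(ring))
		return;			/* no free slots: poll(POLLOUT) first */
	slot = &ring->slot[ring->cur];
	memcpy(NETMAP_BUF(ring, slot->buf_idx), frame, len);
	slot->len = len;
	ring->head = ring->cur = nm_ring_next(ring, ring->cur);
	ioctl(fd, NIOCTXSYNC, NULL);	/* -> netmap_ioctl() -> nm_txsync_prologue() -> nm_sync() */
}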
422 * OS-specific code that is used only within this file.
423 * Other OS-specific code that must be accessed by drivers
483 /* user-controlled variables */
509 /* We use by default netmap-aware qdiscs with generic netmap adapters,
512 * 1) it prevents non-fifo qdiscs to break the TX notification
516 * change skb->dev, like bridge, veth, ...
529 /* Non-zero to enable checksum offloading in NIC drivers */
532 /* Non-zero if ptnet devices are allowed to use virtio-net headers. */
569 "Number of per-ring slots for emulated netmap mode");
578 0, "Allow ptnet devices to use virtio-net headers");
595 mtx_lock(&kr->q_lock); in netmap_disable_ring()
596 mtx_unlock(&kr->q_lock); in netmap_disable_ring()
602 netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped) in netmap_set_ring()
607 NMR(na, t)[ring_id]->nkr_stopped = 0; in netmap_set_ring()
616 enum txrx t; in netmap_set_all_rings()
622 nm_prinf("%s: %sable all rings", na->name, in netmap_set_all_rings()
647 * Convenience function used in drivers. Re-enables rxsync and txsync on the
665 na->na_flags |= NAF_ZOMBIE; in netmap_make_zombie()
675 if (na->na_flags & NAF_ZOMBIE) { in netmap_undo_zombie()
677 na->na_flags &= ~NAF_ZOMBIE; in netmap_undo_zombie()
710 * packet-dump function, user-supplied or static buffer.
765 if (na->ifp && !nm_is_bwrap(na)) { in netmap_update_config()
766 strlcpy(na->name, if_name(na->ifp), sizeof(na->name)); in netmap_update_config()
770 if (na->nm_config == NULL || in netmap_update_config()
771 na->nm_config(na, &info)) { in netmap_update_config()
773 info.num_tx_rings = na->num_tx_rings; in netmap_update_config()
774 info.num_tx_descs = na->num_tx_desc; in netmap_update_config()
775 info.num_rx_rings = na->num_rx_rings; in netmap_update_config()
776 info.num_rx_descs = na->num_rx_desc; in netmap_update_config()
777 info.rx_buf_maxsize = na->rx_buf_maxsize; in netmap_update_config()
780 if (na->num_tx_rings == info.num_tx_rings && in netmap_update_config()
781 na->num_tx_desc == info.num_tx_descs && in netmap_update_config()
782 na->num_rx_rings == info.num_rx_rings && in netmap_update_config()
783 na->num_rx_desc == info.num_rx_descs && in netmap_update_config()
784 na->rx_buf_maxsize == info.rx_buf_maxsize) in netmap_update_config()
786 if (na->active_fds == 0) { in netmap_update_config()
787 na->num_tx_rings = info.num_tx_rings; in netmap_update_config()
788 na->num_tx_desc = info.num_tx_descs; in netmap_update_config()
789 na->num_rx_rings = info.num_rx_rings; in netmap_update_config()
790 na->num_rx_desc = info.num_rx_descs; in netmap_update_config()
791 na->rx_buf_maxsize = info.rx_buf_maxsize; in netmap_update_config()
795 na->name, na->num_tx_rings, na->num_tx_desc, in netmap_update_config()
796 na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize); in netmap_update_config()
801 na->name, info.num_tx_rings, info.num_tx_descs, in netmap_update_config()
814 kring->hwbuf_len = target; in netmap_default_bufcfg()
815 kring->buf_align = 0; /* no alignment */ in netmap_default_bufcfg()
822 * +----------+
823 * na->tx_rings ----->| | \
824 * | | } na->num_tx_rings
826 * +----------+
828 * na->rx_rings ----> +----------+
830 * | | } na->num_rx_rings
832 * +----------+
834 * +----------+
835 * na->tailroom ----->| | \
838 * +----------+
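A compact restatement of the layout sketched above, assuming ntx/nrx already include the host rings (as netmap_all_rings() computes them). This is only an illustration of the pointer arithmetic; the real work is done by netmap_krings_create() right below.

/* Illustration only: one allocation holds [tx kring pointers]
 * [rx kring pointers][tailroom bytes][the netmap_kring structs]. */
static int
krings_layout_sketch(struct netmap_adapter *na, u_int ntx, u_int nrx,
	u_int tailroom)
{
	struct netmap_kring *kring;
	size_t len;
	u_int i;

	len = (size_t)(ntx + nrx) * (sizeof(struct netmap_kring *) +
	    sizeof(struct netmap_kring)) + tailroom;
	na->tx_rings = nm_os_malloc(len);
	if (na->tx_rings == NULL)
		return ENOMEM;
	na->rx_rings = na->tx_rings + ntx;	/* second pointer array */
	na->tailroom = na->rx_rings + nrx;	/* tailroom starts after the arrays */
	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
	for (i = 0; i < ntx + nrx; i++)		/* link pointers to the structs */
		na->tx_rings[i] = kring++;
	return 0;
}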
850 enum txrx t; in netmap_krings_create()
853 if (na->tx_rings != NULL) { in netmap_krings_create()
867 na->tx_rings = nm_os_malloc((size_t)len); in netmap_krings_create()
868 if (na->tx_rings == NULL) { in netmap_krings_create()
872 na->rx_rings = na->tx_rings + n[NR_TX]; in netmap_krings_create()
873 na->tailroom = na->rx_rings + n[NR_RX]; in netmap_krings_create()
876 kring = (struct netmap_kring *)((char *)na->tailroom + tailroom); in netmap_krings_create()
878 na->tx_rings[i] = kring; in netmap_krings_create()
891 kring->notify_na = na; in netmap_krings_create()
892 kring->ring_id = i; in netmap_krings_create()
893 kring->tx = t; in netmap_krings_create()
894 kring->nkr_num_slots = ndesc; in netmap_krings_create()
895 kring->nr_mode = NKR_NETMAP_OFF; in netmap_krings_create()
896 kring->nr_pending_mode = NKR_NETMAP_OFF; in netmap_krings_create()
898 kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync); in netmap_krings_create()
899 kring->nm_bufcfg = na->nm_bufcfg; in netmap_krings_create()
900 if (kring->nm_bufcfg == NULL) in netmap_krings_create()
901 kring->nm_bufcfg = netmap_default_bufcfg; in netmap_krings_create()
903 if (!(na->na_flags & NAF_HOST_RINGS)) in netmap_krings_create()
904 kring->nr_kflags |= NKR_FAKERING; in netmap_krings_create()
905 kring->nm_sync = (t == NR_TX ? in netmap_krings_create()
908 kring->nm_bufcfg = netmap_default_bufcfg; in netmap_krings_create()
910 kring->nm_notify = na->nm_notify; in netmap_krings_create()
911 kring->rhead = kring->rcur = kring->nr_hwcur = 0; in netmap_krings_create()
915 kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0); in netmap_krings_create()
916 snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name, in netmap_krings_create()
919 kring->name, kring->rhead, kring->rcur, kring->rtail); in netmap_krings_create()
920 err = nm_os_selinfo_init(&kring->si, kring->name); in netmap_krings_create()
925 mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF); in netmap_krings_create()
926 kring->na = na; /* setting this field marks the mutex as initialized */ in netmap_krings_create()
928 err = nm_os_selinfo_init(&na->si[t], na->name); in netmap_krings_create()
944 struct netmap_kring **kring = na->tx_rings; in netmap_krings_delete()
945 enum txrx t; in netmap_krings_delete()
947 if (na->tx_rings == NULL) { in netmap_krings_delete()
954 nm_os_selinfo_uninit(&na->si[t]); in netmap_krings_delete()
957 for ( ; kring != na->tailroom; kring++) { in netmap_krings_delete()
958 if ((*kring)->na != NULL) in netmap_krings_delete()
959 mtx_destroy(&(*kring)->q_lock); in netmap_krings_delete()
960 nm_os_selinfo_uninit(&(*kring)->si); in netmap_krings_delete()
962 nm_os_free(na->tx_rings); in netmap_krings_delete()
963 na->tx_rings = na->rx_rings = na->tailroom = NULL; in netmap_krings_delete()
979 struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue; in netmap_hw_krings_delete()
990 if (na->nm_mem_prev) { in netmap_mem_restore()
991 netmap_mem_put(na->nm_mem); in netmap_mem_restore()
992 na->nm_mem = na->nm_mem_prev; in netmap_mem_restore()
993 na->nm_mem_prev = NULL; in netmap_mem_restore()
1000 netmap_mem_deref(na->nm_mem, na); in netmap_mem_drop()
1002 if (na->active_fds <= 0) { in netmap_mem_drop()
1013 enum txrx t; in netmap_update_hostrings_mode()
1021 kring->nr_mode = kring->nr_pending_mode; in netmap_update_hostrings_mode()
1037 struct netmap_adapter *na = priv->np_na; in netmap_do_unregif()
1040 na->active_fds--; in netmap_do_unregif()
1047 if (na->active_fds <= 0) { in netmap_do_unregif()
1055 if (na->active_fds <= 0 || nm_kring_pending(priv)) { in netmap_do_unregif()
1057 na->nm_register(na, 0); in netmap_do_unregif()
1064 if (na->active_fds <= 0) { /* last instance */ in netmap_do_unregif()
1080 nm_prinf("deleting last instance for %s", na->name); in netmap_do_unregif()
1086 na->nm_krings_delete(na); in netmap_do_unregif()
1089 if (na->na_flags & NAF_HOST_RINGS) { in netmap_do_unregif()
1090 na->num_host_tx_rings = 1; in netmap_do_unregif()
1091 na->num_host_rx_rings = 1; in netmap_do_unregif()
1093 na->num_host_tx_rings = 0; in netmap_do_unregif()
1094 na->num_host_rx_rings = 0; in netmap_do_unregif()
1101 netmap_mem_if_delete(na, priv->np_nifp); in netmap_do_unregif()
1105 priv->np_na = NULL; in netmap_do_unregif()
1106 priv->np_nifp = NULL; in netmap_do_unregif()
1117 priv->np_refs = 1; in netmap_priv_new()
1134 struct netmap_adapter *na = priv->np_na; in netmap_priv_delete()
1137 if (--priv->np_refs > 0) { in netmap_priv_delete()
1144 netmap_unget_na(na, priv->np_ifp); in netmap_priv_delete()
1170 * - Before releasing buffers on hw RX rings, the application can mark
1175 * - Before releasing buffers on the host RX ring, the application can
1178 * from doing the same task in user-space.
1180 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
1183 * The transfer NIC --> host is relatively easy, just encapsulate
1184 * into mbufs and we are done. The host --> NIC side is slightly
1224 * Scan the buffers from hwcur to ring->head, and put a copy of those
1232 u_int const lim = kring->nkr_num_slots - 1; in netmap_grab_packets()
1233 u_int const head = kring->rhead; in netmap_grab_packets()
1235 struct netmap_adapter *na = kring->na; in netmap_grab_packets()
1237 for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) { in netmap_grab_packets()
1239 struct netmap_slot *slot = &kring->ring->slot[n]; in netmap_grab_packets()
1241 if ((slot->flags & NS_FORWARD) == 0 && !force) in netmap_grab_packets()
1243 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) { in netmap_grab_packets()
1244 nm_prlim(5, "bad pkt at %d len %d", n, slot->len); in netmap_grab_packets()
1247 slot->flags &= ~NS_FORWARD; // XXX needed ? in netmap_grab_packets()
1249 m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL); in netmap_grab_packets()
1260 return ((netmap_fwd || kring->ring->flags & NR_FORWARD) && in _nm_may_forward()
1261 kring->na->na_flags & NAF_HOST_RINGS && in _nm_may_forward()
1262 kring->tx == NR_RX); in _nm_may_forward()
1269 kring->ring_id != kring->na->num_rx_rings; in nm_may_forward_up()
1277 kring->ring_id == kring->na->num_rx_rings; in nm_may_forward_down()
1282 * kring->nr_hwcur and kring->rhead.
1283 * Called under kring->rx_queue.lock on the sw rx ring.
1295 struct netmap_kring *kring = na->rx_rings[na->num_rx_rings]; in netmap_sw_to_nic()
1296 struct netmap_slot *rxslot = kring->ring->slot; in netmap_sw_to_nic()
1297 u_int i, rxcur = kring->nr_hwcur; in netmap_sw_to_nic()
1298 u_int const head = kring->rhead; in netmap_sw_to_nic()
1299 u_int const src_lim = kring->nkr_num_slots - 1; in netmap_sw_to_nic()
1303 for (i = 0; i < na->num_tx_rings; i++) { in netmap_sw_to_nic()
1304 struct netmap_kring *kdst = na->tx_rings[i]; in netmap_sw_to_nic()
1305 struct netmap_ring *rdst = kdst->ring; in netmap_sw_to_nic()
1306 u_int const dst_lim = kdst->nkr_num_slots - 1; in netmap_sw_to_nic()
1308 /* XXX do we trust ring or kring->rcur,rtail ? */ in netmap_sw_to_nic()
1312 u_int dst_head = rdst->head; in netmap_sw_to_nic()
1315 if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd) in netmap_sw_to_nic()
1320 dst = &rdst->slot[dst_head]; in netmap_sw_to_nic()
1324 src->buf_idx = dst->buf_idx; in netmap_sw_to_nic()
1325 src->flags = NS_BUF_CHANGED; in netmap_sw_to_nic()
1327 dst->buf_idx = tmp.buf_idx; in netmap_sw_to_nic()
1328 dst->len = tmp.len; in netmap_sw_to_nic()
1329 dst->flags = NS_BUF_CHANGED; in netmap_sw_to_nic()
1331 rdst->head = rdst->cur = nm_next(dst_head, dst_lim); in netmap_sw_to_nic()
1348 struct netmap_adapter *na = kring->na; in netmap_txsync_to_host()
1349 u_int const lim = kring->nkr_num_slots - 1; in netmap_txsync_to_host()
1350 u_int const head = kring->rhead; in netmap_txsync_to_host()
1359 kring->nr_hwcur = head; in netmap_txsync_to_host()
1360 kring->nr_hwtail = head + lim; in netmap_txsync_to_host()
1361 if (kring->nr_hwtail > lim) in netmap_txsync_to_host()
1362 kring->nr_hwtail -= lim + 1; in netmap_txsync_to_host()
1364 netmap_send_up(na->ifp, &q); in netmap_txsync_to_host()
1371 * They have been put in kring->rx_queue by netmap_transmit().
1372 * We protect access to the kring using kring->rx_queue.lock
1375 * for transparent-mode forwarding, then sets the NR_FORWARD
1381 struct netmap_adapter *na = kring->na; in netmap_rxsync_from_host()
1382 struct netmap_ring *ring = kring->ring; in netmap_rxsync_from_host()
1384 u_int const lim = kring->nkr_num_slots - 1; in netmap_rxsync_from_host()
1385 u_int const head = kring->rhead; in netmap_rxsync_from_host()
1387 struct mbq *q = &kring->rx_queue, fq; in netmap_rxsync_from_host()
1399 nm_i = kring->nr_hwtail; in netmap_rxsync_from_host()
1400 stop_i = nm_prev(kring->nr_hwcur, lim); in netmap_rxsync_from_host()
1403 struct netmap_slot *slot = &ring->slot[nm_i]; in netmap_rxsync_from_host()
1410 slot->len = len; in netmap_rxsync_from_host()
1411 slot->flags = 0; in netmap_rxsync_from_host()
1415 kring->nr_hwtail = nm_i; in netmap_rxsync_from_host()
1421 nm_i = kring->nr_hwcur; in netmap_rxsync_from_host()
1426 kring->nr_kflags |= NR_FORWARD; in netmap_rxsync_from_host()
1430 kring->nr_hwcur = head; in netmap_rxsync_from_host()
1457 * -------------------------------------------------------
1489 || prev_na->na_flags & NAF_FORCE_NATIVE in netmap_get_hw_na()
1494 || prev_na->na_next_pipe > 0 in netmap_get_hw_na()
1517 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))). in netmap_get_hw_na()
1529 if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) && in netmap_get_hw_na()
1530 (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) { in netmap_get_hw_na()
1531 (*na)->nm_mem_prev = (*na)->nm_mem; in netmap_get_hw_na()
1532 (*na)->nm_mem = netmap_mem_get(nmd); in netmap_get_hw_na()
1560 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; in netmap_get_na()
1568 if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { in netmap_get_na()
1572 if (req->nr_mode == NR_REG_PIPE_MASTER || in netmap_get_na()
1573 req->nr_mode == NR_REG_PIPE_SLAVE) { in netmap_get_na()
1584 if (nmd == NULL && req->nr_mem_id) { in netmap_get_na()
1585 nmd = netmap_mem_find(req->nr_mem_id); in netmap_get_na()
1630 *ifp = ifunit_ref(hdr->nr_name); in netmap_get_na()
1647 if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) { in netmap_get_na()
1648 if (req->nr_host_tx_rings) in netmap_get_na()
1649 (*na)->num_host_tx_rings = req->nr_host_tx_rings; in netmap_get_na()
1650 if (req->nr_host_rx_rings) in netmap_get_na()
1651 (*na)->num_host_rx_rings = req->nr_host_rx_rings; in netmap_get_na()
1653 nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings, in netmap_get_na()
1654 (*na)->num_host_rx_rings); in netmap_get_na()
1688 kring->name, \
1689 head, cur, ring->tail, \
1690 kring->rhead, kring->rcur, kring->rtail, \
1691 kring->nr_hwcur, kring->nr_hwtail); \
1692 return kring->nkr_num_slots; \
1698 * Returns ring->head if ok, or something >= kring->nkr_num_slots
1712 u_int head = NM_ACCESS_ONCE(ring->head); in nm_txsync_prologue()
1713 u_int cur = NM_ACCESS_ONCE(ring->cur); in nm_txsync_prologue()
1714 u_int n = kring->nkr_num_slots; in nm_txsync_prologue()
1717 kring->name, in nm_txsync_prologue()
1718 kring->nr_hwcur, kring->nr_hwtail, in nm_txsync_prologue()
1719 ring->head, ring->cur, ring->tail); in nm_txsync_prologue()
1721 NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n || in nm_txsync_prologue()
1722 kring->rtail >= n || kring->nr_hwtail >= n); in nm_txsync_prologue()
1728 * 0 A rhead B rtail C n-1 in nm_txsync_prologue()
1729 * 0 D rtail E rhead F n-1 in nm_txsync_prologue()
1733 if (kring->rtail >= kring->rhead) { in nm_txsync_prologue()
1735 NM_FAIL_ON(head < kring->rhead || head > kring->rtail); in nm_txsync_prologue()
1737 NM_FAIL_ON(cur < head || cur > kring->rtail); in nm_txsync_prologue()
1740 NM_FAIL_ON(head > kring->rtail && head < kring->rhead); in nm_txsync_prologue()
1743 if (head <= kring->rtail) { in nm_txsync_prologue()
1745 NM_FAIL_ON(cur < head || cur > kring->rtail); in nm_txsync_prologue()
1748 NM_FAIL_ON(cur > kring->rtail && cur < head); in nm_txsync_prologue()
1751 if (ring->tail != kring->rtail) { in nm_txsync_prologue()
1752 nm_prlim(5, "%s tail overwritten was %d need %d", kring->name, in nm_txsync_prologue()
1753 ring->tail, kring->rtail); in nm_txsync_prologue()
1754 ring->tail = kring->rtail; in nm_txsync_prologue()
1756 kring->rhead = head; in nm_txsync_prologue()
1757 kring->rcur = cur; in nm_txsync_prologue()
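The checks above enforce that head and cur only move forward, staying inside the region bounded by the previously published nr_hwcur/nr_hwtail. The same circular arithmetic, seen from userspace, is what nm_ring_space() in <net/netmap_user.h> computes; an equivalent helper, for illustration only:

/* Illustration of the circular-ring arithmetic validated above: the
 * slots an application may still consume/fill lie between cur and
 * tail, modulo the ring size (cf. nm_ring_space()). */
static inline uint32_t
slots_available(const struct netmap_ring *ring)
{
	int space = (int)ring->tail - (int)ring->cur;

	if (space < 0)
		space += ring->num_slots;	/* wrapped past the end */
	return ((uint32_t)space);
}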
1764 * Returns ring->head if ok, kring->nkr_num_slots on error.
1776 uint32_t const n = kring->nkr_num_slots; in nm_rxsync_prologue()
1780 kring->name, in nm_rxsync_prologue()
1781 kring->nr_hwcur, kring->nr_hwtail, in nm_rxsync_prologue()
1782 ring->head, ring->cur, ring->tail); in nm_rxsync_prologue()
1786 * - head is not an issue because the previous value is hwcur; in nm_rxsync_prologue()
1787 * - cur could in principle go back, however it does not matter in nm_rxsync_prologue()
1790 cur = kring->rcur = NM_ACCESS_ONCE(ring->cur); in nm_rxsync_prologue()
1791 head = kring->rhead = NM_ACCESS_ONCE(ring->head); in nm_rxsync_prologue()
1793 NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n); in nm_rxsync_prologue()
1796 if (kring->nr_hwtail >= kring->nr_hwcur) { in nm_rxsync_prologue()
1798 NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail); in nm_rxsync_prologue()
1800 NM_FAIL_ON(cur < head || cur > kring->nr_hwtail); in nm_rxsync_prologue()
1803 NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail); in nm_rxsync_prologue()
1805 if (head <= kring->nr_hwtail) { in nm_rxsync_prologue()
1807 NM_FAIL_ON(cur < head || cur > kring->nr_hwtail); in nm_rxsync_prologue()
1810 NM_FAIL_ON(cur < head && cur > kring->nr_hwtail); in nm_rxsync_prologue()
1813 if (ring->tail != kring->rtail) { in nm_rxsync_prologue()
1815 kring->name, in nm_rxsync_prologue()
1816 ring->tail, kring->rtail); in nm_rxsync_prologue()
1817 ring->tail = kring->rtail; in nm_rxsync_prologue()
1838 struct netmap_ring *ring = kring->ring; in netmap_ring_reinit()
1839 u_int i, lim = kring->nkr_num_slots - 1; in netmap_ring_reinit()
1843 nm_prlim(10, "called for %s", kring->name); in netmap_ring_reinit()
1845 kring->rhead = ring->head; in netmap_ring_reinit()
1846 kring->rcur = ring->cur; in netmap_ring_reinit()
1847 kring->rtail = ring->tail; in netmap_ring_reinit()
1849 if (ring->cur > lim) in netmap_ring_reinit()
1851 if (ring->head > lim) in netmap_ring_reinit()
1853 if (ring->tail > lim) in netmap_ring_reinit()
1856 u_int idx = ring->slot[i].buf_idx; in netmap_ring_reinit()
1857 u_int len = ring->slot[i].len; in netmap_ring_reinit()
1858 if (idx < 2 || idx >= kring->na->na_lut.objtotal) { in netmap_ring_reinit()
1860 ring->slot[i].buf_idx = 0; in netmap_ring_reinit()
1861 ring->slot[i].len = 0; in netmap_ring_reinit()
1862 } else if (len > NETMAP_BUF_SIZE(kring->na)) { in netmap_ring_reinit()
1863 ring->slot[i].len = 0; in netmap_ring_reinit()
1869 nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d", in netmap_ring_reinit()
1870 kring->name, in netmap_ring_reinit()
1871 ring->cur, kring->nr_hwcur, in netmap_ring_reinit()
1872 ring->tail, kring->nr_hwtail); in netmap_ring_reinit()
1873 ring->head = kring->rhead = kring->nr_hwcur; in netmap_ring_reinit()
1874 ring->cur = kring->rcur = kring->nr_hwcur; in netmap_ring_reinit()
1875 ring->tail = kring->rtail = kring->nr_hwtail; in netmap_ring_reinit()
1883 * [priv->np_txqfirst, priv->np_txqlast) and
1884 * [priv->np_rxqfirst, priv->np_rxqlast)
1890 struct netmap_adapter *na = priv->np_na; in netmap_interp_ringid()
1891 struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body; in netmap_interp_ringid()
1893 enum txrx t; in netmap_interp_ringid()
1895 u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode, in netmap_interp_ringid()
1896 nr_ringid = reg->nr_ringid; in netmap_interp_ringid()
1900 priv->np_qfirst[t] = priv->np_qlast[t] = 0; in netmap_interp_ringid()
1906 priv->np_qfirst[t] = 0; in netmap_interp_ringid()
1907 priv->np_qlast[t] = nma_get_nrings(na, t); in netmap_interp_ringid()
1909 priv->np_qfirst[t], priv->np_qlast[t]); in netmap_interp_ringid()
1913 if (!(na->na_flags & NAF_HOST_RINGS)) { in netmap_interp_ringid()
1917 priv->np_qfirst[t] = (nr_mode == NR_REG_SW ? in netmap_interp_ringid()
1919 priv->np_qlast[t] = netmap_all_rings(na, t); in netmap_interp_ringid()
1922 priv->np_qfirst[t], priv->np_qlast[t]); in netmap_interp_ringid()
1925 if (nr_ringid >= na->num_tx_rings && in netmap_interp_ringid()
1926 nr_ringid >= na->num_rx_rings) { in netmap_interp_ringid()
1934 priv->np_qfirst[t] = j; in netmap_interp_ringid()
1935 priv->np_qlast[t] = j + 1; in netmap_interp_ringid()
1937 priv->np_qfirst[t], priv->np_qlast[t]); in netmap_interp_ringid()
1940 if (!(na->na_flags & NAF_HOST_RINGS)) { in netmap_interp_ringid()
1944 if (nr_ringid >= na->num_host_tx_rings && in netmap_interp_ringid()
1945 nr_ringid >= na->num_host_rx_rings) { in netmap_interp_ringid()
1953 priv->np_qfirst[t] = nma_get_nrings(na, t) + j; in netmap_interp_ringid()
1954 priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1; in netmap_interp_ringid()
1956 priv->np_qfirst[t], priv->np_qlast[t]); in netmap_interp_ringid()
1963 priv->np_flags = nr_flags; in netmap_interp_ringid()
1965 /* Allow transparent forwarding mode in the host --> nic in netmap_interp_ringid()
1967 if (priv->np_qfirst[NR_TX] == 0 && in netmap_interp_ringid()
1968 priv->np_qlast[NR_TX] >= na->num_tx_rings) { in netmap_interp_ringid()
1969 priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN; in netmap_interp_ringid()
1974 na->name, in netmap_interp_ringid()
1975 priv->np_qfirst[NR_TX], in netmap_interp_ringid()
1976 priv->np_qlast[NR_TX], in netmap_interp_ringid()
1977 priv->np_qfirst[NR_RX], in netmap_interp_ringid()
1978 priv->np_qlast[NR_RX], in netmap_interp_ringid()
1992 struct netmap_adapter *na = priv->np_na; in netmap_set_ringid()
1993 struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body; in netmap_set_ringid()
1995 enum txrx t; in netmap_set_ringid()
2002 priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1; in netmap_set_ringid()
2011 na->si_users[t]++; in netmap_set_ringid()
2019 struct netmap_adapter *na = priv->np_na; in netmap_unset_ringid()
2020 enum txrx t; in netmap_unset_ringid()
2024 na->si_users[t]--; in netmap_unset_ringid()
2025 priv->np_qfirst[t] = priv->np_qlast[t] = 0; in netmap_unset_ringid()
2027 priv->np_flags = 0; in netmap_unset_ringid()
2028 priv->np_txpoll = 0; in netmap_unset_ringid()
2029 priv->np_kloop_state = 0; in netmap_unset_ringid()
2033 ((i_) < (p_)->np_qlast[(t_)])
2035 (within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
2038 (i_) = (p_)->np_qfirst[(t_)]; \
2041 ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); \
2043 (++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
2053 struct netmap_adapter *na = priv->np_na; in netmap_krings_get()
2056 int excl = (priv->np_flags & NR_EXCLUSIVE); in netmap_krings_get()
2057 enum txrx t; in netmap_krings_get()
2061 na->name, in netmap_krings_get()
2062 priv->np_qfirst[NR_TX], in netmap_krings_get()
2063 priv->np_qlast[NR_TX], in netmap_krings_get()
2064 priv->np_qfirst[NR_RX], in netmap_krings_get()
2065 priv->np_qlast[NR_RX]); in netmap_krings_get()
2072 if ((kring->nr_kflags & NKR_EXCLUSIVE) || in netmap_krings_get()
2073 (kring->users && excl)) in netmap_krings_get()
2075 nm_prdis("ring %s busy", kring->name); in netmap_krings_get()
2084 kring->users++; in netmap_krings_get()
2086 kring->nr_kflags |= NKR_EXCLUSIVE; in netmap_krings_get()
2087 kring->nr_pending_mode = NKR_NETMAP_ON; in netmap_krings_get()
2102 int excl = (priv->np_flags & NR_EXCLUSIVE); in netmap_krings_put()
2103 enum txrx t; in netmap_krings_put()
2106 na->name, in netmap_krings_put()
2107 priv->np_qfirst[NR_TX], in netmap_krings_put()
2108 priv->np_qlast[NR_TX], in netmap_krings_put()
2109 priv->np_qfirst[NR_RX], in netmap_krings_put()
2110 priv->np_qlast[NR_RX]); in netmap_krings_put()
2114 kring->nr_kflags &= ~NKR_EXCLUSIVE; in netmap_krings_put()
2115 kring->users--; in netmap_krings_put()
2116 if (kring->users == 0) in netmap_krings_put()
2117 kring->nr_pending_mode = NKR_NETMAP_OFF; in netmap_krings_put()
2124 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]); in nm_priv_rx_enabled()
2133 (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok; in netmap_csb_validate()
2135 (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa; in netmap_csb_validate()
2136 enum txrx t; in netmap_csb_validate()
2142 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) { in netmap_csb_validate()
2149 num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t]; in netmap_csb_validate()
2155 if (!(priv->np_flags & NR_EXCLUSIVE)) { in netmap_csb_validate()
2174 if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) { in netmap_csb_validate()
2183 /* Application --> kernel direction. */ in netmap_csb_validate()
2186 /* Kernel --> application direction. */ in netmap_csb_validate()
2197 priv->np_csb_atok_base = csb_atok_base; in netmap_csb_validate()
2198 priv->np_csb_ktoa_base = csb_ktoa_base; in netmap_csb_validate()
2204 NMR(priv->np_na, t)[i + priv->np_qfirst[t]]; in netmap_csb_validate()
2213 CSB_WRITE(csb_atok, head, kring->rhead); in netmap_csb_validate()
2214 CSB_WRITE(csb_atok, cur, kring->rcur); in netmap_csb_validate()
2217 CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur); in netmap_csb_validate()
2218 CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail); in netmap_csb_validate()
2222 "hwcur %u, hwtail %u", kring->name, in netmap_csb_validate()
2223 kring->rhead, kring->rcur, kring->nr_hwcur, in netmap_csb_validate()
2224 kring->nr_hwtail); in netmap_csb_validate()
2238 if (mtu <= na->rx_buf_maxsize) { in netmap_buf_size_validate()
2253 * the maximum per-slot size. */ in netmap_buf_size_validate()
2254 if (!(na->na_flags & NAF_MOREFRAG)) { in netmap_buf_size_validate()
2258 if_name(na->ifp)); in netmap_buf_size_validate()
2260 } else if (nbs < na->rx_buf_maxsize) { in netmap_buf_size_validate()
2263 ">= %u", if_name(na->ifp), in netmap_buf_size_validate()
2264 na->rx_buf_maxsize); in netmap_buf_size_validate()
2271 if_name(na->ifp), mtu, nbs); in netmap_buf_size_validate()
2284 struct netmap_adapter *na = priv->np_na; in netmap_offsets_init()
2289 enum txrx t; in netmap_offsets_init()
2297 if (!(na->na_flags & NAF_OFFSETS)) { in netmap_offsets_init()
2300 na->name); in netmap_offsets_init()
2306 max_offset = opt->nro_max_offset; in netmap_offsets_init()
2307 min_gap = opt->nro_min_gap; in netmap_offsets_init()
2308 initial_offset = opt->nro_initial_offset; in netmap_offsets_init()
2309 bits = opt->nro_offset_bits; in netmap_offsets_init()
2325 mask = (1ULL << bits) - 1; in netmap_offsets_init()
2354 struct netmap_ring *ring = kring->ring; in netmap_offsets_init()
2360 if (kring->offset_mask) { in netmap_offsets_init()
2361 if ((kring->offset_mask & mask) != mask || in netmap_offsets_init()
2362 kring->offset_max < max_offset) { in netmap_offsets_init()
2367 kring->name, in netmap_offsets_init()
2368 (unsigned long long)kring->offset_mask, in netmap_offsets_init()
2369 (unsigned long long)kring->offset_max); in netmap_offsets_init()
2373 mask = kring->offset_mask; in netmap_offsets_init()
2374 max_offset = kring->offset_max; in netmap_offsets_init()
2376 kring->offset_mask = mask; in netmap_offsets_init()
2377 *(uint64_t *)(uintptr_t)&ring->offset_mask = mask; in netmap_offsets_init()
2378 kring->offset_max = max_offset; in netmap_offsets_init()
2379 kring->offset_gap = min_gap; in netmap_offsets_init()
2388 if (!initial_offset || kring->users > 1) in netmap_offsets_init()
2391 for (j = 0; j < kring->nkr_num_slots; j++) { in netmap_offsets_init()
2392 struct netmap_slot *slot = ring->slot + j; in netmap_offsets_init()
2399 opt->nro_opt.nro_status = error; in netmap_offsets_init()
2401 opt->nro_max_offset = max_offset; in netmap_offsets_init()
2416 enum txrx t; in netmap_compute_buf_len()
2421 struct netmap_adapter *na = priv->np_na; in netmap_compute_buf_len()
2428 if (kring->users > 1) in netmap_compute_buf_len()
2437 * case the user-declared 'offset_gap' is taken as the in netmap_compute_buf_len()
2442 target = NETMAP_BUF_SIZE(kring->na) - in netmap_compute_buf_len()
2443 kring->offset_max; in netmap_compute_buf_len()
2454 if (!kring->offset_gap) in netmap_compute_buf_len()
2455 kring->offset_gap = in netmap_compute_buf_len()
2456 NETMAP_BUF_SIZE(kring->na); in netmap_compute_buf_len()
2458 if (kring->offset_gap < target) in netmap_compute_buf_len()
2459 target = kring->offset_gap; in netmap_compute_buf_len()
2460 error = kring->nm_bufcfg(kring, target); in netmap_compute_buf_len()
2464 *(uint64_t *)(uintptr_t)&kring->ring->buf_align = kring->buf_align; in netmap_compute_buf_len()
2466 if (mtu && t == NR_RX && kring->hwbuf_len < mtu) { in netmap_compute_buf_len()
2467 if (!(na->na_flags & NAF_MOREFRAG)) { in netmap_compute_buf_len()
2471 na->name); in netmap_compute_buf_len()
2479 kring->name, mtu, in netmap_compute_buf_len()
2480 (unsigned long long)kring->hwbuf_len); in netmap_compute_buf_len()
2489 * possibly move the interface to netmap-mode.
2495 * na->nm_config() [by netmap_update_config]
2503 * na->nm_krings_create()
2517 * cross-link them
2527 * na->nm_register(, 1)
2551 * cross-link the bwrap and hwna rings,
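The na->nm_register(na, 1) step listed above is the driver-provided callback that moves the NIC in and out of netmap mode. A hedged sketch of its usual shape in a native driver follows; foo_* and sc are placeholders, not an actual driver API.

/* Hedged sketch of a native driver's nm_register callback: quiesce the
 * NIC, flip NAF_NETMAP_ON through the helpers in this file, then re-run
 * the driver init path (whose ring setup calls netmap_reset()). */
static int
foo_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct foo_softc *sc = if_getsoftc(na->ifp);

	foo_stop(sc);				/* quiesce the hardware */
	if (onoff)
		nm_set_native_flags(na);	/* sets NAF_NETMAP_ON */
	else
		nm_clear_native_flags(na);
	foo_init(sc);				/* ring reinit -> netmap_reset() */
	return (0);
}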
2566 priv->np_na = na; /* store the reference */ in netmap_do_regif()
2567 error = netmap_mem_finalize(na->nm_mem, na); in netmap_do_regif()
2571 if (na->active_fds == 0) { in netmap_do_regif()
2574 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut); in netmap_do_regif()
2577 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal, in netmap_do_regif()
2578 na->na_lut.objsize); in netmap_do_regif()
2589 if (na->active_fds == 0) { in netmap_do_regif()
2592 * perform sanity checks and create the in-kernel view in netmap_do_regif()
2595 if (na->ifp && nm_priv_rx_enabled(priv)) { in netmap_do_regif()
2597 unsigned mtu = nm_os_ifnet_mtu(na->ifp); in netmap_do_regif()
2600 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na)); in netmap_do_regif()
2602 if (na->rx_buf_maxsize == 0) { in netmap_do_regif()
2603 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name); in netmap_do_regif()
2617 error = na->nm_krings_create(na); in netmap_do_regif()
2657 error = na->nm_register(na, 1); in netmap_do_regif()
2664 na->active_fds++; in netmap_do_regif()
2669 * check for priv->np_nifp != NULL without locking in netmap_do_regif()
2672 priv->np_nifp = nifp; in netmap_do_regif()
2682 if (na->active_fds == 0) in netmap_do_regif()
2683 na->nm_krings_delete(na); in netmap_do_regif()
2685 if (na->active_fds == 0) in netmap_do_regif()
2686 memset(&na->na_lut, 0, sizeof(na->na_lut)); in netmap_do_regif()
2690 priv->np_na = NULL; in netmap_do_regif()
2706 kring->ring->tail = kring->rtail = kring->nr_hwtail; in nm_sync_finalize()
2709 kring->name, kring->nr_hwcur, kring->nr_hwtail, in nm_sync_finalize()
2710 kring->rhead, kring->rcur, kring->rtail); in nm_sync_finalize()
2717 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) { in ring_timestamp_set()
2718 microtime(&ring->ts); in ring_timestamp_set()
2730 * - NIOCCTRL device control API
2731 * - NIOCTXSYNC sync TX rings
2732 * - NIOCRXSYNC sync RX rings
2733 * - SIOCGIFADDR just for convenience
2734 * - NIOCGINFO deprecated (legacy API)
2735 * - NIOCREGIF deprecated (legacy API)
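For reference, a minimal userspace sketch of the NIOCCTRL path listed above, registering all hardware rings of a port with NETMAP_REQ_REGISTER; the port name is a placeholder and error handling is omitted.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <net/netmap.h>
#include <net/netmap_user.h>	/* NETMAP_IF() */

/* Sketch only: open /dev/netmap, register a port, map the shared memory.
 * The kernel fills reg.nr_memsize and reg.nr_offset, cf. netmap_ioctl()
 * below. */
static int
open_port(const char *name, struct netmap_if **nifpp)
{
	struct nmreq_header hdr;
	struct nmreq_register reg;
	void *mem;
	int fd = open("/dev/netmap", O_RDWR);

	memset(&hdr, 0, sizeof(hdr));
	memset(&reg, 0, sizeof(reg));
	hdr.nr_version = NETMAP_API;
	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
	strlcpy(hdr.nr_name, name, sizeof(hdr.nr_name));
	hdr.nr_body = (uintptr_t)&reg;
	reg.nr_mode = NR_REG_ALL_NIC;	/* all hw rings, no host rings */
	ioctl(fd, NIOCCTRL, &hdr);	/* handled by netmap_ioctl() below */
	mem = mmap(NULL, reg.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	*nifpp = NETMAP_IF(mem, reg.nr_offset);
	return (fd);
}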
2751 enum txrx t; in netmap_ioctl()
2757 if (hdr->nr_version < NETMAP_MIN_API || in netmap_ioctl()
2758 hdr->nr_version > NETMAP_MAX_API) { in netmap_ioctl()
2760 hdr->nr_version, NETMAP_API); in netmap_ioctl()
2764 /* Make a kernel-space copy of the user-space nr_body. in netmap_ioctl()
2767 * kernel-space counterparts. The original pointers are in netmap_ioctl()
2775 /* Sanitize hdr->nr_name. */ in netmap_ioctl()
2776 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0'; in netmap_ioctl()
2778 switch (hdr->nr_reqtype) { in netmap_ioctl()
2781 (struct nmreq_register *)(uintptr_t)hdr->nr_body; in netmap_ioctl()
2790 if (priv->np_nifp != NULL) { /* thread already registered */ in netmap_ioctl()
2801 nmd = netmap_mem_ext_create(e->nro_usrptr, in netmap_ioctl()
2802 &e->nro_info, &error); in netmap_ioctl()
2803 opt->nro_status = error; in netmap_ioctl()
2809 if (nmd == NULL && req->nr_mem_id) { in netmap_ioctl()
2811 nmd = netmap_mem_find(req->nr_mem_id); in netmap_ioctl()
2815 hdr->nr_name, req->nr_mem_id); in netmap_ioctl()
2831 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) { in netmap_ioctl()
2833 "not accept it", na->virt_hdr_len); in netmap_ioctl()
2848 opt->nro_status = error; in netmap_ioctl()
2855 nifp = priv->np_nifp; in netmap_ioctl()
2858 req->nr_rx_rings = na->num_rx_rings; in netmap_ioctl()
2859 req->nr_tx_rings = na->num_tx_rings; in netmap_ioctl()
2860 req->nr_rx_slots = na->num_rx_desc; in netmap_ioctl()
2861 req->nr_tx_slots = na->num_tx_desc; in netmap_ioctl()
2862 req->nr_host_tx_rings = na->num_host_tx_rings; in netmap_ioctl()
2863 req->nr_host_rx_rings = na->num_host_rx_rings; in netmap_ioctl()
2864 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags, in netmap_ioctl()
2865 &req->nr_mem_id); in netmap_ioctl()
2871 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM; in netmap_ioctl()
2874 priv->np_si[t] = nm_si_user(priv, t) ? in netmap_ioctl()
2875 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si; in netmap_ioctl()
2878 if (req->nr_extra_bufs) { in netmap_ioctl()
2881 req->nr_extra_bufs); in netmap_ioctl()
2882 req->nr_extra_bufs = netmap_extra_alloc(na, in netmap_ioctl()
2883 &nifp->ni_bufs_head, req->nr_extra_bufs); in netmap_ioctl()
2885 nm_prinf("got %d extra buffers", req->nr_extra_bufs); in netmap_ioctl()
2887 nifp->ni_bufs_head = 0; in netmap_ioctl()
2889 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp); in netmap_ioctl()
2898 priv->np_ifp = ifp; in netmap_ioctl()
2914 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body; in netmap_ioctl()
2921 if (hdr->nr_name[0] != '\0') { in netmap_ioctl()
2927 regreq.nr_tx_slots = req->nr_tx_slots; in netmap_ioctl()
2928 regreq.nr_rx_slots = req->nr_rx_slots; in netmap_ioctl()
2929 regreq.nr_tx_rings = req->nr_tx_rings; in netmap_ioctl()
2930 regreq.nr_rx_rings = req->nr_rx_rings; in netmap_ioctl()
2931 regreq.nr_host_tx_rings = req->nr_host_tx_rings; in netmap_ioctl()
2932 regreq.nr_host_rx_rings = req->nr_host_rx_rings; in netmap_ioctl()
2933 regreq.nr_mem_id = req->nr_mem_id; in netmap_ioctl()
2936 hdr->nr_reqtype = NETMAP_REQ_REGISTER; in netmap_ioctl()
2937 hdr->nr_body = (uintptr_t)&regreq; in netmap_ioctl()
2939 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */ in netmap_ioctl()
2940 hdr->nr_body = (uintptr_t)req; /* reset nr_body */ in netmap_ioctl()
2946 nmd = na->nm_mem; /* get memory allocator */ in netmap_ioctl()
2948 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1); in netmap_ioctl()
2952 hdr->nr_name, in netmap_ioctl()
2953 req->nr_mem_id ? req->nr_mem_id : 1); in netmap_ioctl()
2960 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags, in netmap_ioctl()
2961 &req->nr_mem_id); in netmap_ioctl()
2967 req->nr_rx_rings = na->num_rx_rings; in netmap_ioctl()
2968 req->nr_tx_rings = na->num_tx_rings; in netmap_ioctl()
2969 req->nr_rx_slots = na->num_rx_desc; in netmap_ioctl()
2970 req->nr_tx_slots = na->num_tx_desc; in netmap_ioctl()
2971 req->nr_host_tx_rings = na->num_host_tx_rings; in netmap_ioctl()
2972 req->nr_host_rx_rings = na->num_host_rx_rings; in netmap_ioctl()
2993 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body; in netmap_ioctl()
3000 /* For now we only support virtio-net headers, and only for in netmap_ioctl()
3002 * for the virtio-net header are 0 (no header), 10 and 12. */ in netmap_ioctl()
3003 if (req->nr_hdr_len != 0 && in netmap_ioctl()
3004 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) && in netmap_ioctl()
3005 req->nr_hdr_len != 12) { in netmap_ioctl()
3007 nm_prerr("invalid hdr_len %u", req->nr_hdr_len); in netmap_ioctl()
3012 hdr->nr_reqtype = NETMAP_REQ_REGISTER; in netmap_ioctl()
3013 hdr->nr_body = (uintptr_t)&regreq; in netmap_ioctl()
3015 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET; in netmap_ioctl()
3016 hdr->nr_body = (uintptr_t)req; in netmap_ioctl()
3020 na->virt_hdr_len = req->nr_hdr_len; in netmap_ioctl()
3021 if (na->virt_hdr_len) { in netmap_ioctl()
3022 vpna->mfs = NETMAP_BUF_SIZE(na); in netmap_ioctl()
3025 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na); in netmap_ioctl()
3035 /* Get vnet-header length for this netmap port */ in netmap_ioctl()
3037 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body; in netmap_ioctl()
3046 hdr->nr_reqtype = NETMAP_REQ_REGISTER; in netmap_ioctl()
3047 hdr->nr_body = (uintptr_t)&regreq; in netmap_ioctl()
3049 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET; in netmap_ioctl()
3050 hdr->nr_body = (uintptr_t)req; in netmap_ioctl()
3052 req->nr_hdr_len = na->virt_hdr_len; in netmap_ioctl()
3070 error = nm_vi_destroy(hdr->nr_name); in netmap_ioctl()
3082 * hdr->nr_name. */ in netmap_ioctl()
3084 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body; in netmap_ioctl()
3091 regreq.nr_mem_id = req->nr_mem_id; in netmap_ioctl()
3094 hdr->nr_reqtype = NETMAP_REQ_REGISTER; in netmap_ioctl()
3095 hdr->nr_body = (uintptr_t)&regreq; in netmap_ioctl()
3097 hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */ in netmap_ioctl()
3098 hdr->nr_body = (uintptr_t)req; /* reset nr_body */ in netmap_ioctl()
3104 nmd = na->nm_mem; /* grab the memory allocator */ in netmap_ioctl()
3136 opt->nro_status = error; in netmap_ioctl()
3157 * user-space pointer. */ in netmap_ioctl()
3164 if (unlikely(priv->np_nifp == NULL)) { in netmap_ioctl()
3170 if (unlikely(priv->np_csb_atok_base)) { in netmap_ioctl()
3176 na = priv->np_na; /* we have a reference */ in netmap_ioctl()
3181 qfirst = priv->np_qfirst[t]; in netmap_ioctl()
3182 qlast = priv->np_qlast[t]; in netmap_ioctl()
3183 sync_flags = priv->np_sync_flags; in netmap_ioctl()
3187 struct netmap_ring *ring = kring->ring; in netmap_ioctl()
3197 i, ring->cur, in netmap_ioctl()
3198 kring->nr_hwcur); in netmap_ioctl()
3199 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { in netmap_ioctl()
3201 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) { in netmap_ioctl()
3206 i, ring->cur, in netmap_ioctl()
3207 kring->nr_hwcur); in netmap_ioctl()
3209 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { in netmap_ioctl()
3216 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) { in netmap_ioctl()
3225 netmap_send_up(na->ifp, &q); in netmap_ioctl()
3303 return rv - sizeof(struct nmreq_option); in nmreq_opt_size_by_type()
3307 * nmreq_copyin: create an in-kernel version of the request.
3311 * hdr -> +-------+ buf
3312 * | | +---------------+
3313 * +-------+ |usr body ptr |
3314 * |options|-. +---------------+
3315 * +-------+ | |usr options ptr|
3316 * |body |--------->+---------------+
3317 * +-------+ | | |
3320 * | +---------------+
3322 * | +---------------+
3323 * | .---| |\
3324 * | | +---------------+ |
3325 * | .------| | |
3326 * | | | +---------------+ \ option table
3328 * | | | +---------------+ | type
3330 * | | | +---------------+/
3332 * `-|----->+---------------+
3335 * | | .-| nro_next |
3336 * | | | +---------------+
3338 * | `-`>+---------------+
3341 * | .-| nro_next |
3342 * | | +---------------+
3345 * | .-| |
3346 * `----->+---------------+
3348 * `>+---------------+
3352 * +---------------+
3355 * with in-kernel valid pointers inside the buf. The original user
3374 if (hdr->nr_reserved) { in nmreq_copyin()
3383 hdr->nr_reserved = nr_body_is_user; in nmreq_copyin()
3386 rqsz = nmreq_size_by_type(hdr->nr_reqtype); in nmreq_copyin()
3391 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) || in nmreq_copyin()
3392 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) { in nmreq_copyin()
3419 *ptrs++ = hdr->nr_body; in nmreq_copyin()
3420 *ptrs++ = hdr->nr_options; in nmreq_copyin()
3422 /* overwrite the user pointer with the in-kernel one */ in nmreq_copyin()
3423 hdr->nr_body = (uintptr_t)p; in nmreq_copyin()
3424 /* prepare the options-list pointers and temporarily terminate in nmreq_copyin()
3425 * the in-kernel list, in case we have to jump to out_restore in nmreq_copyin()
3427 next = (struct nmreq_option **)&hdr->nr_options; in nmreq_copyin()
3429 hdr->nr_options = 0; in nmreq_copyin()
3444 if (p - ker + sizeof(uint64_t*) + sizeof(*src) > bufsz) { in nmreq_copyin()
3449 hdr->nr_options = 0; in nmreq_copyin()
3462 *ptrs = opt->nro_next; in nmreq_copyin()
3463 /* append the option to the in-kernel list */ in nmreq_copyin()
3465 /* temporarily terminate the in-kernel list, in case we have to in nmreq_copyin()
3468 nsrc = (struct nmreq_option *)opt->nro_next; in nmreq_copyin()
3469 opt->nro_next = 0; in nmreq_copyin()
3471 opt->nro_status = 0; in nmreq_copyin()
3474 if (opt->nro_reqtype < 1) { in nmreq_copyin()
3476 nm_prinf("invalid option type: %u", opt->nro_reqtype); in nmreq_copyin()
3477 opt->nro_status = EINVAL; in nmreq_copyin()
3482 if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) { in nmreq_copyin()
3483 /* opt->nro_status will be set to EOPNOTSUPP */ in nmreq_copyin()
3490 if (opt_tab[opt->nro_reqtype] != NULL) { in nmreq_copyin()
3492 nm_prinf("duplicate option: %u", opt->nro_reqtype); in nmreq_copyin()
3493 opt->nro_status = EINVAL; in nmreq_copyin()
3494 opt_tab[opt->nro_reqtype]->nro_status = EINVAL; in nmreq_copyin()
3498 opt_tab[opt->nro_reqtype] = opt; in nmreq_copyin()
3501 optsz = nmreq_opt_size_by_type(opt->nro_reqtype, in nmreq_copyin()
3502 opt->nro_size); in nmreq_copyin()
3504 if ((optsz > NETMAP_REQ_MAXSIZE) || (opt->nro_size > NETMAP_REQ_MAXSIZE) in nmreq_copyin()
3521 next = (struct nmreq_option **)&opt->nro_next; in nmreq_copyin()
3528 for (src = (struct nmreq_option *)hdr->nr_options; src; in nmreq_copyin()
3529 src = (struct nmreq_option *)src->nro_next) { in nmreq_copyin()
3530 src->nro_status = EOPNOTSUPP; in nmreq_copyin()
3544 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart; in nmreq_copyout()
3549 if (!hdr->nr_reserved) in nmreq_copyout()
3553 ptrs = (uint64_t *)ker - 2; in nmreq_copyout()
3555 hdr->nr_body = *ptrs++; in nmreq_copyout()
3556 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; in nmreq_copyout()
3557 hdr->nr_options = *ptrs; in nmreq_copyout()
3561 bodysz = nmreq_size_by_type(hdr->nr_reqtype); in nmreq_copyout()
3562 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz); in nmreq_copyout()
3570 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options; in nmreq_copyout()
3576 next = src->nro_next; in nmreq_copyout()
3577 ptrs = (uint64_t *)src - 1; in nmreq_copyout()
3578 src->nro_next = *ptrs; in nmreq_copyout()
3588 if (!rerror && !src->nro_status) { in nmreq_copyout()
3589 optsz = nmreq_opt_size_by_type(src->nro_reqtype, in nmreq_copyout()
3590 src->nro_size); in nmreq_copyout()
3605 hdr->nr_reserved = 0; in nmreq_copyout()
3615 if (!hdr->nr_options) in nmreq_getoption()
3618 opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) - in nmreq_getoption()
3631 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt; in nmreq_checkoptions()
3632 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next) in nmreq_checkoptions()
3633 if (opt->nro_status == EOPNOTSUPP) in nmreq_checkoptions()
3646 * Device-dependent parts (locking and sync of tx/rx rings)
3673 * packets to forward (host RX ring --> NIC) during the rx in netmap_poll()
3679 int sync_flags = priv->np_sync_flags; in netmap_poll()
3683 if (unlikely(priv->np_nifp == NULL)) { in netmap_poll()
3688 na = priv->np_na; in netmap_poll()
3693 if (unlikely(priv->np_csb_atok_base)) { in netmap_poll()
3699 nm_prinf("device %s events 0x%x", na->name, events); in netmap_poll()
3716 si[NR_RX] = priv->np_si[NR_RX]; in netmap_poll()
3717 si[NR_TX] = priv->np_si[NR_TX]; in netmap_poll()
3727 const enum txrx t = NR_TX; in netmap_poll()
3728 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { in netmap_poll()
3730 if (kring->ring->cur != kring->ring->tail) { in netmap_poll()
3740 const enum txrx t = NR_RX; in netmap_poll()
3743 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { in netmap_poll()
3745 if (kring->ring->cur == kring->ring->tail in netmap_poll()
3746 || kring->rhead != kring->ring->head) { in netmap_poll()
3769 * If we want to push packets out (priv->np_txpoll) or in netmap_poll()
3774 if (priv->np_txpoll || want_tx) { in netmap_poll()
3782 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) { in netmap_poll()
3785 kring = na->tx_rings[i]; in netmap_poll()
3786 ring = kring->ring; in netmap_poll()
3794 if (!send_down && !want_tx && ring->head == kring->nr_hwcur) in netmap_poll()
3800 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { in netmap_poll()
3804 if (kring->nm_sync(kring, sync_flags)) in netmap_poll()
3816 found = kring->rcur != kring->rtail; in netmap_poll()
3822 kring->nm_notify(kring, 0); in netmap_poll()
3844 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) { in netmap_poll()
3847 kring = na->rx_rings[i]; in netmap_poll()
3848 ring = kring->ring; in netmap_poll()
3853 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { in netmap_poll()
3857 /* now we can use kring->rcur, rtail */ in netmap_poll()
3870 kring->nr_kflags &= ~NR_FORWARD; in netmap_poll()
3871 if (kring->nm_sync(kring, sync_flags)) in netmap_poll()
3875 send_down |= (kring->nr_kflags & NR_FORWARD); in netmap_poll()
3877 found = kring->rcur != kring->rtail; in netmap_poll()
3883 kring->nm_notify(kring, 0); in netmap_poll()
3903 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and in netmap_poll()
3904 * ring->head) marked with NS_FORWARD on hw rx rings are passed up in netmap_poll()
3909 netmap_send_up(na->ifp, &q); in netmap_poll()
3921 enum txrx t; in nma_intr_enable()
3927 int on = !(kring->nr_kflags & NKR_NOINTR); in nma_intr_enable()
3933 kring->nr_kflags &= ~NKR_NOINTR; in nma_intr_enable()
3935 kring->nr_kflags |= NKR_NOINTR; in nma_intr_enable()
3944 if (!na->nm_intr) { in nma_intr_enable()
3946 na->name); in nma_intr_enable()
3947 return -1; in nma_intr_enable()
3950 na->nm_intr(na, onoff); in nma_intr_enable()
3956 /*-------------------- driver support routines -------------------*/
3962 struct netmap_adapter *na = kring->notify_na; in netmap_notify()
3963 enum txrx t = kring->tx; in netmap_notify()
3965 nm_os_selwakeup(&kring->si); in netmap_notify()
3970 if (na->si_users[t] > 0) in netmap_notify()
3971 nm_os_selwakeup(&na->si[t]); in netmap_notify()
3983 if (!na->rx_buf_maxsize) { in netmap_attach_common()
3985 na->rx_buf_maxsize = PAGE_SIZE; in netmap_attach_common()
3989 if (na->na_flags & NAF_HOST_RINGS && na->ifp) { in netmap_attach_common()
3990 na->if_input = if_getinputfn(na->ifp); /* for netmap_send_up */ in netmap_attach_common()
3992 na->pdev = na; /* make sure netmap_mem_map() is called */ in netmap_attach_common()
3994 if (na->na_flags & NAF_HOST_RINGS) { in netmap_attach_common()
3995 if (na->num_host_rx_rings == 0) in netmap_attach_common()
3996 na->num_host_rx_rings = 1; in netmap_attach_common()
3997 if (na->num_host_tx_rings == 0) in netmap_attach_common()
3998 na->num_host_tx_rings = 1; in netmap_attach_common()
4000 if (na->nm_krings_create == NULL) { in netmap_attach_common()
4005 na->nm_krings_create = netmap_hw_krings_create; in netmap_attach_common()
4006 na->nm_krings_delete = netmap_hw_krings_delete; in netmap_attach_common()
4008 if (na->nm_notify == NULL) in netmap_attach_common()
4009 na->nm_notify = netmap_notify; in netmap_attach_common()
4010 na->active_fds = 0; in netmap_attach_common()
4012 if (na->nm_mem == NULL) { in netmap_attach_common()
4014 na->nm_mem = netmap_mem_get_allocator(na); in netmap_attach_common()
4016 if (na->nm_bdg_attach == NULL) in netmap_attach_common()
4020 na->nm_bdg_attach = netmap_default_bdg_attach; in netmap_attach_common()
4025 /* Wrapper for the register callback provided netmap-enabled
4045 na->na_flags &= ~NAF_NETMAP_ON; in netmap_hw_reg()
4050 error = hwna->nm_hw_register(na, onoff); in netmap_hw_reg()
4061 if (na->ifp == NULL) in netmap_hw_dtor()
4064 NM_DETACH_NA(na->ifp); in netmap_hw_dtor()
4088 if (arg == NULL || arg->ifp == NULL) { in netmap_attach_ext()
4090 nm_prerr("either arg or arg->ifp is NULL"); in netmap_attach_ext()
4094 if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) { in netmap_attach_ext()
4097 arg->name, arg->num_tx_rings, arg->num_rx_rings); in netmap_attach_ext()
4101 ifp = arg->ifp; in netmap_attach_ext()
4114 hwna->up = *arg; in netmap_attach_ext()
4115 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE; in netmap_attach_ext()
4116 strlcpy(hwna->up.name, if_name(ifp), sizeof(hwna->up.name)); in netmap_attach_ext()
4118 hwna->nm_hw_register = hwna->up.nm_register; in netmap_attach_ext()
4119 hwna->up.nm_register = netmap_hw_reg; in netmap_attach_ext()
4121 if (netmap_attach_common(&hwna->up)) { in netmap_attach_ext()
4125 netmap_adapter_get(&hwna->up); in netmap_attach_ext()
4127 NM_ATTACH_NA(ifp, &hwna->up); in netmap_attach_ext()
4131 if (arg->nm_dtor == NULL) { in netmap_attach_ext()
4132 hwna->up.nm_dtor = netmap_hw_dtor; in netmap_attach_ext()
4136 hwna->up.num_tx_rings, hwna->up.num_tx_desc, in netmap_attach_ext()
4137 hwna->up.num_rx_rings, hwna->up.num_rx_desc); in netmap_attach_ext()
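netmap_attach()/netmap_attach_ext() are what a native driver calls at attach time after filling a template struct netmap_adapter. A hedged sketch of that call, with foo_*/sc as placeholder driver names:

/* Hedged sketch of the attach-time setup in a native driver; only the
 * mandatory fields are shown, the rest is filled in by
 * netmap_attach_common() above. */
static void
foo_netmap_attach(struct foo_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;
	na.num_tx_desc = sc->num_tx_desc;
	na.num_rx_desc = sc->num_rx_desc;
	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
	na.nm_txsync = foo_netmap_txsync;	/* DEVICE_netmap_txsync() in the datapaths above */
	na.nm_rxsync = foo_netmap_rxsync;
	na.nm_register = foo_netmap_reg;	/* see the nm_register sketch earlier */
	netmap_attach(&na);
}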
4161 refcount_acquire(&na->na_refcount); in NM_DBG()
4172 if (!refcount_release(&na->na_refcount)) in NM_DBG()
4175 if (na->nm_dtor) in NM_DBG()
4176 na->nm_dtor(na); in NM_DBG()
4178 if (na->tx_rings) { /* XXX should not happen */ in NM_DBG()
4181 na->nm_krings_delete(na); in NM_DBG()
4184 if (na->nm_mem) in NM_DBG()
4185 netmap_mem_put(na->nm_mem); in NM_DBG()
4200 for (i = na->num_rx_rings; i < lim; i++) { in netmap_hw_krings_create()
4201 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue); in netmap_hw_krings_create()
4203 nm_prdis("initialized sw rx queue %d", na->num_rx_rings); in netmap_hw_krings_create()
4211 * Called on module unload by the netmap-enabled drivers
4233 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) { in netmap_detach()
4234 na->na_flags |= NAF_ZOMBIE; in netmap_detach()
4272 if (i >= na->num_host_rx_rings) { in netmap_transmit()
4273 i = i % na->num_host_rx_rings; in netmap_transmit()
4278 // if we follow the down/configure/up protocol -gl in netmap_transmit()
4279 // mtx_lock(&na->core_lock); in netmap_transmit()
4282 nm_prerr("%s not in netmap mode anymore", na->name); in netmap_transmit()
4288 if (txr >= na->num_tx_rings) { in netmap_transmit()
4289 txr %= na->num_tx_rings; in netmap_transmit()
4293 if (tx_kring->nr_mode == NKR_NETMAP_OFF) { in netmap_transmit()
4297 q = &kring->rx_queue; in netmap_transmit()
4301 nm_prerr("%s from_host, drop packet size %d > %d", na->name, in netmap_transmit()
4308 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name); in netmap_transmit()
4314 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name); in netmap_transmit()
4330 busy = kring->nr_hwtail - kring->nr_hwcur; in netmap_transmit()
4332 busy += kring->nkr_num_slots; in netmap_transmit()
4333 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) { in netmap_transmit()
4334 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name, in netmap_transmit()
4335 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q)); in netmap_transmit()
4338 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q)); in netmap_transmit()
4351 kring->nm_notify(kring, 0); in netmap_transmit()
4371 * In any case, adjust kring->nr_mode.
4374 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, in netmap_reset()
4386 if (n >= na->num_tx_rings) in netmap_reset()
4388 kring = na->tx_rings[n]; in netmap_reset()
4397 new_hwofs = kring->rhead; in netmap_reset()
4398 new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1); in netmap_reset()
4400 if (n >= na->num_rx_rings) in netmap_reset()
4402 kring = na->rx_rings[n]; in netmap_reset()
4411 new_hwofs = kring->nr_hwtail; in netmap_reset()
4412 new_hwtail = kring->nr_hwtail; in netmap_reset()
4414 if (kring->nr_pending_mode == NKR_NETMAP_OFF) { in netmap_reset()
4415 kring->nr_mode = NKR_NETMAP_OFF; in netmap_reset()
4419 nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name, in netmap_reset()
4420 kring->nr_hwcur, kring->rhead, in netmap_reset()
4421 kring->nr_hwtail, new_hwtail, in netmap_reset()
4422 kring->nkr_hwofs, new_hwofs); in netmap_reset()
4424 kring->nr_hwcur = kring->rhead; in netmap_reset()
4425 kring->nr_hwtail = new_hwtail; in netmap_reset()
4426 kring->nkr_hwofs = new_hwofs; in netmap_reset()
4433 kring->nr_mode = NKR_NETMAP_ON; in netmap_reset()
4434 kring->nm_notify(kring, 0); in netmap_reset()
4435 return kring->ring->slot; in netmap_reset()
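netmap_reset() is meant to be called from the driver's ring (re)initialization path; it returns the slot array when the ring is in netmap mode and NULL otherwise. A hedged sketch of the usual TX-side caller, with foo_*/txr as placeholders:

/* Hedged sketch: after a reinit the driver asks netmap whether it owns
 * the ring and, if so, points the NIC descriptors back at the netmap
 * buffers. */
static void
foo_txring_init_netmap(struct foo_softc *sc, struct foo_txring *txr)
{
	struct netmap_adapter *na = NA(sc->ifp);
	struct netmap_slot *slot;
	u_int i;

	slot = netmap_reset(na, NR_TX, txr->me, 0);
	if (slot == NULL)		/* ring not in netmap mode */
		return;
	for (i = 0; i < na->num_tx_desc; i++) {
		uint64_t paddr;
		int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
		void *vaddr = PNMB(na, slot + si, &paddr);

		/* here: load DMA map / descriptor i with vaddr/paddr */
		(void)vaddr;
	}
}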
4442 * "work_done" is non-null on the RX path, NULL for the TX path.
4447 * - for a netmap file descriptor, do a selwakeup on the individual
4450 * - for a nic connected to a switch, call the proper forwarding routine
4457 enum txrx t = (work_done ? NR_RX : NR_TX); in netmap_common_irq()
4470 if (kring->nr_mode == NKR_NETMAP_OFF) { in netmap_common_irq()
4475 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ? in netmap_common_irq()
4479 return kring->nm_notify(kring, 0); in netmap_common_irq()
4485 * "work_done" is non-null on the RX path, NULL for the TX path.
4513 if (na->na_flags & NAF_SKIP_INTR) { in netmap_rx_irq()
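netmap_rx_irq()/netmap_tx_irq() are the wrappers that native drivers call from their interrupt handlers; when the ring is in netmap mode the event becomes a kring->nm_notify() wakeup and the driver skips its normal processing. A hedged sketch with foo_* placeholders:

/* Hedged sketch of the driver-side hook: offer the RX interrupt to
 * netmap first; NM_IRQ_PASS means netmap is not active on this ring
 * and the driver should process it normally. */
static void
foo_rxintr(struct foo_rxring *rxr)
{
	u_int work_done = 0;

	if (netmap_rx_irq(rxr->sc->ifp, rxr->me, &work_done) != NM_IRQ_PASS)
		return;			/* consumed by netmap (wakeup done) */
	/* ... normal mbuf-based RX processing ... */
}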
4525 if_t ifp = na->ifp; in nm_set_native_flags()
4529 if (na->active_fds > 0) { in nm_set_native_flags()
4533 na->na_flags |= NAF_NETMAP_ON; in nm_set_native_flags()
4541 if_t ifp = na->ifp; in nm_clear_native_flags()
4545 if (na->active_fds > 0) { in nm_clear_native_flags()
4552 na->na_flags &= ~NAF_NETMAP_ON; in nm_clear_native_flags()
4558 enum txrx t; in netmap_krings_mode_commit()
4567 kring->nr_mode = NKR_NETMAP_ON; in netmap_krings_mode_commit()
4569 kring->nr_mode = NKR_NETMAP_OFF; in netmap_krings_mode_commit()