Lines matching full:na
163 * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
286 * na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
293 * na->nm_notify() == netmap_notify()
299 * na->nm_notify() == netmap_notify()
304 * na->nm_notify == netmap_notify()
307 * netmap_rxsync_from_host(na, NULL, NULL)
311 * netmap_txsync_to_host(na)
313 * FreeBSD: na->if_input() == ether_input()
319 * na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
329 * FreeBSD: na->if_transmit() == orig. dev if_transmit
331 * na->nm_notify() == netmap_notify()
339 * na->nm_notify() == netmap_notify()
347 * na->nm_notify() == netmap_notify()
362 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
368 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
375 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
381 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
395 * na->nm_notify() == netmap_notify()
399 * na->nm_notify() == netmap_bwrap_notify()
410 * na->nm_notify() == netmap_bwrap_notify()
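
Each chain above ends in the adapter's nm_notify callback, and the adapter type is chosen by the port name given at open time. A userspace sketch of the three entry points, using the nm_open() helper from netmap_user.h (interface names are hypothetical):

    #define NETMAP_WITH_LIBS
    #include <net/netmap_user.h>

    /* hardware NIC: netmap_hw_adapter, or generic_netmap_adapter
     * when there is no native support */
    struct nm_desc *hw = nm_open("netmap:em0", NULL, 0, NULL);

    /* host-stack rings of the same NIC */
    struct nm_desc *host = nm_open("netmap:em0^", NULL, 0, NULL);

    /* VALE switch port: netmap_vp_adapter (bwrap when a NIC is attached) */
    struct nm_desc *sw = nm_open("vale0:1", NULL, 0, NULL);
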
602 netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped) in netmap_set_ring() argument
605 netmap_disable_ring(NMR(na, t)[ring_id], stopped); in netmap_set_ring()
607 NMR(na, t)[ring_id]->nkr_stopped = 0; in netmap_set_ring()
611 /* stop or enable all the rings of na */
613 netmap_set_all_rings(struct netmap_adapter *na, int stopped) in netmap_set_all_rings() argument
618 if (!nm_netmap_on(na)) in netmap_set_all_rings()
622 nm_prinf("%s: %sable all rings", na->name, in netmap_set_all_rings()
626 for (i = 0; i < netmap_real_rings(na, t); i++) { in netmap_set_all_rings()
627 netmap_set_ring(na, i, t, stopped); in netmap_set_all_rings()
642 netmap_set_all_rings(NA(ifp), NM_KR_LOCKED); in netmap_disable_all_rings()
655 netmap_set_all_rings(NA(ifp), 0 /* enabled */); in netmap_enable_all_rings()
663 struct netmap_adapter *na = NA(ifp); in netmap_make_zombie() local
664 netmap_set_all_rings(na, NM_KR_LOCKED); in netmap_make_zombie()
665 na->na_flags |= NAF_ZOMBIE; in netmap_make_zombie()
666 netmap_set_all_rings(na, 0); in netmap_make_zombie()
674 struct netmap_adapter *na = NA(ifp); in netmap_undo_zombie() local
675 if (na->na_flags & NAF_ZOMBIE) { in netmap_undo_zombie()
676 netmap_set_all_rings(na, NM_KR_LOCKED); in netmap_undo_zombie()
677 na->na_flags &= ~NAF_ZOMBIE; in netmap_undo_zombie()
678 netmap_set_all_rings(na, 0); in netmap_undo_zombie()
761 netmap_update_config(struct netmap_adapter *na) in netmap_update_config() argument
765 if (na->ifp && !nm_is_bwrap(na)) { in netmap_update_config()
766 strlcpy(na->name, if_name(na->ifp), sizeof(na->name)); in netmap_update_config()
770 if (na->nm_config == NULL || in netmap_update_config()
771 na->nm_config(na, &info)) { in netmap_update_config()
773 info.num_tx_rings = na->num_tx_rings; in netmap_update_config()
774 info.num_tx_descs = na->num_tx_desc; in netmap_update_config()
775 info.num_rx_rings = na->num_rx_rings; in netmap_update_config()
776 info.num_rx_descs = na->num_rx_desc; in netmap_update_config()
777 info.rx_buf_maxsize = na->rx_buf_maxsize; in netmap_update_config()
780 if (na->num_tx_rings == info.num_tx_rings && in netmap_update_config()
781 na->num_tx_desc == info.num_tx_descs && in netmap_update_config()
782 na->num_rx_rings == info.num_rx_rings && in netmap_update_config()
783 na->num_rx_desc == info.num_rx_descs && in netmap_update_config()
784 na->rx_buf_maxsize == info.rx_buf_maxsize) in netmap_update_config()
786 if (na->active_fds == 0) { in netmap_update_config()
787 na->num_tx_rings = info.num_tx_rings; in netmap_update_config()
788 na->num_tx_desc = info.num_tx_descs; in netmap_update_config()
789 na->num_rx_rings = info.num_rx_rings; in netmap_update_config()
790 na->num_rx_desc = info.num_rx_descs; in netmap_update_config()
791 na->rx_buf_maxsize = info.rx_buf_maxsize; in netmap_update_config()
795 na->name, na->num_tx_rings, na->num_tx_desc, in netmap_update_config()
796 na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize); in netmap_update_config()
801 na->name, info.num_tx_rings, info.num_tx_descs, in netmap_update_config()
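
The optional nm_config callback queried above lets a driver report its current ring and slot counts instead of the cached defaults. A minimal sketch, assuming a hypothetical my_softc; the nm_config_info fields are the ones read back in the listing:

    static int
    my_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
    {
            struct my_softc *sc = if_getsoftc(na->ifp);  /* hypothetical */

            info->num_tx_rings = sc->num_queues;
            info->num_rx_rings = sc->num_queues;
            info->num_tx_descs = sc->num_tx_desc;
            info->num_rx_descs = sc->num_rx_desc;
            info->rx_buf_maxsize = sc->rx_buf_size;
            return 0;  /* nonzero makes netmap keep the current na values */
    }
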
823 * na->tx_rings ----> [ na->num_tx_rings kring pointers, then host tx krings ]
828 * na->rx_rings ----> [ na->num_rx_rings kring pointers, then host rx krings ]
835 * na->tailroom ----> [ tailroom bytes ]
845 netmap_krings_create(struct netmap_adapter *na, u_int tailroom) in netmap_krings_create() argument
853 if (na->tx_rings != NULL) { in netmap_krings_create()
860 n[NR_TX] = netmap_all_rings(na, NR_TX); in netmap_krings_create()
861 n[NR_RX] = netmap_all_rings(na, NR_RX); in netmap_krings_create()
867 na->tx_rings = nm_os_malloc((size_t)len); in netmap_krings_create()
868 if (na->tx_rings == NULL) { in netmap_krings_create()
872 na->rx_rings = na->tx_rings + n[NR_TX]; in netmap_krings_create()
873 na->tailroom = na->rx_rings + n[NR_RX]; in netmap_krings_create()
876 kring = (struct netmap_kring *)((char *)na->tailroom + tailroom); in netmap_krings_create()
878 na->tx_rings[i] = kring; in netmap_krings_create()
887 ndesc = nma_get_ndesc(na, t); in netmap_krings_create()
889 kring = NMR(na, t)[i]; in netmap_krings_create()
891 kring->notify_na = na; in netmap_krings_create()
897 if (i < nma_get_nrings(na, t)) { in netmap_krings_create()
898 kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync); in netmap_krings_create()
899 kring->nm_bufcfg = na->nm_bufcfg; in netmap_krings_create()
903 if (!(na->na_flags & NAF_HOST_RINGS)) in netmap_krings_create()
910 kring->nm_notify = na->nm_notify; in netmap_krings_create()
916 snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name, in netmap_krings_create()
922 netmap_krings_delete(na); in netmap_krings_create()
926 kring->na = na; /* setting this field marks the mutex as initialized */ in netmap_krings_create()
928 err = nm_os_selinfo_init(&na->si[t], na->name); in netmap_krings_create()
930 netmap_krings_delete(na); in netmap_krings_create()
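
netmap_krings_create() is normally invoked from an adapter's nm_krings_create callback; hardware adapters pass tailroom == 0 (see netmap_hw_krings_create further down), while derived adapters may reserve tailroom behind the kring array for private state. A sketch, with struct my_private hypothetical:

    static int
    my_krings_create(struct netmap_adapter *na)
    {
            /* lay out the krings plus sizeof(struct my_private)
             * extra bytes, reachable through na->tailroom */
            return netmap_krings_create(na, sizeof(struct my_private));
    }
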
942 netmap_krings_delete(struct netmap_adapter *na) in netmap_krings_delete() argument
944 struct netmap_kring **kring = na->tx_rings; in netmap_krings_delete()
947 if (na->tx_rings == NULL) { in netmap_krings_delete()
954 nm_os_selinfo_uninit(&na->si[t]); in netmap_krings_delete()
957 for ( ; kring != na->tailroom; kring++) { in netmap_krings_delete()
958 if ((*kring)->na != NULL) in netmap_krings_delete()
962 nm_os_free(na->tx_rings); in netmap_krings_delete()
963 na->tx_rings = na->rx_rings = na->tailroom = NULL; in netmap_krings_delete()
974 netmap_hw_krings_delete(struct netmap_adapter *na) in netmap_hw_krings_delete() argument
976 u_int lim = netmap_real_rings(na, NR_RX), i; in netmap_hw_krings_delete()
978 for (i = nma_get_nrings(na, NR_RX); i < lim; i++) { in netmap_hw_krings_delete()
979 struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue; in netmap_hw_krings_delete()
984 netmap_krings_delete(na); in netmap_hw_krings_delete()
988 netmap_mem_restore(struct netmap_adapter *na) in netmap_mem_restore() argument
990 if (na->nm_mem_prev) { in netmap_mem_restore()
991 netmap_mem_put(na->nm_mem); in netmap_mem_restore()
992 na->nm_mem = na->nm_mem_prev; in netmap_mem_restore()
993 na->nm_mem_prev = NULL; in netmap_mem_restore()
998 netmap_mem_drop(struct netmap_adapter *na) in netmap_mem_drop() argument
1000 netmap_mem_deref(na->nm_mem, na); in netmap_mem_drop()
1002 if (na->active_fds <= 0) { in netmap_mem_drop()
1006 netmap_mem_restore(na); in netmap_mem_drop()
1011 netmap_update_hostrings_mode(struct netmap_adapter *na) in netmap_update_hostrings_mode() argument
1018 for (i = nma_get_nrings(na, t); in netmap_update_hostrings_mode()
1019 i < netmap_real_rings(na, t); i++) { in netmap_update_hostrings_mode()
1020 kring = NMR(na, t)[i]; in netmap_update_hostrings_mode()
1037 struct netmap_adapter *na = priv->np_na; in netmap_do_unregif() local
1040 na->active_fds--; in netmap_do_unregif()
1047 if (na->active_fds <= 0) { in netmap_do_unregif()
1051 netmap_monitor_stop(na); in netmap_do_unregif()
1055 if (na->active_fds <= 0 || nm_kring_pending(priv)) { in netmap_do_unregif()
1056 netmap_set_all_rings(na, NM_KR_LOCKED); in netmap_do_unregif()
1057 na->nm_register(na, 0); in netmap_do_unregif()
1058 netmap_set_all_rings(na, 0); in netmap_do_unregif()
1062 netmap_mem_rings_delete(na); in netmap_do_unregif()
1064 if (na->active_fds <= 0) { /* last instance */ in netmap_do_unregif()
1080 nm_prinf("deleting last instance for %s", na->name); in netmap_do_unregif()
1082 if (nm_netmap_on(na)) { in netmap_do_unregif()
1086 na->nm_krings_delete(na); in netmap_do_unregif()
1089 if (na->na_flags & NAF_HOST_RINGS) { in netmap_do_unregif()
1090 na->num_host_tx_rings = 1; in netmap_do_unregif()
1091 na->num_host_rx_rings = 1; in netmap_do_unregif()
1093 na->num_host_tx_rings = 0; in netmap_do_unregif()
1094 na->num_host_rx_rings = 0; in netmap_do_unregif()
1101 netmap_mem_if_delete(na, priv->np_nifp); in netmap_do_unregif()
1103 netmap_mem_drop(na); in netmap_do_unregif()
1134 struct netmap_adapter *na = priv->np_na; in netmap_priv_delete() local
1141 if (na) { in netmap_priv_delete()
1144 netmap_unget_na(na, priv->np_ifp); in netmap_priv_delete()
1235 struct netmap_adapter *na = kring->na; in netmap_grab_packets() local
1243 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) { in netmap_grab_packets()
1249 m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL); in netmap_grab_packets()
1261 kring->na->na_flags & NAF_HOST_RINGS && in _nm_may_forward()
1269 kring->ring_id != kring->na->num_rx_rings; in nm_may_forward_up()
1277 kring->ring_id == kring->na->num_rx_rings; in nm_may_forward_down()
1293 netmap_sw_to_nic(struct netmap_adapter *na) in netmap_sw_to_nic() argument
1295 struct netmap_kring *kring = na->rx_rings[na->num_rx_rings]; in netmap_sw_to_nic()
1303 for (i = 0; i < na->num_tx_rings; i++) { in netmap_sw_to_nic()
1304 struct netmap_kring *kdst = na->tx_rings[i]; in netmap_sw_to_nic()
1348 struct netmap_adapter *na = kring->na; in netmap_txsync_to_host() local
1364 netmap_send_up(na->ifp, &q); in netmap_txsync_to_host()
1381 struct netmap_adapter *na = kring->na; in netmap_rxsync_from_host() local
1405 m_copydata(m, 0, len, NMB(na, slot)); in netmap_rxsync_from_host()
1408 nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL)); in netmap_rxsync_from_host()
1424 ret = netmap_sw_to_nic(na); in netmap_rxsync_from_host()
1445 * with *na containing the netmap adapter found.
1446 * Otherwise return an error code, with *na containing NULL.
1451 * then we unconditionally return the existing adapter into *na.
1452 * In all the other cases, we return (into *na) either native, generic, or NULL:
1458 * active_fds > 0, any admode: NA(ifp) is returned (native or generic alike)
1467 netmap_get_hw_na(if_t ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na) in netmap_get_hw_na() argument
1474 *na = NULL; /* default */ in netmap_get_hw_na()
1481 prev_na = NA(ifp); in netmap_get_hw_na()
1497 *na = prev_na; in netmap_get_hw_na()
1517 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))). in netmap_get_hw_na()
1518 * Consequently, if NA(ifp) is generic, we will enter one of in netmap_get_hw_na()
1526 *na = NA(ifp); in netmap_get_hw_na()
1529 if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) && in netmap_get_hw_na()
1530 (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) { in netmap_get_hw_na()
1531 (*na)->nm_mem_prev = (*na)->nm_mem; in netmap_get_hw_na()
1532 (*na)->nm_mem = netmap_mem_get(nmd); in netmap_get_hw_na()
1557 struct netmap_adapter **na, if_t *ifp, in netmap_get_na() argument
1565 *na = NULL; /* default return value */ in netmap_get_na()
1593 * All netmap_get_*_na() functions return an error and an na, in netmap_get_na()
1596 * error    na in netmap_get_na()
1598 * !0       NULL     type matches, but na creation/lookup failed in netmap_get_na()
1599 * 0        !NULL    type matches and na created/found in netmap_get_na()
1602 error = netmap_get_null_na(hdr, na, nmd, create); in netmap_get_na()
1603 if (error || *na != NULL) in netmap_get_na()
1607 error = netmap_get_monitor_na(hdr, na, nmd, create); in netmap_get_na()
1608 if (error || *na != NULL) in netmap_get_na()
1612 error = netmap_get_pipe_na(hdr, na, nmd, create); in netmap_get_na()
1613 if (error || *na != NULL) in netmap_get_na()
1617 error = netmap_get_vale_na(hdr, na, nmd, create); in netmap_get_na()
1621 if (*na != NULL) /* valid match in netmap_get_bdg_na() */ in netmap_get_na()
1625 * This must be a hardware na; look up the name in the system. in netmap_get_na()
1640 *na = ret; in netmap_get_na()
1647 if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) { in netmap_get_na()
1649 (*na)->num_host_tx_rings = req->nr_host_tx_rings; in netmap_get_na()
1651 (*na)->num_host_rx_rings = req->nr_host_rx_rings; in netmap_get_na()
1653 nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings, in netmap_get_na()
1654 (*na)->num_host_rx_rings); in netmap_get_na()
1673 netmap_unget_na(struct netmap_adapter *na, if_t ifp) in netmap_unget_na() argument
1677 if (na) in netmap_unget_na()
1678 netmap_adapter_put(na); in netmap_unget_na()
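
Every successful netmap_get_na() must be balanced by netmap_unget_na(), which drops both the adapter and the ifp references; the ioctl paths below follow this pattern. A minimal kernel-side sketch, assuming NMG_LOCK is held and hdr holds a validated request:

    struct netmap_adapter *na = NULL;
    if_t ifp = NULL;
    int error;

    error = netmap_get_na(hdr, &na, &ifp, NULL /* any allocator */,
        0 /* do not create */);
    if (error == 0) {
            /* ... use the referenced adapter ... */
            netmap_unget_na(na, ifp);
    }
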
1858 if (idx < 2 || idx >= kring->na->na_lut.objtotal) { in netmap_ring_reinit()
1862 } else if (len > NETMAP_BUF_SIZE(kring->na)) { in netmap_ring_reinit()
1890 struct netmap_adapter *na = priv->np_na; in netmap_interp_ringid() local
1907 priv->np_qlast[t] = nma_get_nrings(na, t); in netmap_interp_ringid()
1913 if (!(na->na_flags & NAF_HOST_RINGS)) { in netmap_interp_ringid()
1918 nma_get_nrings(na, t) : 0); in netmap_interp_ringid()
1919 priv->np_qlast[t] = netmap_all_rings(na, t); in netmap_interp_ringid()
1925 if (nr_ringid >= na->num_tx_rings && in netmap_interp_ringid()
1926 nr_ringid >= na->num_rx_rings) { in netmap_interp_ringid()
1932 if (j >= nma_get_nrings(na, t)) in netmap_interp_ringid()
1940 if (!(na->na_flags & NAF_HOST_RINGS)) { in netmap_interp_ringid()
1944 if (nr_ringid >= na->num_host_tx_rings && in netmap_interp_ringid()
1945 nr_ringid >= na->num_host_rx_rings) { in netmap_interp_ringid()
1951 if (j >= nma_get_host_nrings(na, t)) in netmap_interp_ringid()
1953 priv->np_qfirst[t] = nma_get_nrings(na, t) + j; in netmap_interp_ringid()
1954 priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1; in netmap_interp_ringid()
1968 priv->np_qlast[NR_TX] >= na->num_tx_rings) { in netmap_interp_ringid()
1974 na->name, in netmap_interp_ringid()
1992 struct netmap_adapter *na = priv->np_na; in netmap_set_ringid() local
2011 na->si_users[t]++; in netmap_set_ringid()
2019 struct netmap_adapter *na = priv->np_na; in netmap_unset_ringid() local
2024 na->si_users[t]--; in netmap_unset_ringid()
2053 struct netmap_adapter *na = priv->np_na; in netmap_krings_get() local
2061 na->name, in netmap_krings_get()
2106 na->name, in netmap_krings_put()
2232 * @return EINVAL if the na cannot be configured for the given mtu, 0 otherwise.
2235 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) { in netmap_buf_size_validate() argument
2236 unsigned nbs = NETMAP_BUF_SIZE(na); in netmap_buf_size_validate()
2238 if (mtu <= na->rx_buf_maxsize) { in netmap_buf_size_validate()
2254 if (!(na->na_flags & NAF_MOREFRAG)) { in netmap_buf_size_validate()
2258 if_name(na->ifp)); in netmap_buf_size_validate()
2260 } else if (nbs < na->rx_buf_maxsize) { in netmap_buf_size_validate()
2263 ">= %u", if_name(na->ifp), in netmap_buf_size_validate()
2264 na->rx_buf_maxsize); in netmap_buf_size_validate()
2271 if_name(na->ifp), mtu, nbs); in netmap_buf_size_validate()
2284 struct netmap_adapter *na = priv->np_na; in netmap_offsets_init() local
2297 if (!(na->na_flags & NAF_OFFSETS)) { in netmap_offsets_init()
2300 na->name); in netmap_offsets_init()
2327 if (max_offset > NETMAP_BUF_SIZE(na)) { in netmap_offsets_init()
2330 (unsigned long long)max_offset, NETMAP_BUF_SIZE(na)); in netmap_offsets_init()
2353 struct netmap_kring *kring = NMR(na, t)[i]; in netmap_offsets_init()
2421 struct netmap_adapter *na = priv->np_na; in netmap_compute_buf_len() local
2442 target = NETMAP_BUF_SIZE(kring->na) - in netmap_compute_buf_len()
2456 NETMAP_BUF_SIZE(kring->na); in netmap_compute_buf_len()
2467 if (!(na->na_flags & NAF_MOREFRAG)) { in netmap_compute_buf_len()
2471 na->name); in netmap_compute_buf_len()
2493 * The following na callbacks are called in the process:
2495 * na->nm_config() [by netmap_update_config]
2503 * na->nm_krings_create()
2527 * na->nm_register(, 1)
2559 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na, in netmap_do_regif() argument
2566 priv->np_na = na; /* store the reference */ in netmap_do_regif()
2567 error = netmap_mem_finalize(na->nm_mem, na); in netmap_do_regif()
2571 if (na->active_fds == 0) { in netmap_do_regif()
2573 /* cache the allocator info in the na */ in netmap_do_regif()
2574 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut); in netmap_do_regif()
2577 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal, in netmap_do_regif()
2578 na->na_lut.objsize); in netmap_do_regif()
2581 netmap_update_config(na); in netmap_do_regif()
2589 if (na->active_fds == 0) { in netmap_do_regif()
2595 if (na->ifp && nm_priv_rx_enabled(priv)) { in netmap_do_regif()
2597 unsigned mtu = nm_os_ifnet_mtu(na->ifp); in netmap_do_regif()
2600 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na)); in netmap_do_regif()
2602 if (na->rx_buf_maxsize == 0) { in netmap_do_regif()
2603 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name); in netmap_do_regif()
2608 error = netmap_buf_size_validate(na, mtu); in netmap_do_regif()
2617 error = na->nm_krings_create(na); in netmap_do_regif()
2632 error = netmap_mem_rings_create(na); in netmap_do_regif()
2647 nifp = netmap_mem_if_new(na, priv); in netmap_do_regif()
2656 netmap_set_all_rings(na, NM_KR_LOCKED); in netmap_do_regif()
2657 error = na->nm_register(na, 1); in netmap_do_regif()
2658 netmap_set_all_rings(na, 0); in netmap_do_regif()
2664 na->active_fds++; in netmap_do_regif()
2677 netmap_mem_if_delete(na, nifp); in netmap_do_regif()
2680 netmap_mem_rings_delete(na); in netmap_do_regif()
2682 if (na->active_fds == 0) in netmap_do_regif()
2683 na->nm_krings_delete(na); in netmap_do_regif()
2685 if (na->active_fds == 0) in netmap_do_regif()
2686 memset(&na->na_lut, 0, sizeof(na->na_lut)); in netmap_do_regif()
2688 netmap_mem_drop(na); in netmap_do_regif()
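
The nm_register callback invoked above, bracketed by the two netmap_set_all_rings() calls, is what actually moves the device in and out of netmap mode. The usual hardware-driver shape, sketched with a hypothetical reinit helper:

    static int
    my_netmap_reg(struct netmap_adapter *na, int onoff)
    {
            if (onoff)
                    nm_set_native_flags(na);    /* sets NAF_NETMAP_ON */
            else
                    nm_clear_native_flags(na);
            my_reinit_rings(na->ifp);           /* hypothetical: reprogram NIC */
            return 0;
    }
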
2744 struct netmap_adapter *na = NULL; in netmap_ioctl() local
2822 error = netmap_get_na(hdr, &na, &ifp, nmd, in netmap_ioctl()
2826 if (NETMAP_OWNED_BY_KERN(na)) { in netmap_ioctl()
2831 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) { in netmap_ioctl()
2833 "not accept it", na->virt_hdr_len); in netmap_ioctl()
2838 error = netmap_do_regif(priv, na, hdr); in netmap_ioctl()
2858 req->nr_rx_rings = na->num_rx_rings; in netmap_ioctl()
2859 req->nr_tx_rings = na->num_tx_rings; in netmap_ioctl()
2860 req->nr_rx_slots = na->num_rx_desc; in netmap_ioctl()
2861 req->nr_tx_slots = na->num_tx_desc; in netmap_ioctl()
2862 req->nr_host_tx_rings = na->num_host_tx_rings; in netmap_ioctl()
2863 req->nr_host_rx_rings = na->num_host_rx_rings; in netmap_ioctl()
2864 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags, in netmap_ioctl()
2875 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si; in netmap_ioctl()
2882 req->nr_extra_bufs = netmap_extra_alloc(na, in netmap_ioctl()
2889 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp); in netmap_ioctl()
2901 netmap_unget_na(na, ifp); in netmap_ioctl()
2938 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */); in netmap_ioctl()
2942 na = NULL; in netmap_ioctl()
2946 nmd = na->nm_mem; /* get memory allocator */ in netmap_ioctl()
2964 if (na == NULL) /* only memory info */ in netmap_ioctl()
2966 netmap_update_config(na); in netmap_ioctl()
2967 req->nr_rx_rings = na->num_rx_rings; in netmap_ioctl()
2968 req->nr_tx_rings = na->num_tx_rings; in netmap_ioctl()
2969 req->nr_rx_slots = na->num_rx_desc; in netmap_ioctl()
2970 req->nr_tx_slots = na->num_tx_desc; in netmap_ioctl()
2971 req->nr_host_tx_rings = na->num_host_tx_rings; in netmap_ioctl()
2972 req->nr_host_rx_rings = na->num_host_rx_rings; in netmap_ioctl()
2974 netmap_unget_na(na, ifp); in netmap_ioctl()
3014 error = netmap_get_vale_na(hdr, &na, NULL, 0); in netmap_ioctl()
3017 if (na && !error) { in netmap_ioctl()
3019 (struct netmap_vp_adapter *)na; in netmap_ioctl()
3020 na->virt_hdr_len = req->nr_hdr_len; in netmap_ioctl()
3021 if (na->virt_hdr_len) { in netmap_ioctl()
3022 vpna->mfs = NETMAP_BUF_SIZE(na); in netmap_ioctl()
3025 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na); in netmap_ioctl()
3026 netmap_adapter_put(na); in netmap_ioctl()
3027 } else if (!na) { in netmap_ioctl()
3048 error = netmap_get_na(hdr, &na, &ifp, NULL, 0); in netmap_ioctl()
3051 if (na && !error) { in netmap_ioctl()
3052 req->nr_hdr_len = na->virt_hdr_len; in netmap_ioctl()
3054 netmap_unget_na(na, ifp); in netmap_ioctl()
3096 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */); in netmap_ioctl()
3100 na = NULL; in netmap_ioctl()
3104 nmd = na->nm_mem; /* grab the memory allocator */ in netmap_ioctl()
3112 error = netmap_mem_finalize(nmd, na); in netmap_ioctl()
3117 netmap_mem_drop(na); in netmap_ioctl()
3119 netmap_unget_na(na, ifp); in netmap_ioctl()
3176 na = priv->np_na; /* we have a reference */ in netmap_ioctl()
3180 krings = NMR(na, t); in netmap_ioctl()
3225 netmap_send_up(na->ifp, &q); in netmap_ioctl()
3656 struct netmap_adapter *na; in netmap_poll() local
3688 na = priv->np_na; in netmap_poll()
3690 if (unlikely(!nm_netmap_on(na))) in netmap_poll()
3699 nm_prinf("device %s events 0x%x", na->name, events); in netmap_poll()
3729 kring = NMR(na, t)[i]; in netmap_poll()
3744 kring = NMR(na, t)[i]; in netmap_poll()
3785 kring = na->tx_rings[i]; in netmap_poll()
3847 kring = na->rx_rings[i]; in netmap_poll()
3909 netmap_send_up(na->ifp, &q); in netmap_poll()
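
From userspace, the path above is driven by poll(2) on the netmap file descriptor. A sketch of the canonical RX loop using the nm_open() helper API ("em0" hypothetical):

    #define NETMAP_WITH_LIBS
    #include <net/netmap_user.h>
    #include <poll.h>

    struct nm_desc *d = nm_open("netmap:em0", NULL, 0, NULL);
    struct pollfd pfd = { .fd = NETMAP_FD(d), .events = POLLIN };

    for (;;) {
            poll(&pfd, 1, -1);          /* sleeps inside netmap_poll() */
            for (unsigned i = d->first_rx_ring; i <= d->last_rx_ring; i++) {
                    struct netmap_ring *ring = NETMAP_RXRING(d->nifp, i);
                    while (!nm_ring_empty(ring)) {
                            struct netmap_slot *slot = &ring->slot[ring->cur];
                            char *buf = NETMAP_BUF(ring, slot->buf_idx);
                            /* consume slot->len bytes at buf ... */
                            ring->head = ring->cur =
                                nm_ring_next(ring, ring->cur);
                    }
            }
    }
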
3918 nma_intr_enable(struct netmap_adapter *na, int onoff) in nma_intr_enable() argument
3925 for (i = 0; i < nma_get_nrings(na, t); i++) { in nma_intr_enable()
3926 struct netmap_kring *kring = NMR(na, t)[i]; in nma_intr_enable()
3944 if (!na->nm_intr) { in nma_intr_enable()
3946 na->name); in nma_intr_enable()
3950 na->nm_intr(na, onoff); in nma_intr_enable()
3962 struct netmap_adapter *na = kring->notify_na; in netmap_notify() local
3970 if (na->si_users[t] > 0) in netmap_notify()
3971 nm_os_selwakeup(&na->si[t]); in netmap_notify()
3981 netmap_attach_common(struct netmap_adapter *na) in netmap_attach_common() argument
3983 if (!na->rx_buf_maxsize) { in netmap_attach_common()
3985 na->rx_buf_maxsize = PAGE_SIZE; in netmap_attach_common()
3989 if (na->na_flags & NAF_HOST_RINGS && na->ifp) { in netmap_attach_common()
3990 na->if_input = if_getinputfn(na->ifp); /* for netmap_send_up */ in netmap_attach_common()
3992 na->pdev = na; /* make sure netmap_mem_map() is called */ in netmap_attach_common()
3994 if (na->na_flags & NAF_HOST_RINGS) { in netmap_attach_common()
3995 if (na->num_host_rx_rings == 0) in netmap_attach_common()
3996 na->num_host_rx_rings = 1; in netmap_attach_common()
3997 if (na->num_host_tx_rings == 0) in netmap_attach_common()
3998 na->num_host_tx_rings = 1; in netmap_attach_common()
4000 if (na->nm_krings_create == NULL) { in netmap_attach_common()
4005 na->nm_krings_create = netmap_hw_krings_create; in netmap_attach_common()
4006 na->nm_krings_delete = netmap_hw_krings_delete; in netmap_attach_common()
4008 if (na->nm_notify == NULL) in netmap_attach_common()
4009 na->nm_notify = netmap_notify; in netmap_attach_common()
4010 na->active_fds = 0; in netmap_attach_common()
4012 if (na->nm_mem == NULL) { in netmap_attach_common()
4014 na->nm_mem = netmap_mem_get_allocator(na); in netmap_attach_common()
4016 if (na->nm_bdg_attach == NULL) in netmap_attach_common()
4020 na->nm_bdg_attach = netmap_default_bdg_attach; in netmap_attach_common()
4027 * nm_iszombie(na) means that the driver module has been unloaded.
4033 netmap_hw_reg(struct netmap_adapter *na, int onoff) in netmap_hw_reg() argument
4036 (struct netmap_hw_adapter*)na; in netmap_hw_reg()
4041 if (nm_iszombie(na)) { in netmap_hw_reg()
4044 } else if (na != NULL) { in netmap_hw_reg()
4045 na->na_flags &= ~NAF_NETMAP_ON; in netmap_hw_reg()
4050 error = hwna->nm_hw_register(na, onoff); in netmap_hw_reg()
4059 netmap_hw_dtor(struct netmap_adapter *na) in netmap_hw_dtor() argument
4061 if (na->ifp == NULL) in netmap_hw_dtor()
4064 NM_DETACH_NA(na->ifp); in netmap_hw_dtor()
4103 /* If NA(ifp) is not null but there is no valid netmap in netmap_attach_ext()
4141 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna); in netmap_attach_ext()
4155 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na) in NM_DBG()
4157 if (!na) { in NM_DBG()
4161 refcount_acquire(&na->na_refcount); in NM_DBG()
4167 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na) in NM_DBG()
4169 if (!na) in NM_DBG()
4172 if (!refcount_release(&na->na_refcount)) in NM_DBG()
4175 if (na->nm_dtor) in NM_DBG()
4176 na->nm_dtor(na); in NM_DBG()
4178 if (na->tx_rings) { /* XXX should not happen */ in NM_DBG()
4181 na->nm_krings_delete(na); in NM_DBG()
4183 netmap_pipe_dealloc(na); in NM_DBG()
4184 if (na->nm_mem) in NM_DBG()
4185 netmap_mem_put(na->nm_mem); in NM_DBG()
4186 bzero(na, sizeof(*na)); in NM_DBG()
4187 nm_os_free(na); in NM_DBG()
4194 netmap_hw_krings_create(struct netmap_adapter *na) in netmap_hw_krings_create() argument
4196 int ret = netmap_krings_create(na, 0); in netmap_hw_krings_create()
4199 u_int lim = netmap_real_rings(na, NR_RX), i; in netmap_hw_krings_create()
4200 for (i = na->num_rx_rings; i < lim; i++) { in netmap_hw_krings_create()
4201 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue); in netmap_hw_krings_create()
4203 nm_prdis("initialized sw rx queue %d", na->num_rx_rings); in netmap_hw_krings_create()
4216 struct netmap_adapter *na; in netmap_detach() local
4225 na = NA(ifp); in netmap_detach()
4226 netmap_set_all_rings(na, NM_KR_LOCKED); in netmap_detach()
4233 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) { in netmap_detach()
4234 na->na_flags |= NAF_ZOMBIE; in netmap_detach()
4239 * therefore, the put() above has deleted the na, since now NA(ifp) is in netmap_detach()
4254 * We rely on the OS to make sure that the ifp and na do not go
4262 struct netmap_adapter *na = NA(ifp); in netmap_transmit() local
4272 if (i >= na->num_host_rx_rings) { in netmap_transmit()
4273 i = i % na->num_host_rx_rings; in netmap_transmit()
4275 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i]; in netmap_transmit()
4279 // mtx_lock(&na->core_lock); in netmap_transmit()
4281 if (!nm_netmap_on(na)) { in netmap_transmit()
4282 nm_prerr("%s not in netmap mode anymore", na->name); in netmap_transmit()
4288 if (txr >= na->num_tx_rings) { in netmap_transmit()
4289 txr %= na->num_tx_rings; in netmap_transmit()
4291 tx_kring = NMR(na, NR_TX)[txr]; in netmap_transmit()
4294 return MBUF_TRANSMIT(na, ifp, m); in netmap_transmit()
4300 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */ in netmap_transmit()
4301 nm_prerr("%s from_host, drop packet size %d > %d", na->name, in netmap_transmit()
4302 len, NETMAP_BUF_SIZE(na)); in netmap_transmit()
4308 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name); in netmap_transmit()
4314 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name); in netmap_transmit()
4334 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name, in netmap_transmit()
4338 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q)); in netmap_transmit()
4374 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, in netmap_reset() argument
4380 if (!nm_native_on(na)) { in netmap_reset()
4386 if (n >= na->num_tx_rings) in netmap_reset()
4388 kring = na->tx_rings[n]; in netmap_reset()
4400 if (n >= na->num_rx_rings) in netmap_reset()
4402 kring = na->rx_rings[n]; in netmap_reset()
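
Drivers call netmap_reset() from their ring (re)initialization path; a NULL return means the ring is not in native netmap mode and normal buffers should be used. A sketch (descriptor programming is NIC-specific and hypothetical):

    struct netmap_slot *slot = netmap_reset(na, NR_TX, ring_nr, 0);

    if (slot != NULL) {     /* ring owned by netmap */
            for (unsigned l = 0; l < na->num_tx_desc; l++) {
                    uint64_t paddr;
                    void *vaddr = PNMB(na, slot + l, &paddr);

                    (void)vaddr;
                    /* program NIC descriptor l with paddr / slot[l].len */
            }
    }
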
4454 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done) in netmap_common_irq() argument
4465 if (q >= nma_get_nrings(na, t)) in netmap_common_irq()
4468 kring = NMR(na, t)[q]; in netmap_common_irq()
4502 struct netmap_adapter *na = NA(ifp); in netmap_rx_irq() local
4510 if (!nm_netmap_on(na)) in netmap_rx_irq()
4513 if (na->na_flags & NAF_SKIP_INTR) { in netmap_rx_irq()
4518 return netmap_common_irq(na, q, work_done); in netmap_rx_irq()
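
NIC interrupt handlers call netmap_rx_irq() (and the analogous netmap_tx_irq()) first and skip normal stack processing when netmap consumed the event; a zero return (NM_IRQ_PASS) means the interface is not in netmap mode. The classic idiom, sketched:

    /* in the per-ring RX interrupt or task handler (hypothetical) */
    u_int work_done;

    if (netmap_rx_irq(ifp, ring_nr, &work_done))
            return;     /* netmap handled it: waiters already notified */
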
4523 nm_set_native_flags(struct netmap_adapter *na) in nm_set_native_flags() argument
4525 if_t ifp = na->ifp; in nm_set_native_flags()
4529 if (na->active_fds > 0) { in nm_set_native_flags()
4533 na->na_flags |= NAF_NETMAP_ON; in nm_set_native_flags()
4535 netmap_update_hostrings_mode(na); in nm_set_native_flags()
4539 nm_clear_native_flags(struct netmap_adapter *na) in nm_clear_native_flags() argument
4541 if_t ifp = na->ifp; in nm_clear_native_flags()
4545 if (na->active_fds > 0) { in nm_clear_native_flags()
4549 netmap_update_hostrings_mode(na); in nm_clear_native_flags()
4552 na->na_flags &= ~NAF_NETMAP_ON; in nm_clear_native_flags()
4556 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff) in netmap_krings_mode_commit() argument
4563 for (i = 0; i < netmap_real_rings(na, t); i++) { in netmap_krings_mode_commit()
4564 struct netmap_kring *kring = NMR(na, t)[i]; in netmap_krings_mode_commit()