Lines Matching +full:reserved +full:- +full:cpu +full:- +full:vectors

1 /*-
2 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
3 * All rights reserved.
103 * Enable mbuf vectors for compressing long mbuf chains
108 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
111 * - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
113 * - small packet forwarding which is just returning a single mbuf to
120 * - private structures
121 * - iflib private utility functions
122 * - ifnet functions
123 * - vlan registry and other exported functions
124 * - iflib public core functions
231 return (ctx->ifc_softc); in iflib_get_softc()
238 return (ctx->ifc_dev); in iflib_get_dev()
245 return (ctx->ifc_ifp); in iflib_get_ifp()
252 return (ctx->ifc_mediap); in iflib_get_media()
259 bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN); in iflib_set_mac()
266 return (&ctx->ifc_softc_ctx); in iflib_get_softc_ctx()
273 return (ctx->ifc_sctx); in iflib_get_sctx()
280 return (ctx->ifc_sysctl_extra_msix_vectors); in iflib_get_extra_msix_vectors_sysctl()
283 #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
287 #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
288 #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
316 /* this should really scale with ring size - this is a fairly arbitrary value */
435 used = pidx - cidx; in get_inuse()
437 used = size - cidx + pidx; in get_inuse()
448 #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
451 ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
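The two cases above are standard ring-occupancy math; a minimal standalone sketch of what get_inuse() and these macros compute (hypothetical helper name, 16-bit indices assumed):

#include <stdint.h>

/* Sketch only: `gen` disambiguates pidx == cidx, which means a full
 * ring once the producer has wrapped and an empty ring before that. */
static int
ring_inuse(int size, uint16_t cidx, uint16_t pidx, uint8_t gen)
{
        if (pidx > cidx)
                return (pidx - cidx);           /* no wrap */
        if (pidx < cidx)
                return (size - cidx + pidx);    /* producer wrapped */
        return (gen ? size : 0);                /* indices equal */
}

/* e.g. size = 1024, cidx = 1000, pidx = 8: 1024 - 1000 + 8 = 32 in use,
 * so TXQ_AVAIL() would report 1024 - 32 = 992 free descriptors. */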
515 pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0; in pkt_info_zero()
516 pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0; in pkt_info_zero()
518 pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0; in pkt_info_zero()
519 pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0; in pkt_info_zero()
531 ri_pad->rxd_val[i] = 0; in rxd_info_zero()
532 ri_pad->rxd_val[i + 1] = 0; in rxd_info_zero()
533 ri_pad->rxd_val[i + 2] = 0; in rxd_info_zero()
534 ri_pad->rxd_val[i + 3] = 0; in rxd_info_zero()
537 ri_pad->rxd_val[RXD_INFO_SIZE - 1] = 0; in rxd_info_zero()
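pkt_info_zero() and rxd_info_zero() clear their structures with unrolled word stores instead of a memset() call. A standalone model of the idiom (names and the array length are hypothetical; the unrolling assumes the padded length minus one is a multiple of four, as above):

#include <stdint.h>

#define NVALS 13        /* hypothetical padded length: NVALS - 1 is a multiple of 4 */

static inline void
vals_zero(uint64_t *val)
{
        int i;

        for (i = 0; i < NVALS - 1; i += 4) {    /* four stores per iteration */
                val[i] = 0;
                val[i + 1] = 0;
                val[i + 2] = 0;
                val[i + 3] = 0;
        }
        val[NVALS - 1] = 0;                     /* odd tail element */
}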
545 #define IF_BAD_DMA ((bus_addr_t)-1)
547 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
549 #define CTX_LOCK_INIT(_sc) sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
550 #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
551 #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
552 #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
554 #define STATE_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_…
555 #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
556 #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
557 #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
559 #define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx)
560 #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx)
562 /* Our boot-time initialization hook */
755 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_num_rx_descs()
756 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_num_rx_descs()
757 uint16_t first_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; in iflib_num_rx_descs()
759 return (scctx->isc_nrxd[first_rxq]); in iflib_num_rx_descs()
765 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_num_tx_descs()
766 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_num_tx_descs()
767 uint16_t first_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; in iflib_num_tx_descs()
769 return (scctx->isc_ntxd[first_txq]); in iflib_num_tx_descs()
783 * device-specific sysctl variables:
815 if_t ifp = na->ifp; in iflib_netmap_register()
827 * ifp->if_transmit. This is done once the device has been stopped in iflib_netmap_register()
851 if_t ifp = na->ifp; in iflib_netmap_config()
853 iflib_rxq_t rxq = &ctx->ifc_rxqs[0]; in iflib_netmap_config()
854 iflib_fl_t fl = &rxq->ifr_fl[0]; in iflib_netmap_config()
856 info->num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; in iflib_netmap_config()
857 info->num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; in iflib_netmap_config()
858 info->num_tx_descs = iflib_num_tx_descs(ctx); in iflib_netmap_config()
859 info->num_rx_descs = iflib_num_rx_descs(ctx); in iflib_netmap_config()
860 info->rx_buf_maxsize = fl->ifl_buf_size; in iflib_netmap_config()
862 info->num_tx_rings, info->num_rx_rings, info->num_tx_descs, in iflib_netmap_config()
863 info->num_rx_descs, info->rx_buf_maxsize); in iflib_netmap_config()
871 struct netmap_adapter *na = kring->na; in netmap_fl_refill()
872 u_int const lim = kring->nkr_num_slots - 1; in netmap_fl_refill()
873 struct netmap_ring *ring = kring->ring; in netmap_fl_refill()
876 if_ctx_t ctx = rxq->ifr_ctx; in netmap_fl_refill()
877 iflib_fl_t fl = &rxq->ifr_fl[0]; in netmap_fl_refill()
889 * such a way to keep fl->ifl_pidx and kring->nr_hwcur in sync in netmap_fl_refill()
890 * (except for kring->nkr_hwofs). These may be less than in netmap_fl_refill()
891 * kring->nkr_num_slots if netmap_reset() was called while in netmap_fl_refill()
897 * (fl->ifl_pidx - 1) % N (included), to avoid the NIC tail/prod in netmap_fl_refill()
902 n = kring->nkr_num_slots - nm_kr_rxspace(kring); in netmap_fl_refill()
904 n = kring->rhead - kring->nr_hwcur; in netmap_fl_refill()
908 n += kring->nkr_num_slots; in netmap_fl_refill()
912 map = fl->ifl_sds.ifsd_map; in netmap_fl_refill()
913 nic_i = fl->ifl_pidx; in netmap_fl_refill()
921 MPASS(nm_i == kring->nr_hwtail); in netmap_fl_refill()
923 MPASS(nm_i == kring->nr_hwcur); in netmap_fl_refill()
931 for (i = 0; n > 0 && i < IFLIB_MAX_RX_REFRESH; n--, i++) { in netmap_fl_refill()
932 struct netmap_slot *slot = &ring->slot[nm_i]; in netmap_fl_refill()
941 fl->ifl_bus_addrs[i] = paddr + in netmap_fl_refill()
943 fl->ifl_rxd_idxs[i] = nic_i; in netmap_fl_refill()
946 netmap_load_map(na, fl->ifl_buf_tag, in netmap_fl_refill()
948 } else if (slot->flags & NS_BUF_CHANGED) { in netmap_fl_refill()
950 netmap_reload_map(na, fl->ifl_buf_tag, in netmap_fl_refill()
953 bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i], in netmap_fl_refill()
955 slot->flags &= ~NS_BUF_CHANGED; in netmap_fl_refill()
963 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); in netmap_fl_refill()
965 fl->ifl_pidx = nic_i; in netmap_fl_refill()
970 MPASS(nm_i == kring->rhead); in netmap_fl_refill()
971 kring->nr_hwcur = nm_i; in netmap_fl_refill()
973 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, in netmap_fl_refill()
975 ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, in netmap_fl_refill()
988 * Userspace wants to send packets up to the one before kring->rhead,
989 * kernel knows kring->nr_hwcur is the first unsent packet.
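In outline, txsync walks nm_i from nr_hwcur up to (but excluding) rhead, encapsulating one slot per step, then publishes the new nr_hwcur. A hedged model of just the index protocol, with netmap's nm_next()/nm_prev() written out locally:

#include <stdint.h>

static inline uint32_t nm_next(uint32_t i, uint32_t lim) { return (i == lim ? 0 : i + 1); }
static inline uint32_t nm_prev(uint32_t i, uint32_t lim) { return (i == 0 ? lim : i - 1); }

static void
txsync_model(uint32_t *nr_hwcur, uint32_t rhead, uint32_t lim)
{
        uint32_t nm_i = *nr_hwcur;

        while (nm_i != rhead) {
                /* isc_txd_encap() would consume ring->slot[nm_i] here */
                nm_i = nm_next(nm_i, lim);
        }
        *nr_hwcur = nm_i;       /* every slot before rhead is now owned by the NIC */
}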
1001 struct netmap_adapter *na = kring->na; in iflib_netmap_txsync()
1002 if_t ifp = na->ifp; in iflib_netmap_txsync()
1003 struct netmap_ring *ring = kring->ring; in iflib_netmap_txsync()
1006 u_int const lim = kring->nkr_num_slots - 1; in iflib_netmap_txsync()
1007 u_int const head = kring->rhead; in iflib_netmap_txsync()
1015 u_int report_frequency = kring->nkr_num_slots >> 1; in iflib_netmap_txsync()
1016 /* device-specific */ in iflib_netmap_txsync()
1018 iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id]; in iflib_netmap_txsync()
1020 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, in iflib_netmap_txsync()
1045 nm_i = kring->nr_hwcur; in iflib_netmap_txsync()
1048 int nic_i_start = -1, flags = 0; in iflib_netmap_txsync()
1050 pi.ipi_segs = txq->ift_segs; in iflib_netmap_txsync()
1051 pi.ipi_qsidx = kring->ring_id; in iflib_netmap_txsync()
1054 __builtin_prefetch(&ring->slot[nm_i]); in iflib_netmap_txsync()
1055 __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]); in iflib_netmap_txsync()
1056 __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]); in iflib_netmap_txsync()
1059 struct netmap_slot *slot = &ring->slot[nm_i]; in iflib_netmap_txsync()
1061 u_int len = slot->len; in iflib_netmap_txsync()
1065 flags |= (slot->flags & NS_REPORT || in iflib_netmap_txsync()
1083 if (!(slot->flags & NS_MOREFRAG)) { in iflib_netmap_txsync()
1091 ctx->isc_txd_encap(ctx->ifc_softc, &pi); in iflib_netmap_txsync()
1098 /* Reinit per-packet info for the next one. */ in iflib_netmap_txsync()
1100 nic_i_start = -1; in iflib_netmap_txsync()
1104 __builtin_prefetch(&ring->slot[nm_i + 1]); in iflib_netmap_txsync()
1105 __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]); in iflib_netmap_txsync()
1106 __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]); in iflib_netmap_txsync()
1110 if (slot->flags & NS_BUF_CHANGED) { in iflib_netmap_txsync()
1112 netmap_reload_map(na, txq->ift_buf_tag, in iflib_netmap_txsync()
1113 txq->ift_sds.ifsd_map[nic_i], addr); in iflib_netmap_txsync()
1116 bus_dmamap_sync(txq->ift_buf_tag, in iflib_netmap_txsync()
1117 txq->ift_sds.ifsd_map[nic_i], in iflib_netmap_txsync()
1120 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED | NS_MOREFRAG); in iflib_netmap_txsync()
1124 kring->nr_hwcur = nm_i; in iflib_netmap_txsync()
1127 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, in iflib_netmap_txsync()
1131 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i); in iflib_netmap_txsync()
1139 * trigger a per-tx-queue timer to try again later. in iflib_netmap_txsync()
1141 if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) { in iflib_netmap_txsync()
1144 nic_i = txq->ift_cidx_processed; in iflib_netmap_txsync()
1145 kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); in iflib_netmap_txsync()
1149 if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) in iflib_netmap_txsync()
1150 if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) { in iflib_netmap_txsync()
1151 callout_reset_sbt_on(&txq->ift_netmap_timer, in iflib_netmap_txsync()
1154 txq->ift_netmap_timer.c_cpu, 0); in iflib_netmap_txsync()
1169 * On call, kring->rhead is the first packet that userspace wants
1170 * to keep, and kring->rcur is the wakeup point.
1171 * The kernel has previously reported packets up to kring->rtail.
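The receive side is symmetric: completed buffers are appended at nr_hwtail, which must never advance onto nm_prev(nr_hwcur, lim) because userspace still owns the slots before nr_hwcur. A sketch of that bound, reusing the nm_next()/nm_prev() helpers from the txsync model above:

static void
rxsync_model(uint32_t *nr_hwtail, uint32_t nr_hwcur, uint32_t lim, int avail)
{
        uint32_t nm_i = *nr_hwtail;
        uint32_t hwtail_lim = nm_prev(nr_hwcur, lim);   /* hard stop */

        while (avail-- > 0 && nm_i != hwtail_lim) {
                /* fill ring->slot[nm_i] from the next completed descriptor */
                nm_i = nm_next(nm_i, lim);
        }
        *nr_hwtail = nm_i;
}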
1179 struct netmap_adapter *na = kring->na; in iflib_netmap_rxsync()
1180 struct netmap_ring *ring = kring->ring; in iflib_netmap_rxsync()
1181 if_t ifp = na->ifp; in iflib_netmap_rxsync()
1185 u_int const lim = kring->nkr_num_slots - 1; in iflib_netmap_rxsync()
1186 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; in iflib_netmap_rxsync()
1190 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_netmap_rxsync()
1191 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_netmap_rxsync()
1192 iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id]; in iflib_netmap_rxsync()
1193 iflib_fl_t fl = &rxq->ifr_fl[0]; in iflib_netmap_rxsync()
1202 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, in iflib_netmap_rxsync()
1214 * nic_i = fl->ifl_cidx; in iflib_netmap_rxsync()
1215 * nm_i = kring->nr_hwtail (previous) in iflib_netmap_rxsync()
1217 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size in iflib_netmap_rxsync()
1219 * fl->ifl_cidx is set to 0 on a ring reinit in iflib_netmap_rxsync()
1222 uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim); in iflib_netmap_rxsync()
1223 bool have_rxcq = sctx->isc_flags & IFLIB_HAS_RXCQ; in iflib_netmap_rxsync()
1232 cidxp = &rxq->ifr_cq_cidx; in iflib_netmap_rxsync()
1234 cidxp = &fl->ifl_cidx; in iflib_netmap_rxsync()
1235 avail = ctx->isc_rxd_available(ctx->ifc_softc, in iflib_netmap_rxsync()
1236 rxq->ifr_id, *cidxp, USHRT_MAX); in iflib_netmap_rxsync()
1238 nic_i = fl->ifl_cidx; in iflib_netmap_rxsync()
1240 MPASS(nm_i == kring->nr_hwtail); in iflib_netmap_rxsync()
1241 for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) { in iflib_netmap_rxsync()
1243 ri.iri_frags = rxq->ifr_frags; in iflib_netmap_rxsync()
1244 ri.iri_qsidx = kring->ring_id; in iflib_netmap_rxsync()
1245 ri.iri_ifp = ctx->ifc_ifp; in iflib_netmap_rxsync()
1248 error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); in iflib_netmap_rxsync()
1251 ring->slot[nm_i].len = 0; in iflib_netmap_rxsync()
1252 ring->slot[nm_i].flags = 0; in iflib_netmap_rxsync()
1254 ring->slot[nm_i].len = ri.iri_frags[i].irf_len; in iflib_netmap_rxsync()
1255 if (i == (ri.iri_nfrags - 1)) { in iflib_netmap_rxsync()
1256 ring->slot[nm_i].len -= crclen; in iflib_netmap_rxsync()
1257 ring->slot[nm_i].flags = 0; in iflib_netmap_rxsync()
1263 ring->slot[nm_i].flags = NS_MOREFRAG; in iflib_netmap_rxsync()
1266 bus_dmamap_sync(fl->ifl_buf_tag, in iflib_netmap_rxsync()
1267 fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD); in iflib_netmap_rxsync()
1269 fl->ifl_cidx = nic_i = nm_next(nic_i, lim); in iflib_netmap_rxsync()
1274 while (*cidxp >= scctx->isc_nrxd[0]) in iflib_netmap_rxsync()
1275 *cidxp -= scctx->isc_nrxd[0]; in iflib_netmap_rxsync()
1285 kring->nr_hwtail = nm_i; in iflib_netmap_rxsync()
1287 kring->nr_kflags &= ~NKR_PENDINTR; in iflib_netmap_rxsync()
1291 * (kring->nr_hwcur to head excluded), in iflib_netmap_rxsync()
1295 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size in iflib_netmap_rxsync()
1308 if_ctx_t ctx = if_getsoftc(na->ifp); in iflib_netmap_intr()
1326 na.ifp = ctx->ifc_ifp; in iflib_netmap_attach()
1328 MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); in iflib_netmap_attach()
1329 MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); in iflib_netmap_attach()
1338 na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; in iflib_netmap_attach()
1339 na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; in iflib_netmap_attach()
1346 struct netmap_adapter *na = NA(ctx->ifc_ifp); in iflib_netmap_txq_init()
1349 slot = netmap_reset(na, NR_TX, txq->ift_id, 0); in iflib_netmap_txq_init()
1352 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { in iflib_netmap_txq_init()
1360 int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i); in iflib_netmap_txq_init()
1361 netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i], in iflib_netmap_txq_init()
1370 struct netmap_adapter *na = NA(ctx->ifc_ifp); in iflib_netmap_rxq_init()
1374 slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); in iflib_netmap_rxq_init()
1377 kring = na->rx_rings[rxq->ifr_id]; in iflib_netmap_rxq_init()
1386 if_ctx_t ctx = txq->ift_ctx; in iflib_netmap_timer()
1392 netmap_tx_irq(ctx->ifc_ifp, txq->ift_id); in iflib_netmap_timer()
1440 fl = &rxq->ifr_fl[flid]; in iru_init()
1441 iru->iru_paddrs = fl->ifl_bus_addrs; in iru_init()
1442 iru->iru_idxs = fl->ifl_rxd_idxs; in iru_init()
1443 iru->iru_qsidx = rxq->ifr_id; in iru_init()
1444 iru->iru_buf_size = fl->ifl_buf_size; in iru_init()
1445 iru->iru_flidx = fl->ifl_id; in iru_init()
1458 BUS_SPACE_MAXADDR : (1ULL << (width)) - 1ULL)
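The fragment above is the value arm of the DMA_WIDTH_TO_BUS_LOWADDR() macro: a device that drives `width` address bits is bounded at (1 << width) - 1, and the no-declared-width case falls back to BUS_SPACE_MAXADDR (no restriction). Illustrative values:

#include <stdint.h>

/* width = 32 -> lowaddr = 0xFFFFFFFF        (32-bit DMA engine)
 * width = 48 -> lowaddr = 0xFFFFFFFFFFFF    (48-bit DMA engine)
 * width unset -> BUS_SPACE_MAXADDR          (no boundary imposed) */
uint64_t lowaddr48 = (1ULL << 48) - 1ULL;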
1464 device_t dev = ctx->ifc_dev; in iflib_dma_alloc_align()
1467 lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(ctx->ifc_softc_ctx.isc_dma_width); in iflib_dma_alloc_align()
1480 &dma->idi_tag); in iflib_dma_alloc_align()
1488 err = bus_dmamem_alloc(dma->idi_tag, (void **)&dma->idi_vaddr, in iflib_dma_alloc_align()
1489 BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map); in iflib_dma_alloc_align()
1497 dma->idi_paddr = IF_BAD_DMA; in iflib_dma_alloc_align()
1498 err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr, in iflib_dma_alloc_align()
1499 size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT); in iflib_dma_alloc_align()
1500 if (err || dma->idi_paddr == IF_BAD_DMA) { in iflib_dma_alloc_align()
1507 dma->idi_size = size; in iflib_dma_alloc_align()
1511 bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); in iflib_dma_alloc_align()
1513 bus_dma_tag_destroy(dma->idi_tag); in iflib_dma_alloc_align()
1515 dma->idi_tag = NULL; in iflib_dma_alloc_align()
1523 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_dma_alloc()
1525 KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); in iflib_dma_alloc()
1527 return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags)); in iflib_dma_alloc()
1549 if (dma->idi_tag == NULL) in iflib_dma_free()
1551 if (dma->idi_paddr != IF_BAD_DMA) { in iflib_dma_free()
1552 bus_dmamap_sync(dma->idi_tag, dma->idi_map, in iflib_dma_free()
1554 bus_dmamap_unload(dma->idi_tag, dma->idi_map); in iflib_dma_free()
1555 dma->idi_paddr = IF_BAD_DMA; in iflib_dma_free()
1557 if (dma->idi_vaddr != NULL) { in iflib_dma_free()
1558 bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); in iflib_dma_free()
1559 dma->idi_vaddr = NULL; in iflib_dma_free()
1561 bus_dma_tag_destroy(dma->idi_tag); in iflib_dma_free()
1562 dma->idi_tag = NULL; in iflib_dma_free()
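Taken together, iflib_dma_alloc() performs the usual three-step busdma sequence (create a tag, allocate coherent memory, load the map to learn the bus address) and iflib_dma_free() unwinds it in reverse. A hedged driver-side usage sketch (kernel context, error handling trimmed):

struct iflib_dma_info dma;

if (iflib_dma_alloc(ctx, PAGE_SIZE, &dma, BUS_DMA_NOWAIT) != 0)
        return (ENOMEM);
/* program the device with dma.idi_paddr; write descriptors via dma.idi_vaddr */
iflib_dma_free(&dma);           /* detach path: sync, unload, free, destroy tag */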
1579 struct grouptask *gtask = info->ifi_task; in iflib_fast_intr()
1583 if (info->ifi_filter != NULL) { in iflib_fast_intr()
1584 result = info->ifi_filter(info->ifi_filter_arg); in iflib_fast_intr()
1597 struct grouptask *gtask = info->ifi_task; in iflib_fast_intr_rxtx()
1599 iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx; in iflib_fast_intr_rxtx()
1607 if (info->ifi_filter != NULL) { in iflib_fast_intr_rxtx()
1608 result = info->ifi_filter(info->ifi_filter_arg); in iflib_fast_intr_rxtx()
1613 ctx = rxq->ifr_ctx; in iflib_fast_intr_rxtx()
1614 sc = ctx->ifc_softc; in iflib_fast_intr_rxtx()
1616 intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY); in iflib_fast_intr_rxtx()
1617 MPASS(rxq->ifr_ntxqirq); in iflib_fast_intr_rxtx()
1618 for (i = 0; i < rxq->ifr_ntxqirq; i++) { in iflib_fast_intr_rxtx()
1619 txqid = rxq->ifr_txqid[i]; in iflib_fast_intr_rxtx()
1620 txq = &ctx->ifc_txqs[txqid]; in iflib_fast_intr_rxtx()
1621 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, in iflib_fast_intr_rxtx()
1623 if (!ctx->isc_txd_credits_update(sc, txqid, false)) { in iflib_fast_intr_rxtx()
1630 GROUPTASK_ENQUEUE(&txq->ift_task); in iflib_fast_intr_rxtx()
1632 if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ) in iflib_fast_intr_rxtx()
1633 cidx = rxq->ifr_cq_cidx; in iflib_fast_intr_rxtx()
1635 cidx = rxq->ifr_fl[0].ifl_cidx; in iflib_fast_intr_rxtx()
1642 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); in iflib_fast_intr_rxtx()
1654 if_ctx_t ctx = info->ifi_ctx; in iflib_fast_intr_ctx()
1658 if (info->ifi_filter != NULL) { in iflib_fast_intr_ctx()
1659 result = info->ifi_filter(info->ifi_filter_arg); in iflib_fast_intr_ctx()
1664 taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task); in iflib_fast_intr_ctx()
1675 device_t dev = ctx->ifc_dev; in _iflib_irq_alloc()
1679 if (ctx->ifc_flags & IFC_LEGACY) in _iflib_irq_alloc()
1689 irq->ii_res = res; in _iflib_irq_alloc()
1690 KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL")); in _iflib_irq_alloc()
1701 irq->ii_tag = tag; in _iflib_irq_alloc()
1708 * mbuf map. TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a
1717 if_ctx_t ctx = txq->ift_ctx; in iflib_txsd_alloc()
1718 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_txsd_alloc()
1719 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_txsd_alloc()
1720 device_t dev = ctx->ifc_dev; in iflib_txsd_alloc()
1726 nsegments = scctx->isc_tx_nsegments; in iflib_txsd_alloc()
1727 ntsosegments = scctx->isc_tx_tso_segments_max; in iflib_txsd_alloc()
1728 tsomaxsize = scctx->isc_tx_tso_size_max; in iflib_txsd_alloc()
1729 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU) in iflib_txsd_alloc()
1731 MPASS(scctx->isc_ntxd[0] > 0); in iflib_txsd_alloc()
1732 MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); in iflib_txsd_alloc()
1734 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) { in iflib_txsd_alloc()
1736 MPASS(sctx->isc_tso_maxsize >= tsomaxsize); in iflib_txsd_alloc()
1739 lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width); in iflib_txsd_alloc()
1749 sctx->isc_tx_maxsize, /* maxsize */ in iflib_txsd_alloc()
1751 sctx->isc_tx_maxsegsize, /* maxsegsize */ in iflib_txsd_alloc()
1755 &txq->ift_buf_tag))) { in iflib_txsd_alloc()
1758 (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize); in iflib_txsd_alloc()
1761 tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0; in iflib_txsd_alloc()
1769 sctx->isc_tso_maxsegsize, /* maxsegsize */ in iflib_txsd_alloc()
1773 &txq->ift_tso_buf_tag))) { in iflib_txsd_alloc()
1780 if (!(txq->ift_sds.ifsd_m = in iflib_txsd_alloc()
1782 scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { in iflib_txsd_alloc()
1791 if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc( in iflib_txsd_alloc()
1792 sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], in iflib_txsd_alloc()
1799 if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc( in iflib_txsd_alloc()
1800 sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], in iflib_txsd_alloc()
1807 for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { in iflib_txsd_alloc()
1808 err = bus_dmamap_create(txq->ift_buf_tag, 0, in iflib_txsd_alloc()
1809 &txq->ift_sds.ifsd_map[i]); in iflib_txsd_alloc()
1816 err = bus_dmamap_create(txq->ift_tso_buf_tag, 0, in iflib_txsd_alloc()
1817 &txq->ift_sds.ifsd_tso_map[i]); in iflib_txsd_alloc()
1835 if (txq->ift_sds.ifsd_map != NULL) { in iflib_txsd_destroy()
1836 map = txq->ift_sds.ifsd_map[i]; in iflib_txsd_destroy()
1837 bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE); in iflib_txsd_destroy()
1838 bus_dmamap_unload(txq->ift_buf_tag, map); in iflib_txsd_destroy()
1839 bus_dmamap_destroy(txq->ift_buf_tag, map); in iflib_txsd_destroy()
1840 txq->ift_sds.ifsd_map[i] = NULL; in iflib_txsd_destroy()
1843 if (txq->ift_sds.ifsd_tso_map != NULL) { in iflib_txsd_destroy()
1844 map = txq->ift_sds.ifsd_tso_map[i]; in iflib_txsd_destroy()
1845 bus_dmamap_sync(txq->ift_tso_buf_tag, map, in iflib_txsd_destroy()
1847 bus_dmamap_unload(txq->ift_tso_buf_tag, map); in iflib_txsd_destroy()
1848 bus_dmamap_destroy(txq->ift_tso_buf_tag, map); in iflib_txsd_destroy()
1849 txq->ift_sds.ifsd_tso_map[i] = NULL; in iflib_txsd_destroy()
1856 if_ctx_t ctx = txq->ift_ctx; in iflib_txq_destroy()
1858 for (int i = 0; i < txq->ift_size; i++) in iflib_txq_destroy()
1861 if (txq->ift_br != NULL) { in iflib_txq_destroy()
1862 ifmp_ring_free(txq->ift_br); in iflib_txq_destroy()
1863 txq->ift_br = NULL; in iflib_txq_destroy()
1866 mtx_destroy(&txq->ift_mtx); in iflib_txq_destroy()
1868 if (txq->ift_sds.ifsd_map != NULL) { in iflib_txq_destroy()
1869 free(txq->ift_sds.ifsd_map, M_IFLIB); in iflib_txq_destroy()
1870 txq->ift_sds.ifsd_map = NULL; in iflib_txq_destroy()
1872 if (txq->ift_sds.ifsd_tso_map != NULL) { in iflib_txq_destroy()
1873 free(txq->ift_sds.ifsd_tso_map, M_IFLIB); in iflib_txq_destroy()
1874 txq->ift_sds.ifsd_tso_map = NULL; in iflib_txq_destroy()
1876 if (txq->ift_sds.ifsd_m != NULL) { in iflib_txq_destroy()
1877 free(txq->ift_sds.ifsd_m, M_IFLIB); in iflib_txq_destroy()
1878 txq->ift_sds.ifsd_m = NULL; in iflib_txq_destroy()
1880 if (txq->ift_buf_tag != NULL) { in iflib_txq_destroy()
1881 bus_dma_tag_destroy(txq->ift_buf_tag); in iflib_txq_destroy()
1882 txq->ift_buf_tag = NULL; in iflib_txq_destroy()
1884 if (txq->ift_tso_buf_tag != NULL) { in iflib_txq_destroy()
1885 bus_dma_tag_destroy(txq->ift_tso_buf_tag); in iflib_txq_destroy()
1886 txq->ift_tso_buf_tag = NULL; in iflib_txq_destroy()
1888 if (txq->ift_ifdi != NULL) { in iflib_txq_destroy()
1889 free(txq->ift_ifdi, M_IFLIB); in iflib_txq_destroy()
1898 mp = &txq->ift_sds.ifsd_m[i]; in iflib_txsd_free()
1902 if (txq->ift_sds.ifsd_map != NULL) { in iflib_txsd_free()
1903 bus_dmamap_sync(txq->ift_buf_tag, in iflib_txsd_free()
1904 txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE); in iflib_txsd_free()
1905 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]); in iflib_txsd_free()
1907 if (txq->ift_sds.ifsd_tso_map != NULL) { in iflib_txsd_free()
1908 bus_dmamap_sync(txq->ift_tso_buf_tag, in iflib_txsd_free()
1909 txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE); in iflib_txsd_free()
1910 bus_dmamap_unload(txq->ift_tso_buf_tag, in iflib_txsd_free()
1911 txq->ift_sds.ifsd_tso_map[i]); in iflib_txsd_free()
1921 if_ctx_t ctx = txq->ift_ctx; in iflib_txq_setup()
1922 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_txq_setup()
1923 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_txq_setup()
1928 txq->ift_qstatus = IFLIB_QUEUE_IDLE; in iflib_txq_setup()
1930 txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ; in iflib_txq_setup()
1933 txq->ift_cidx_processed = 0; in iflib_txq_setup()
1934 txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0; in iflib_txq_setup()
1935 txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset]; in iflib_txq_setup()
1937 for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++) in iflib_txq_setup()
1938 bzero((void *)di->idi_vaddr, di->idi_size); in iflib_txq_setup()
1940 IFDI_TXQ_SETUP(ctx, txq->ift_id); in iflib_txq_setup()
1941 for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++) in iflib_txq_setup()
1942 bus_dmamap_sync(di->idi_tag, di->idi_map, in iflib_txq_setup()
1961 if_ctx_t ctx = rxq->ifr_ctx; in iflib_rxsd_alloc()
1962 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_rxsd_alloc()
1963 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_rxsd_alloc()
1964 device_t dev = ctx->ifc_dev; in iflib_rxsd_alloc()
1969 MPASS(scctx->isc_nrxd[0] > 0); in iflib_rxsd_alloc()
1970 MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); in iflib_rxsd_alloc()
1972 lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width); in iflib_rxsd_alloc()
1974 fl = rxq->ifr_fl; in iflib_rxsd_alloc()
1975 for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { in iflib_rxsd_alloc()
1976 fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ in iflib_rxsd_alloc()
1983 sctx->isc_rx_maxsize, /* maxsize */ in iflib_rxsd_alloc()
1984 sctx->isc_rx_nsegments, /* nsegments */ in iflib_rxsd_alloc()
1985 sctx->isc_rx_maxsegsize, /* maxsegsize */ in iflib_rxsd_alloc()
1989 &fl->ifl_buf_tag); in iflib_rxsd_alloc()
1997 if (!(fl->ifl_sds.ifsd_m = in iflib_rxsd_alloc()
1999 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { in iflib_rxsd_alloc()
2007 if (!(fl->ifl_sds.ifsd_cl = in iflib_rxsd_alloc()
2009 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { in iflib_rxsd_alloc()
2017 if (!(fl->ifl_sds.ifsd_ba = in iflib_rxsd_alloc()
2019 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { in iflib_rxsd_alloc()
2029 if (!(fl->ifl_sds.ifsd_map = in iflib_rxsd_alloc()
2030 …(bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOW… in iflib_rxsd_alloc()
2036 for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { in iflib_rxsd_alloc()
2037 err = bus_dmamap_create(fl->ifl_buf_tag, 0, in iflib_rxsd_alloc()
2038 &fl->ifl_sds.ifsd_map[i]); in iflib_rxsd_alloc()
2067 cb_arg->error = error; in _rxq_refill_cb()
2068 cb_arg->seg = segs[0]; in _rxq_refill_cb()
2069 cb_arg->nseg = nseg; in _rxq_refill_cb()
2073 * iflib_fl_refill - refill an rxq free-buffer list
2078 * (Re)populate an rxq free-buffer list with up to @count new packet buffers.
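The refill path stages new buffers in ifl_rxd_idxs[]/ifl_bus_addrs[] and hands them to the driver in fixed-size groups, ringing the producer doorbell once at the end. A standalone model of that cadence (stub names and the batch size are hypothetical stand-ins for isc_rxd_refill()/isc_rxd_flush() and the refill batch constant):

#define BATCH 32

static void hw_refill(int n) { (void)n; }       /* isc_rxd_refill() analogue */
static void hw_flush(void) { }                  /* isc_rxd_flush() analogue */

static void
refill_model(int count)
{
        int staged = 0;

        while (count-- > 0) {
                /* allocate a cluster, DMA-load it, record index and bus addr */
                if (++staged == BATCH) {
                        hw_refill(staged);      /* hand a full batch to the NIC */
                        staged = 0;
                }
        }
        if (staged > 0)
                hw_refill(staged);              /* partial final batch */
        hw_flush();                             /* single doorbell write */
}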
2095 MPASS(count <= fl->ifl_size - fl->ifl_credits - 1); in iflib_fl_refill()
2097 sd_m = fl->ifl_sds.ifsd_m; in iflib_fl_refill()
2098 sd_map = fl->ifl_sds.ifsd_map; in iflib_fl_refill()
2099 sd_cl = fl->ifl_sds.ifsd_cl; in iflib_fl_refill()
2100 sd_ba = fl->ifl_sds.ifsd_ba; in iflib_fl_refill()
2101 pidx = fl->ifl_pidx; in iflib_fl_refill()
2103 frag_idx = fl->ifl_fragidx; in iflib_fl_refill()
2104 credits = fl->ifl_credits; in iflib_fl_refill()
2109 MPASS(credits + n <= fl->ifl_size); in iflib_fl_refill()
2111 if (pidx < fl->ifl_cidx) in iflib_fl_refill()
2112 MPASS(pidx + n <= fl->ifl_cidx); in iflib_fl_refill()
2113 if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) in iflib_fl_refill()
2114 MPASS(fl->ifl_gen == 0); in iflib_fl_refill()
2115 if (pidx > fl->ifl_cidx) in iflib_fl_refill()
2116 MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); in iflib_fl_refill()
2121 iru_init(&iru, fl->ifl_rxq, fl->ifl_id); in iflib_fl_refill()
2122 while (n-- > 0) { in iflib_fl_refill()
2130 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, in iflib_fl_refill()
2133 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); in iflib_fl_refill()
2136 cl = uma_zalloc(fl->ifl_zone, M_NOWAIT); in iflib_fl_refill()
2142 err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx], in iflib_fl_refill()
2143 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, in iflib_fl_refill()
2146 uma_zfree(fl->ifl_zone, cl); in iflib_fl_refill()
2153 fl->ifl_cl_enqueued++; in iflib_fl_refill()
2158 bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx], in iflib_fl_refill()
2167 bit_set(fl->ifl_rx_bitmap, frag_idx); in iflib_fl_refill()
2169 fl->ifl_m_enqueued++; in iflib_fl_refill()
2173 fl->ifl_rxd_idxs[i] = frag_idx; in iflib_fl_refill()
2174 fl->ifl_bus_addrs[i] = bus_addr; in iflib_fl_refill()
2177 MPASS(credits <= fl->ifl_size); in iflib_fl_refill()
2178 if (++idx == fl->ifl_size) { in iflib_fl_refill()
2180 fl->ifl_gen = 1; in iflib_fl_refill()
2187 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); in iflib_fl_refill()
2188 fl->ifl_pidx = idx; in iflib_fl_refill()
2189 fl->ifl_credits = credits; in iflib_fl_refill()
2195 if (n < count - 1) { in iflib_fl_refill()
2199 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); in iflib_fl_refill()
2200 fl->ifl_pidx = idx; in iflib_fl_refill()
2201 fl->ifl_credits = credits; in iflib_fl_refill()
2204 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, in iflib_fl_refill()
2206 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, in iflib_fl_refill()
2207 fl->ifl_id, fl->ifl_pidx); in iflib_fl_refill()
2208 if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) { in iflib_fl_refill()
2209 fl->ifl_fragidx = frag_idx + 1; in iflib_fl_refill()
2210 if (fl->ifl_fragidx == fl->ifl_size) in iflib_fl_refill()
2211 fl->ifl_fragidx = 0; in iflib_fl_refill()
2213 fl->ifl_fragidx = frag_idx; in iflib_fl_refill()
2217 return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY); in iflib_fl_refill()
2229 * driver to the NIC (RDT - 1 is thus the last valid one). in iflib_fl_refill_all()
2234 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; in iflib_fl_refill_all()
2236 …int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - in iflib_fl_refill_all()
2239 MPASS(fl->ifl_credits <= fl->ifl_size); in iflib_fl_refill_all()
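The `- 1` keeps one slot permanently empty so a full ring (pidx catching cidx) stays distinguishable from an empty one. Worked numbers, for illustration only:

#include <stdint.h>

int32_t reclaimable = 1024 - 1000 - 1;  /* ifl_size = 1024, ifl_credits = 1000 -> 23 */
/* 23 buffers may still be posted before the free list is as full as allowed */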
2253 in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH); in iflib_in_detach()
2261 iflib_dma_info_t idi = fl->ifl_ifdi; in iflib_fl_bufs_free()
2265 for (i = 0; i < fl->ifl_size; i++) { in iflib_fl_bufs_free()
2266 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; in iflib_fl_bufs_free()
2267 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; in iflib_fl_bufs_free()
2270 sd_map = fl->ifl_sds.ifsd_map[i]; in iflib_fl_bufs_free()
2271 bus_dmamap_sync(fl->ifl_buf_tag, sd_map, in iflib_fl_bufs_free()
2273 bus_dmamap_unload(fl->ifl_buf_tag, sd_map); in iflib_fl_bufs_free()
2274 uma_zfree(fl->ifl_zone, *sd_cl); in iflib_fl_bufs_free()
2285 fl->ifl_m_dequeued++; in iflib_fl_bufs_free()
2286 fl->ifl_cl_dequeued++; in iflib_fl_bufs_free()
2290 for (i = 0; i < fl->ifl_size; i++) { in iflib_fl_bufs_free()
2291 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); in iflib_fl_bufs_free()
2292 MPASS(fl->ifl_sds.ifsd_m[i] == NULL); in iflib_fl_bufs_free()
2298 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; in iflib_fl_bufs_free()
2299 bzero(idi->idi_vaddr, idi->idi_size); in iflib_fl_bufs_free()
2310 iflib_rxq_t rxq = fl->ifl_rxq; in iflib_fl_setup()
2311 if_ctx_t ctx = rxq->ifr_ctx; in iflib_fl_setup()
2312 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_fl_setup()
2315 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); in iflib_fl_setup()
2321 MPASS(fl->ifl_credits == 0); in iflib_fl_setup()
2322 qidx = rxq->ifr_fl_offset + fl->ifl_id; in iflib_fl_setup()
2323 if (scctx->isc_rxd_buf_size[qidx] != 0) in iflib_fl_setup()
2324 fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx]; in iflib_fl_setup()
2326 fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz; in iflib_fl_setup()
2328 * ifl_buf_size may be a driver-supplied value, so pull it up in iflib_fl_setup()
2331 fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size); in iflib_fl_setup()
2332 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) in iflib_fl_setup()
2333 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; in iflib_fl_setup()
2334 fl->ifl_cltype = m_gettype(fl->ifl_buf_size); in iflib_fl_setup()
2335 fl->ifl_zone = m_getzone(fl->ifl_buf_size); in iflib_fl_setup()
2338 * Avoid pre-allocating zillions of clusters to an idle card in iflib_fl_setup()
2343 MPASS(fl->ifl_size > 0); in iflib_fl_setup()
2344 (void)iflib_fl_refill(ctx, fl, min(128, fl->ifl_size - 1)); in iflib_fl_setup()
2345 if (min(128, fl->ifl_size - 1) != fl->ifl_credits) in iflib_fl_setup()
2351 MPASS(fl->ifl_ifdi != NULL); in iflib_fl_setup()
2352 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, in iflib_fl_setup()
2368 if (rxq->ifr_fl != NULL) { in iflib_rx_sds_free()
2369 for (i = 0; i < rxq->ifr_nfl; i++) { in iflib_rx_sds_free()
2370 fl = &rxq->ifr_fl[i]; in iflib_rx_sds_free()
2371 if (fl->ifl_buf_tag != NULL) { in iflib_rx_sds_free()
2372 if (fl->ifl_sds.ifsd_map != NULL) { in iflib_rx_sds_free()
2373 for (j = 0; j < fl->ifl_size; j++) { in iflib_rx_sds_free()
2375 fl->ifl_buf_tag, in iflib_rx_sds_free()
2376 fl->ifl_sds.ifsd_map[j], in iflib_rx_sds_free()
2379 fl->ifl_buf_tag, in iflib_rx_sds_free()
2380 fl->ifl_sds.ifsd_map[j]); in iflib_rx_sds_free()
2382 fl->ifl_buf_tag, in iflib_rx_sds_free()
2383 fl->ifl_sds.ifsd_map[j]); in iflib_rx_sds_free()
2386 bus_dma_tag_destroy(fl->ifl_buf_tag); in iflib_rx_sds_free()
2387 fl->ifl_buf_tag = NULL; in iflib_rx_sds_free()
2389 free(fl->ifl_sds.ifsd_m, M_IFLIB); in iflib_rx_sds_free()
2390 free(fl->ifl_sds.ifsd_cl, M_IFLIB); in iflib_rx_sds_free()
2391 free(fl->ifl_sds.ifsd_ba, M_IFLIB); in iflib_rx_sds_free()
2392 free(fl->ifl_sds.ifsd_map, M_IFLIB); in iflib_rx_sds_free()
2393 free(fl->ifl_rx_bitmap, M_IFLIB); in iflib_rx_sds_free()
2394 fl->ifl_sds.ifsd_m = NULL; in iflib_rx_sds_free()
2395 fl->ifl_sds.ifsd_cl = NULL; in iflib_rx_sds_free()
2396 fl->ifl_sds.ifsd_ba = NULL; in iflib_rx_sds_free()
2397 fl->ifl_sds.ifsd_map = NULL; in iflib_rx_sds_free()
2398 fl->ifl_rx_bitmap = NULL; in iflib_rx_sds_free()
2400 free(rxq->ifr_fl, M_IFLIB); in iflib_rx_sds_free()
2401 rxq->ifr_fl = NULL; in iflib_rx_sds_free()
2402 free(rxq->ifr_ifdi, M_IFLIB); in iflib_rx_sds_free()
2403 rxq->ifr_ifdi = NULL; in iflib_rx_sds_free()
2404 rxq->ifr_cq_cidx = 0; in iflib_rx_sds_free()
2415 if_ctx_t ctx = txq->ift_ctx; in iflib_timer()
2416 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; in iflib_timer()
2419 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) in iflib_timer()
2427 if (this_tick - txq->ift_last_timer_tick >= iflib_timer_default) { in iflib_timer()
2428 txq->ift_last_timer_tick = this_tick; in iflib_timer()
2429 IFDI_TIMER(ctx, txq->ift_id); in iflib_timer()
2430 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && in iflib_timer()
2431 ((txq->ift_cleaned_prev == txq->ift_cleaned) || in iflib_timer()
2432 (sctx->isc_pause_frames == 0))) in iflib_timer()
2435 if (txq->ift_qstatus != IFLIB_QUEUE_IDLE && in iflib_timer()
2436 ifmp_ring_is_stalled(txq->ift_br)) { in iflib_timer()
2437 KASSERT(ctx->ifc_link_state == LINK_STATE_UP, in iflib_timer()
2439 txq->ift_qstatus = IFLIB_QUEUE_HUNG; in iflib_timer()
2441 txq->ift_cleaned_prev = txq->ift_cleaned; in iflib_timer()
2444 if (txq->ift_db_pending) in iflib_timer()
2445 GROUPTASK_ENQUEUE(&txq->ift_task); in iflib_timer()
2447 sctx->isc_pause_frames = 0; in iflib_timer()
2448 if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) in iflib_timer()
2449 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, in iflib_timer()
2450 txq, txq->ift_timer.c_cpu); in iflib_timer()
2454 device_printf(ctx->ifc_dev, in iflib_timer()
2455 "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n", in iflib_timer()
2456 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); in iflib_timer()
2458 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); in iflib_timer()
2459 ctx->ifc_flags |= (IFC_DO_WATCHDOG | IFC_DO_RESET); in iflib_timer()
2477 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; in iflib_calc_rx_mbuf_sz()
2483 ctx->ifc_rx_mbuf_sz = in iflib_calc_rx_mbuf_sz()
2484 iflib_get_mbuf_size_for(sctx->isc_max_frame_size); in iflib_calc_rx_mbuf_sz()
2491 return (ctx->ifc_rx_mbuf_sz); in iflib_get_rx_mbuf_sz()
2497 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_init_locked()
2498 if_t ifp = ctx->ifc_ifp; in iflib_init_locked()
2513 tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); in iflib_init_locked()
2514 tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); in iflib_init_locked()
2526 for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) { in iflib_init_locked()
2528 callout_stop(&txq->ift_timer); in iflib_init_locked()
2530 callout_stop(&txq->ift_netmap_timer); in iflib_init_locked()
2548 for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { in iflib_init_locked()
2553 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { in iflib_init_locked()
2555 device_printf(ctx->ifc_dev, in iflib_init_locked()
2556 "setting up free list %d failed - " in iflib_init_locked()
2563 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); in iflib_init_locked()
2565 txq = ctx->ifc_txqs; in iflib_init_locked()
2566 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) in iflib_init_locked()
2567 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq, in iflib_init_locked()
2568 txq->ift_timer.c_cpu); in iflib_init_locked()
2570 /* Re-enable txsync/rxsync. */ in iflib_init_locked()
2601 iflib_txq_t txq = ctx->ifc_txqs; in iflib_stop()
2602 iflib_rxq_t rxq = ctx->ifc_rxqs; in iflib_stop()
2603 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_stop()
2604 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_stop()
2610 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); in iflib_stop()
2622 netmap_disable_all_rings(ctx->ifc_ifp); in iflib_stop()
2626 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { in iflib_stop()
2630 callout_stop(&txq->ift_timer); in iflib_stop()
2632 callout_stop(&txq->ift_netmap_timer); in iflib_stop()
2636 if (!ctx->ifc_sysctl_simple_tx) { in iflib_stop()
2641 for (j = 0; j < txq->ift_size; j++) { in iflib_stop()
2644 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; in iflib_stop()
2645 txq->ift_in_use = txq->ift_gen = txq->ift_no_desc_avail = 0; in iflib_stop()
2646 if (sctx->isc_flags & IFLIB_PRESERVE_TX_INDICES) in iflib_stop()
2647 txq->ift_cidx = txq->ift_pidx; in iflib_stop()
2649 txq->ift_cidx = txq->ift_pidx = 0; in iflib_stop()
2651 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; in iflib_stop()
2652 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; in iflib_stop()
2653 txq->ift_pullups = 0; in iflib_stop()
2654 ifmp_ring_reset_stats(txq->ift_br); in iflib_stop()
2655 for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++) in iflib_stop()
2656 bzero((void *)di->idi_vaddr, di->idi_size); in iflib_stop()
2658 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { in iflib_stop()
2659 if (rxq->ifr_task.gt_taskqueue != NULL) in iflib_stop()
2660 gtaskqueue_drain(rxq->ifr_task.gt_taskqueue, in iflib_stop()
2661 &rxq->ifr_task.gt_task); in iflib_stop()
2663 rxq->ifr_cq_cidx = 0; in iflib_stop()
2664 for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++) in iflib_stop()
2665 bzero((void *)di->idi_vaddr, di->idi_size); in iflib_stop()
2667 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) in iflib_stop()
2679 nrxd = fl->ifl_size; in calc_next_rxd()
2680 size = fl->ifl_rxd_size; in calc_next_rxd()
2681 start = fl->ifl_ifdi->idi_vaddr; in calc_next_rxd()
2695 int nrxd = fl->ifl_size; in prefetch_pkts()
2698 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd - 1); in prefetch_pkts()
2699 prefetch(&fl->ifl_sds.ifsd_m[nextptr]); in prefetch_pkts()
2700 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); in prefetch_pkts()
2703 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd - 1)]); in prefetch_pkts()
2704 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd - 1)]); in prefetch_pkts()
2705 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd - 1)]); in prefetch_pkts()
2706 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd - 1)]); in prefetch_pkts()
2707 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd - 1)]); in prefetch_pkts()
2708 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd - 1)]); in prefetch_pkts()
2709 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd - 1)]); in prefetch_pkts()
2710 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd - 1)]); in prefetch_pkts()
2724 flid = irf->irf_flid; in rxd_frag_to_sd()
2725 cidx = irf->irf_idx; in rxd_frag_to_sd()
2726 fl = &rxq->ifr_fl[flid]; in rxd_frag_to_sd()
2727 sd->ifsd_fl = fl; in rxd_frag_to_sd()
2728 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; in rxd_frag_to_sd()
2729 fl->ifl_credits--; in rxd_frag_to_sd()
2731 fl->ifl_m_dequeued++; in rxd_frag_to_sd()
2733 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) in rxd_frag_to_sd()
2735 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size - 1); in rxd_frag_to_sd()
2736 prefetch(&fl->ifl_sds.ifsd_map[next]); in rxd_frag_to_sd()
2737 map = fl->ifl_sds.ifsd_map[cidx]; in rxd_frag_to_sd()
2739 bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD); in rxd_frag_to_sd()
2741 if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL && in rxd_frag_to_sd()
2742 irf->irf_len != 0) { in rxd_frag_to_sd()
2743 payload = *sd->ifsd_cl; in rxd_frag_to_sd()
2744 payload += ri->iri_pad; in rxd_frag_to_sd()
2745 len = ri->iri_len - ri->iri_pad; in rxd_frag_to_sd()
2746 *pf_rv = pfil_mem_in(rxq->pfil, payload, len, ri->iri_ifp, &m); in rxd_frag_to_sd()
2768 m = fl->ifl_sds.ifsd_m[cidx]; in rxd_frag_to_sd()
2769 fl->ifl_sds.ifsd_m[cidx] = NULL; in rxd_frag_to_sd()
2775 m = fl->ifl_sds.ifsd_m[cidx]; in rxd_frag_to_sd()
2776 fl->ifl_sds.ifsd_m[cidx] = NULL; in rxd_frag_to_sd()
2781 if (unload && irf->irf_len != 0) in rxd_frag_to_sd()
2782 bus_dmamap_unload(fl->ifl_buf_tag, map); in rxd_frag_to_sd()
2783 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size - 1); in rxd_frag_to_sd()
2784 if (__predict_false(fl->ifl_cidx == 0)) in rxd_frag_to_sd()
2785 fl->ifl_gen = 0; in rxd_frag_to_sd()
2786 bit_clear(fl->ifl_rx_bitmap, cidx); in rxd_frag_to_sd()
2804 m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd, in assemble_segments()
2807 MPASS(*sd->ifsd_cl != NULL); in assemble_segments()
2810 * Exclude zero-length frags & frags from in assemble_segments()
2813 if (ri->iri_frags[i].irf_len == 0 || consumed || in assemble_segments()
2829 padlen = ri->iri_pad; in assemble_segments()
2832 mt->m_next = m; in assemble_segments()
2837 cl = *sd->ifsd_cl; in assemble_segments()
2838 *sd->ifsd_cl = NULL; in assemble_segments()
2842 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); in assemble_segments()
2846 m->m_data += padlen; in assemble_segments()
2847 ri->iri_len -= padlen; in assemble_segments()
2848 m->m_len = ri->iri_frags[i].irf_len; in assemble_segments()
2849 } while (++i < ri->iri_nfrags); in assemble_segments()
2865 if (ri->iri_nfrags == 1 && in iflib_rxd_pkt_get()
2866 ri->iri_frags[0].irf_len != 0 && in iflib_rxd_pkt_get()
2867 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { in iflib_rxd_pkt_get()
2868 m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd, in iflib_rxd_pkt_get()
2875 if (!IP_ALIGNED(m) && ri->iri_pad == 0) in iflib_rxd_pkt_get()
2876 m->m_data += 2; in iflib_rxd_pkt_get()
2878 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); in iflib_rxd_pkt_get()
2879 m->m_len = ri->iri_frags[0].irf_len; in iflib_rxd_pkt_get()
2880 m->m_data += ri->iri_pad; in iflib_rxd_pkt_get()
2881 ri->iri_len -= ri->iri_pad; in iflib_rxd_pkt_get()
2890 m->m_pkthdr.len = ri->iri_len; in iflib_rxd_pkt_get()
2891 m->m_pkthdr.rcvif = ri->iri_ifp; in iflib_rxd_pkt_get()
2892 m->m_flags |= ri->iri_flags; in iflib_rxd_pkt_get()
2893 m->m_pkthdr.ether_vtag = ri->iri_vtag; in iflib_rxd_pkt_get()
2894 m->m_pkthdr.flowid = ri->iri_flowid; in iflib_rxd_pkt_get()
2896 m->m_pkthdr.numa_domain = if_getnumadomain(ri->iri_ifp); in iflib_rxd_pkt_get()
2898 M_HASHTYPE_SET(m, ri->iri_rsstype); in iflib_rxd_pkt_get()
2899 m->m_pkthdr.csum_flags = ri->iri_csum_flags; in iflib_rxd_pkt_get()
2900 m->m_pkthdr.csum_data = ri->iri_csum_data; in iflib_rxd_pkt_get()
2909 GROUPTASK_ENQUEUE(&rxq->ifr_task); in _task_fn_rx_watchdog()
2916 if_ctx_t ctx = rxq->ifr_ctx; in iflib_rxeof()
2917 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_rxeof()
2918 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_rxeof()
2937 ifp = ctx->ifc_ifp; in iflib_rxeof()
2941 if (sctx->isc_flags & IFLIB_HAS_RXCQ) in iflib_rxeof()
2942 cidxp = &rxq->ifr_cq_cidx; in iflib_rxeof()
2944 cidxp = &rxq->ifr_fl[0].ifl_cidx; in iflib_rxeof()
2946 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) in iflib_rxeof()
2967 ri.iri_qsidx = rxq->ifr_id; in iflib_rxeof()
2970 ri.iri_frags = rxq->ifr_frags; in iflib_rxeof()
2971 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); in iflib_rxeof()
2977 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { in iflib_rxeof()
2980 /* XXX NB: shurd - check if this is still safe */ in iflib_rxeof()
2981 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) in iflib_rxeof()
2982 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; in iflib_rxeof()
2992 avail--; in iflib_rxeof()
2993 budget_left--; in iflib_rxeof()
3006 tcp_lro_queue_mbuf(&rxq->ifr_lc, m); in iflib_rxeof()
3014 mt->m_nextpkt = m; in iflib_rxeof()
3020 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) in iflib_rxeof()
3035 tcp_lro_flush_all(&rxq->ifr_lc); in iflib_rxeof()
3042 ctx->ifc_flags |= IFC_DO_RESET; in iflib_rxeof()
3048 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq) - 1)
3053 qidx_t minthresh = txq->ift_size / 8; in txq_max_db_deferred()
3067 qidx_t minthresh = txq->ift_size / 8; in txq_max_rs_deferred()
3068 if (txq->ift_in_use > 4 * minthresh) in txq_max_rs_deferred()
3070 if (txq->ift_in_use > 2 * minthresh) in txq_max_rs_deferred()
3072 if (txq->ift_in_use > minthresh) in txq_max_rs_deferred()
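Both deferral helpers scale with queue pressure in eighths of the ring: minthresh is ift_size / 8 and the ladder tests ift_in_use against 4x, 2x, and 1x that value (the per-bucket return values are elided from this listing). Illustrative breakpoints:

#include <stdint.h>

uint16_t minthresh = 1024 / 8;  /* ift_size = 1024 -> minthresh = 128 (qidx_t in iflib) */
/* in_use > 512: defer the most; > 256: less; > 128: less still;
 * otherwise the smallest deferral applies. */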
3077 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
3078 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
3086 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
3087 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
3088 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ct…
3089 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_…
3091 #define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
3092 (ctx)->ifc_softc_ctx.isc_tx_nsegments)
3097 if_ctx_t ctx = txq->ift_ctx; in iflib_txd_db_check()
3100 max = TXQ_MAX_DB_DEFERRED(txq, txq->ift_in_use); in iflib_txd_db_check()
3103 if (ring || (txq->ift_db_pending >= max) || (TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2)) { in iflib_txd_db_check()
3110 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; in iflib_txd_db_check()
3111 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, in iflib_txd_db_check()
3113 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); in iflib_txd_db_check()
3118 txq->ift_db_pending = txq->ift_npending = 0; in iflib_txd_db_check()
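So the doorbell write is performed when the caller forces it, when enough descriptors have accumulated, or when the queue is nearly out of space; otherwise it is deferred to batch MMIO writes. A hedged model of the decision:

/* Model only: returns 1 when the hardware doorbell should be rung now. */
static int
db_check_model(int force, int db_pending, int max_deferred, int avail, int max_tx_desc)
{
        if (force || db_pending >= max_deferred || avail <= max_tx_desc + 2)
                return (1);     /* flush; ift_db_pending/ift_npending reset to 0 */
        return (0);             /* keep batching doorbell writes */
}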
3129 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); in print_pkt()
3131 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); in print_pkt()
3133 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); in print_pkt()
3137 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
3138 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
3139 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
3140 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
3158 if (__predict_false(m->m_len < sizeof(*eh))) { in iflib_parse_ether_header()
3164 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { in iflib_parse_ether_header()
3165 pi->ipi_etype = ntohs(eh->evl_proto); in iflib_parse_ether_header()
3166 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; in iflib_parse_ether_header()
3168 pi->ipi_etype = ntohs(eh->evl_encap_proto); in iflib_parse_ether_header()
3169 pi->ipi_ehdrlen = ETHER_HDR_LEN; in iflib_parse_ether_header()
3201 /* Fills out pi->ipi_etype */ in iflib_parse_header_partial()
3207 switch (pi->ipi_etype) { in iflib_parse_header_partial()
3215 miniplen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip)); in iflib_parse_header_partial()
3216 if (__predict_false(m->m_len < miniplen)) { in iflib_parse_header_partial()
3221 if (m->m_len == pi->ipi_ehdrlen) { in iflib_parse_header_partial()
3222 n = m->m_next; in iflib_parse_header_partial()
3225 if (n->m_len >= sizeof(*ip)) { in iflib_parse_header_partial()
3226 ip = (struct ip *)n->m_data; in iflib_parse_header_partial()
3231 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); in iflib_parse_header_partial()
3237 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); in iflib_parse_header_partial()
3240 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); in iflib_parse_header_partial()
3244 pi->ipi_ip_hlen = ip->ip_hl << 2; in iflib_parse_header_partial()
3245 pi->ipi_ipproto = ip->ip_p; in iflib_parse_header_partial()
3246 pi->ipi_ip_tos = ip->ip_tos; in iflib_parse_header_partial()
3247 pi->ipi_flags |= IPI_TX_IPV4; in iflib_parse_header_partial()
3257 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { in iflib_parse_header_partial()
3259 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) in iflib_parse_header_partial()
3262 ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); in iflib_parse_header_partial()
3265 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); in iflib_parse_header_partial()
3266 pi->ipi_ipproto = ip6->ip6_nxt; in iflib_parse_header_partial()
3267 pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6); in iflib_parse_header_partial()
3268 pi->ipi_flags |= IPI_TX_IPV6; in iflib_parse_header_partial()
3274 pi->ipi_csum_flags &= ~CSUM_OFFLOAD; in iflib_parse_header_partial()
3275 pi->ipi_ip_hlen = 0; in iflib_parse_header_partial()
3287 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; in iflib_parse_header()
3292 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && in iflib_parse_header()
3303 /* Fills out pi->ipi_etype */ in iflib_parse_header()
3304 err = iflib_parse_ether_header(pi, mp, &txq->ift_pullups); in iflib_parse_header()
3309 switch (pi->ipi_etype) { in iflib_parse_header()
3317 hlen = pi->ipi_ehdrlen + sizeof(*ip); in iflib_parse_header()
3318 if (__predict_false(m->m_len < hlen)) { in iflib_parse_header()
3319 txq->ift_pullups++; in iflib_parse_header()
3323 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); in iflib_parse_header()
3324 hlen = pi->ipi_ehdrlen + (ip->ip_hl << 2); in iflib_parse_header()
3325 if (ip->ip_p == IPPROTO_TCP) { in iflib_parse_header()
3327 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2)); in iflib_parse_header()
3328 } else if (ip->ip_p == IPPROTO_UDP) { in iflib_parse_header()
3331 if (__predict_false(m->m_len < hlen)) { in iflib_parse_header()
3332 txq->ift_pullups++; in iflib_parse_header()
3336 pi->ipi_ip_hlen = ip->ip_hl << 2; in iflib_parse_header()
3337 pi->ipi_ipproto = ip->ip_p; in iflib_parse_header()
3338 pi->ipi_ip_tos = ip->ip_tos; in iflib_parse_header()
3339 pi->ipi_flags |= IPI_TX_IPV4; in iflib_parse_header()
3343 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { in iflib_parse_header()
3344 pi->ipi_tcp_hflags = tcp_get_flags(th); in iflib_parse_header()
3345 pi->ipi_tcp_hlen = th->th_off << 2; in iflib_parse_header()
3346 pi->ipi_tcp_seq = th->th_seq; in iflib_parse_header()
3349 if (__predict_false(ip->ip_p != IPPROTO_TCP)) in iflib_parse_header()
3354 pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP); in iflib_parse_header()
3355 th->th_sum = in_pseudo(ip->ip_src.s_addr, in iflib_parse_header()
3356 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); in iflib_parse_header()
3357 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; in iflib_parse_header()
3358 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { in iflib_parse_header()
3359 ip->ip_sum = 0; in iflib_parse_header()
3360 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); in iflib_parse_header()
3364 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) in iflib_parse_header()
3365 ip->ip_sum = 0; in iflib_parse_header()
3373 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); in iflib_parse_header()
3375 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); in iflib_parse_header()
3377 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { in iflib_parse_header()
3378 txq->ift_pullups++; in iflib_parse_header()
3379 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) in iflib_parse_header()
3382 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); in iflib_parse_header()
3384 /* XXX-BZ this will go badly in case of ext hdrs. */ in iflib_parse_header()
3385 pi->ipi_ipproto = ip6->ip6_nxt; in iflib_parse_header()
3386 pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6); in iflib_parse_header()
3387 pi->ipi_flags |= IPI_TX_IPV6; in iflib_parse_header()
3391 if (pi->ipi_ipproto == IPPROTO_TCP) { in iflib_parse_header()
3392 …if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { in iflib_parse_header()
3393 txq->ift_pullups++; in iflib_parse_header()
3394 …if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcph… in iflib_parse_header()
3397 pi->ipi_tcp_hflags = tcp_get_flags(th); in iflib_parse_header()
3398 pi->ipi_tcp_hlen = th->th_off << 2; in iflib_parse_header()
3399 pi->ipi_tcp_seq = th->th_seq; in iflib_parse_header()
3402 if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) in iflib_parse_header()
3407 pi->ipi_csum_flags |= CSUM_IP6_TCP; in iflib_parse_header()
3408 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); in iflib_parse_header()
3409 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; in iflib_parse_header()
3416 pi->ipi_csum_flags &= ~CSUM_OFFLOAD; in iflib_parse_header()
3417 pi->ipi_ip_hlen = 0; in iflib_parse_header()
3436 ifsd_m = txq->ift_sds.ifsd_m; in iflib_remove_mbuf()
3437 ntxd = txq->ift_size; in iflib_remove_mbuf()
3438 pidx = txq->ift_pidx & (ntxd - 1); in iflib_remove_mbuf()
3439 ifsd_m = txq->ift_sds.ifsd_m; in iflib_remove_mbuf()
3442 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]); in iflib_remove_mbuf()
3443 if (txq->ift_sds.ifsd_tso_map != NULL) in iflib_remove_mbuf()
3444 bus_dmamap_unload(txq->ift_tso_buf_tag, in iflib_remove_mbuf()
3445 txq->ift_sds.ifsd_tso_map[pidx]); in iflib_remove_mbuf()
3447 txq->ift_dequeued++; in iflib_remove_mbuf()
3459 ntxd = txq->ift_size; in calc_next_txd()
3460 size = txq->ift_txd_size[qid]; in calc_next_txd()
3461 start = txq->ift_ifdi[qid].idi_vaddr; in calc_next_txd()
3499 for (n = min_frame_size - (*m_head)->m_pkthdr.len; in iflib_ether_pad()
3500 n > 0; n -= sizeof(pad)) in iflib_ether_pad()
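The loop header above appends zeros in sizeof(pad)-sized chunks until the frame reaches min_frame_size. A hedged completion of the idiom (m_append() is the real mbuf API; the pad buffer size is illustrative):

static const char pad[18];      /* all zeros; size illustrative */
int n;

for (n = min_frame_size - (*m_head)->m_pkthdr.len; n > 0; n -= sizeof(pad))
        if (!m_append(*m_head, min(n, sizeof(pad)), pad))
                break;          /* out of mbufs: caller will see a short frame */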
3530 ctx = txq->ift_ctx; in iflib_encap()
3531 sctx = ctx->ifc_sctx; in iflib_encap()
3532 scctx = &ctx->ifc_softc_ctx; in iflib_encap()
3533 segs = txq->ift_segs; in iflib_encap()
3534 ntxd = txq->ift_size; in iflib_encap()
3541 cidx = txq->ift_cidx; in iflib_encap()
3542 pidx = txq->ift_pidx; in iflib_encap()
3543 if (ctx->ifc_flags & IFC_PREFETCH) { in iflib_encap()
3544 next = (cidx + CACHE_PTR_INCREMENT) & (ntxd - 1); in iflib_encap()
3545 if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { in iflib_encap()
3551 prefetch(&txq->ift_sds.ifsd_m[next]); in iflib_encap()
3552 prefetch(&txq->ift_sds.ifsd_map[next]); in iflib_encap()
3553 next = (cidx + CACHE_LINE_SIZE) & (ntxd - 1); in iflib_encap()
3555 map = txq->ift_sds.ifsd_map[pidx]; in iflib_encap()
3556 ifsd_m = txq->ift_sds.ifsd_m; in iflib_encap()
3558 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { in iflib_encap()
3559 buf_tag = txq->ift_tso_buf_tag; in iflib_encap()
3560 max_segs = scctx->isc_tx_tso_segments_max; in iflib_encap()
3561 map = txq->ift_sds.ifsd_tso_map[pidx]; in iflib_encap()
3565 buf_tag = txq->ift_buf_tag; in iflib_encap()
3566 max_segs = scctx->isc_tx_nsegments; in iflib_encap()
3567 map = txq->ift_sds.ifsd_map[pidx]; in iflib_encap()
3569 if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && in iflib_encap()
3570 __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { in iflib_encap()
3571 err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); in iflib_encap()
3580 pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG | M_BCAST | M_MCAST)); in iflib_encap()
3582 pi.ipi_qsidx = txq->ift_id; in iflib_encap()
3583 pi.ipi_len = m_head->m_pkthdr.len; in iflib_encap()
3584 pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; in iflib_encap()
3585 pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0; in iflib_encap()
3611 txq->ift_mbuf_defrag++; in iflib_encap()
3626 txq->ift_no_tx_dma_setup++; in iflib_encap()
3629 txq->ift_no_tx_dma_setup++; in iflib_encap()
3635 txq->ift_map_failed++; in iflib_encap()
3643 * descriptors - this does not hold true on all drivers, e.g. in iflib_encap()
3649 txq->ift_no_desc_avail++; in iflib_encap()
3653 if (ctx->ifc_sysctl_simple_tx) { in iflib_encap()
3659 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) in iflib_encap()
3660 GROUPTASK_ENQUEUE(&txq->ift_task); in iflib_encap()
3670 txq->ift_rs_pending += nsegs + 1; in iflib_encap()
3671 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || in iflib_encap()
3672 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { in iflib_encap()
3674 txq->ift_rs_pending = 0; in iflib_encap()
3680 MPASS(pidx >= 0 && pidx < txq->ift_size); in iflib_encap()
3684 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { in iflib_encap()
3687 MPASS(pi.ipi_new_pidx < txq->ift_size); in iflib_encap()
3689 ndesc = pi.ipi_new_pidx - pi.ipi_pidx; in iflib_encap()
3691 ndesc += txq->ift_size; in iflib_encap()
3692 txq->ift_gen = 1; in iflib_encap()
3701 txq->ift_in_use += ndesc; in iflib_encap()
3702 txq->ift_db_pending += ndesc; in iflib_encap()
3708 txq->ift_pidx = pi.ipi_new_pidx; in iflib_encap()
3709 txq->ift_npending += pi.ipi_ndescs; in iflib_encap()
3713 txq->ift_txd_encap_efbig++; in iflib_encap()
3722 * err can't possibly be non-zero here, so we don't need to test it in iflib_encap()
3728 txq->ift_mbuf_defrag_failed++; in iflib_encap()
3729 txq->ift_map_failed++; in iflib_encap()
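/*
 * Illustrative sketch (not iflib code): the deferred report-status (RS)
 * batching seen earlier in iflib_encap() (ift_rs_pending).  Requesting a
 * completion write-back per packet is costly, so one is requested only
 * after enough descriptors accumulate, when low-latency mode forces it,
 * or when the ring is nearly full.  Names and thresholds below are
 * invented for the sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RS_DEFERRED	64	/* assumed batching threshold */
#define MAX_TX_DESC	8	/* assumed worst-case descriptors/packet */

static bool
want_rs_bit(unsigned *rs_pending, unsigned nsegs, unsigned ring_avail,
    bool low_latency)
{
	*rs_pending += nsegs + 1;
	if (*rs_pending > MAX_RS_DEFERRED || low_latency ||
	    ring_avail - nsegs <= MAX_TX_DESC + 2) {
		*rs_pending = 0;	/* completion requested; restart count */
		return (true);
	}
	return (false);
}

int
main(void)
{
	unsigned pending = 60;

	/* 60 + 4 + 1 crosses the 64-descriptor threshold: prints 1 */
	printf("%d\n", want_rs_bit(&pending, 4, 512, false));
	return (0);
}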
3744 cidx = txq->ift_cidx; in iflib_tx_desc_free()
3745 gen = txq->ift_gen; in iflib_tx_desc_free()
3746 qsize = txq->ift_size; in iflib_tx_desc_free()
3747 mask = qsize - 1; in iflib_tx_desc_free()
3748 ifsd_m = txq->ift_sds.ifsd_m; in iflib_tx_desc_free()
3749 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); in iflib_tx_desc_free()
3751 while (n-- > 0) { in iflib_tx_desc_free()
3758 if (m->m_pkthdr.csum_flags & CSUM_TSO) { in iflib_tx_desc_free()
3759 bus_dmamap_sync(txq->ift_tso_buf_tag, in iflib_tx_desc_free()
3760 txq->ift_sds.ifsd_tso_map[cidx], in iflib_tx_desc_free()
3762 bus_dmamap_unload(txq->ift_tso_buf_tag, in iflib_tx_desc_free()
3763 txq->ift_sds.ifsd_tso_map[cidx]); in iflib_tx_desc_free()
3765 bus_dmamap_sync(txq->ift_buf_tag, in iflib_tx_desc_free()
3766 txq->ift_sds.ifsd_map[cidx], in iflib_tx_desc_free()
3768 bus_dmamap_unload(txq->ift_buf_tag, in iflib_tx_desc_free()
3769 txq->ift_sds.ifsd_map[cidx]); in iflib_tx_desc_free()
3772 MPASS(m->m_nextpkt == NULL); in iflib_tx_desc_free()
3776 txq->ift_dequeued++; in iflib_tx_desc_free()
3785 txq->ift_cidx = cidx; in iflib_tx_desc_free()
3786 txq->ift_gen = gen; in iflib_tx_desc_free()
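/*
 * Illustrative sketch (not iflib code): index/generation bookkeeping on a
 * power-of-two ring.  In iflib_encap() above, ift_gen is set when the
 * producer index wraps; in iflib_tx_desc_free() the consumer clears it
 * when cidx wraps, letting the two sides tell "full" from "empty".
 * A common-variant sketch with invented names:
 */
#include <stdio.h>

int
main(void)
{
	unsigned qsize = 8;		/* must be a power of two */
	unsigned cidx = 6, gen = 1;	/* producer wrapped ahead earlier */

	for (int n = 3; n-- > 0;) {
		if (++cidx == qsize) {	/* consumer wraps: clear gen */
			cidx = 0;
			gen = 0;
		}
		printf("cidx=%u gen=%u\n", cidx, gen);
	}
	return (0);
}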
3794 if_ctx_t ctx = txq->ift_ctx; in iflib_completed_tx_reclaim()
3796 thresh = txq->ift_reclaim_thresh; in iflib_completed_tx_reclaim()
3798 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); in iflib_completed_tx_reclaim()
3801 if (now <= (txq->ift_last_reclaim + txq->ift_reclaim_ticks) && in iflib_completed_tx_reclaim()
3802 txq->ift_in_use < thresh) in iflib_completed_tx_reclaim()
3804 txq->ift_last_reclaim = now; in iflib_completed_tx_reclaim()
3806 * Need a rate-limiting check so that this isn't called every time in iflib_completed_tx_reclaim()
3811 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { in iflib_completed_tx_reclaim()
3815 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, in iflib_completed_tx_reclaim()
3822 txq->ift_cleaned += reclaim; in iflib_completed_tx_reclaim()
3823 txq->ift_in_use -= reclaim; in iflib_completed_tx_reclaim()
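/*
 * Illustrative sketch (not iflib code): the reclaim above is both
 * time-gated and occupancy-gated -- it runs only if enough ticks have
 * elapsed since the last pass, or the ring is above its threshold.
 * Field names below are invented for the sketch.
 */
#include <stdbool.h>
#include <stdio.h>

struct reclaim_gate {
	unsigned long last;	/* tick of last reclaim */
	unsigned long interval;	/* minimum ticks between passes */
	unsigned in_use;	/* descriptors outstanding */
	unsigned thresh;	/* occupancy trigger */
};

static bool
should_reclaim(struct reclaim_gate *g, unsigned long now)
{
	if (now <= g->last + g->interval && g->in_use < g->thresh)
		return (false);	/* too soon and ring not busy enough */
	g->last = now;
	return (true);
}

int
main(void)
{
	struct reclaim_gate g = { .last = 100, .interval = 50,
	    .in_use = 10, .thresh = 32 };

	printf("%d\n", should_reclaim(&g, 120));	/* 0: too soon, quiet */
	printf("%d\n", should_reclaim(&g, 151));	/* 1: interval elapsed */
	return (0);
}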
3834 size = r->size; in _ring_peek_one()
3835 next = (cidx + CACHE_PTR_INCREMENT) & (size - 1); in _ring_peek_one()
3836 items = __DEVOLATILE(struct mbuf **, &r->items[0]); in _ring_peek_one()
3838 prefetch(items[(cidx + offset) & (size - 1)]); in _ring_peek_one()
3841 prefetch2cachelines(items[(cidx + offset + 1) & (size - 1)]); in _ring_peek_one()
3842 prefetch2cachelines(items[(cidx + offset + 2) & (size - 1)]); in _ring_peek_one()
3843 prefetch2cachelines(items[(cidx + offset + 3) & (size - 1)]); in _ring_peek_one()
3845 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size - 1)])); in _ring_peek_one()
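/*
 * Illustrative sketch (not iflib code): with a power-of-two ring,
 * "& (size - 1)" replaces the modulo used in _ring_peek_one() above, and
 * the head/tail distance handles wrap exactly as the IDXDIFF() macro
 * defined earlier in this file does.
 */
#include <stdio.h>

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

int
main(void)
{
	unsigned size = 16, head = 3, tail = 12;

	printf("slot  = %u\n", (tail + 5) & (size - 1));	/* 1 */
	printf("avail = %u\n", IDXDIFF(head, tail, size));	/* 7 */
	return (0);
}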
3852 ifmp_ring_check_drainage(txq->ift_br, budget); in iflib_txq_check_drain()
3858 iflib_txq_t txq = r->cookie; in iflib_txq_can_drain()
3859 if_ctx_t ctx = txq->ift_ctx; in iflib_txq_can_drain()
3863 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, in iflib_txq_can_drain()
3865 return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, in iflib_txq_can_drain()
3872 iflib_txq_t txq = r->cookie; in iflib_txq_drain()
3873 if_ctx_t ctx = txq->ift_ctx; in iflib_txq_drain()
3874 if_t ifp = ctx->ifc_ifp; in iflib_txq_drain()
3886 rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending); in iflib_txq_drain()
3887 avail = IDXDIFF(pidx, cidx, r->size); in iflib_txq_drain()
3889 if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { in iflib_txq_drain()
3895 if (__predict_true(r->items[(cidx + i) & (r->size - 1)] != (void *)txq)) in iflib_txq_drain()
3896 m_freem(r->items[(cidx + i) & (r->size - 1)]); in iflib_txq_drain()
3897 r->items[(cidx + i) & (r->size - 1)] = NULL; in iflib_txq_drain()
3902 if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { in iflib_txq_drain()
3903 txq->ift_qstatus = IFLIB_QUEUE_IDLE; in iflib_txq_drain()
3905 callout_stop(&txq->ift_timer); in iflib_txq_drain()
3915 txq->ift_qstatus = IFLIB_QUEUE_IDLE; in iflib_txq_drain()
3921 avail, ctx->ifc_flags, TXQ_AVAIL(txq)); in iflib_txq_drain()
3923 do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); in iflib_txq_drain()
3926 int rem = do_prefetch ? count - i : 0; in iflib_txq_drain()
3944 /* no room - bail out */ in iflib_txq_drain()
3948 /* we can't send this packet - skip it */ in iflib_txq_drain()
3954 bytes_sent += m->m_pkthdr.len; in iflib_txq_drain()
3955 mcast_sent += !!(m->m_flags & M_MCAST); in iflib_txq_drain()
3963 /* deliberate use of bitwise or to avoid gratuitous short-circuit */ in iflib_txq_drain()
3964 ring = rang ? false : (iflib_min_tx_latency | err | (!!txq->ift_reclaim_thresh)); in iflib_txq_drain()
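/*
 * Illustrative sketch (not iflib code): why the comment above calls for
 * bitwise OR -- unlike "||", "|" evaluates every operand, so all the flag
 * reads happen unconditionally and the compiler emits no extra branches.
 */
#include <stdbool.h>
#include <stdio.h>

static int
side_effect(int v)
{
	printf("evaluated %d\n", v);
	return (v);
}

int
main(void)
{
	bool a = side_effect(1) | side_effect(0);	/* both print */
	bool b = side_effect(1) || side_effect(0);	/* only first prints */

	return (a && b ? 0 : 1);
}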
3990 txq = r->cookie; in iflib_txq_drain_free()
3992 txq->ift_qstatus = IFLIB_QUEUE_IDLE; in iflib_txq_drain_free()
3994 callout_stop(&txq->ift_timer); in iflib_txq_drain_free()
3997 avail = IDXDIFF(pidx, cidx, r->size); in iflib_txq_drain_free()
3999 mp = _ring_peek_one(r, cidx, i, avail - i); in iflib_txq_drain_free()
4014 r = txq->ift_br; in iflib_ifmp_purge()
4015 r->drain = iflib_txq_drain_free; in iflib_ifmp_purge()
4016 r->can_drain = iflib_txq_drain_always; in iflib_ifmp_purge()
4018 ifmp_ring_check_drainage(r, r->size); in iflib_ifmp_purge()
4020 r->drain = iflib_txq_drain; in iflib_ifmp_purge()
4021 r->can_drain = iflib_txq_can_drain; in iflib_ifmp_purge()
4028 if_ctx_t ctx = txq->ift_ctx; in _task_fn_tx()
4029 if_t ifp = ctx->ifc_ifp; in _task_fn_tx()
4030 int abdicate = ctx->ifc_sysctl_tx_abdicate; in _task_fn_tx()
4033 txq->ift_cpu_exec_count[curcpu]++; in _task_fn_tx()
4039 netmap_tx_irq(ifp, txq->ift_id)) in _task_fn_tx()
4042 if (ctx->ifc_sysctl_simple_tx) { in _task_fn_tx()
4043 mtx_lock(&txq->ift_mtx); in _task_fn_tx()
4045 mtx_unlock(&txq->ift_mtx); in _task_fn_tx()
4052 if (txq->ift_db_pending) in _task_fn_tx()
4053 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate); in _task_fn_tx()
4055 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); in _task_fn_tx()
4060 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); in _task_fn_tx()
4063 if (ctx->ifc_flags & IFC_LEGACY) in _task_fn_tx()
4066 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); in _task_fn_tx()
4073 if_ctx_t ctx = rxq->ifr_ctx; in _task_fn_rx()
4082 rxq->ifr_cpu_exec_count[curcpu]++; in _task_fn_rx()
4085 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) in _task_fn_rx()
4088 nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work); in _task_fn_rx()
4094 budget = ctx->ifc_sysctl_rx_budget; in _task_fn_rx()
4102 if (ctx->ifc_flags & IFC_LEGACY) in _task_fn_rx()
4105 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); in _task_fn_rx()
4108 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) in _task_fn_rx()
4112 GROUPTASK_ENQUEUE(&rxq->ifr_task); in _task_fn_rx()
4114 callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq); in _task_fn_rx()
4121 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; in _task_fn_admin()
4127 running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING); in _task_fn_admin()
4128 oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE); in _task_fn_admin()
4129 do_reset = (ctx->ifc_flags & IFC_DO_RESET); in _task_fn_admin()
4130 do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG); in _task_fn_admin()
4131 in_detach = (ctx->ifc_flags & IFC_IN_DETACH); in _task_fn_admin()
4132 ctx->ifc_flags &= ~(IFC_DO_RESET | IFC_DO_WATCHDOG); in _task_fn_admin()
4135 if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) in _task_fn_admin()
4141 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { in _task_fn_admin()
4143 callout_stop(&txq->ift_timer); in _task_fn_admin()
4146 if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_ADMINCQ) in _task_fn_admin()
4149 ctx->ifc_watchdog_events++; in _task_fn_admin()
4153 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { in _task_fn_admin()
4154 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq, in _task_fn_admin()
4155 txq->ift_timer.c_cpu); in _task_fn_admin()
4164 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) in _task_fn_admin()
4173 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) && in _task_fn_iov()
4174 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) in _task_fn_iov()
4190 ctx = info->iidi_ctx; in iflib_sysctl_int_delay()
4191 info->iidi_req = req; in iflib_sysctl_int_delay()
4192 info->iidi_oidp = oidp; in iflib_sysctl_int_delay()
4236 MPASS(m->m_nextpkt == NULL); in iflib_if_transmit()
4237 /* ALTQ-enabled interfaces always use queue 0. */ in iflib_if_transmit()
4239 /* Use driver-supplied queue selection method if it exists */ in iflib_if_transmit()
4240 if (ctx->isc_txq_select_v2) { in iflib_if_transmit()
4248 ctx->ifc_txqs[0].ift_pullups += early_pullups; in iflib_if_transmit()
4253 qidx = ctx->isc_txq_select_v2(ctx->ifc_softc, m, &pi); in iflib_if_transmit()
4254 ctx->ifc_txqs[qidx].ift_pullups += early_pullups; in iflib_if_transmit()
4257 else if (ctx->isc_txq_select) in iflib_if_transmit()
4258 qidx = ctx->isc_txq_select(ctx->ifc_softc, m); in iflib_if_transmit()
4264 txq = &ctx->ifc_txqs[qidx]; in iflib_if_transmit()
4267 if (txq->ift_closed) { in iflib_if_transmit()
4269 next = m->m_nextpkt; in iflib_if_transmit()
4270 m->m_nextpkt = NULL; in iflib_if_transmit()
4284 next = next->m_nextpkt; in iflib_if_transmit()
4297 next = next->m_nextpkt; in iflib_if_transmit()
4298 mp[i]->m_nextpkt = NULL; in iflib_if_transmit()
4302 abdicate = ctx->ifc_sysctl_tx_abdicate; in iflib_if_transmit()
4304 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate); in iflib_if_transmit()
4307 GROUPTASK_ENQUEUE(&txq->ift_task); in iflib_if_transmit()
4310 GROUPTASK_ENQUEUE(&txq->ift_task); in iflib_if_transmit()
4313 txq->ift_closed = TRUE; in iflib_if_transmit()
4315 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); in iflib_if_transmit()
4333 * ALTQ-specific code required in iflib. It is assumed that the overhead of
4352 struct ifaltq *ifq = &ifp->if_snd; /* XXX - DRVAPI */ in iflib_altq_if_start()
4370 IFQ_ENQUEUE(&ifp->if_snd, m, err); /* XXX - DRVAPI */ in iflib_altq_if_transmit()
4384 iflib_txq_t txq = ctx->ifc_txqs; in iflib_if_qflush()
4388 ctx->ifc_flags |= IFC_QFLUSH; in iflib_if_qflush()
4391 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) in iflib_if_qflush()
4394 ctx->ifc_flags &= ~IFC_QFLUSH; in iflib_if_qflush()
4423 if (ifa->ifa_addr->sa_family == AF_INET) in iflib_if_ioctl()
4427 if (ifa->ifa_addr->sa_family == AF_INET6) in iflib_if_ioctl()
4447 if (ifr->ifr_mtu == if_getmtu(ifp)) { in iflib_if_ioctl()
4455 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { in iflib_if_ioctl()
4457 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) in iflib_if_ioctl()
4458 ctx->ifc_flags |= IFC_MULTISEG; in iflib_if_ioctl()
4460 ctx->ifc_flags &= ~IFC_MULTISEG; in iflib_if_ioctl()
4462 err = if_setmtu(ifp, ifr->ifr_mtu); in iflib_if_ioctl()
4474 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & in iflib_if_ioctl()
4485 ctx->ifc_if_flags = if_getflags(ifp); in iflib_if_ioctl()
4505 err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command); in iflib_if_ioctl()
4533 mask = ifr->ifr_reqcap ^ oldmask; in iflib_if_ioctl()
4534 mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_MEXTPG; in iflib_if_ioctl()
4547 setmask |= ctx->ifc_softc_ctx.isc_capabilities & in iflib_if_ioctl()
4561 ctx->ifc_softc_ctx.isc_capenable ^= setmask; in iflib_if_ioctl()
4622 /* Re-init to load the changes, if required */ in iflib_vlan_register()
4644 /* Re-init to load the changes, if required */ in iflib_vlan_unregister()
4674 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) in iflib_device_probe()
4682 if (sctx->isc_parse_devinfo != NULL) in iflib_device_probe()
4683 sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); in iflib_device_probe()
4685 ent = sctx->isc_vendor_info; in iflib_device_probe()
4686 while (ent->pvi_vendor_id != 0) { in iflib_device_probe()
4687 if (pci_vendor_id != ent->pvi_vendor_id) { in iflib_device_probe()
4691 if ((pci_device_id == ent->pvi_device_id) && in iflib_device_probe()
4692 ((pci_subvendor_id == ent->pvi_subvendor_id) || in iflib_device_probe()
4693 (ent->pvi_subvendor_id == 0)) && in iflib_device_probe()
4694 ((pci_subdevice_id == ent->pvi_subdevice_id) || in iflib_device_probe()
4695 (ent->pvi_subdevice_id == 0)) && in iflib_device_probe()
4696 ((pci_rev_id == ent->pvi_rev_id) || in iflib_device_probe()
4697 (ent->pvi_rev_id == 0))) { in iflib_device_probe()
4698 device_set_desc_copy(dev, ent->pvi_name); in iflib_device_probe()
4700 * ever stops re-probing on best match because the sctx in iflib_device_probe()
4726 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_reset_qvalues()
4727 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_reset_qvalues()
4728 device_t dev = ctx->ifc_dev; in iflib_reset_qvalues()
4731 if (ctx->ifc_sysctl_ntxqs != 0) in iflib_reset_qvalues()
4732 scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; in iflib_reset_qvalues()
4733 if (ctx->ifc_sysctl_nrxqs != 0) in iflib_reset_qvalues()
4734 scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; in iflib_reset_qvalues()
4736 for (i = 0; i < sctx->isc_ntxqs; i++) { in iflib_reset_qvalues()
4737 if (ctx->ifc_sysctl_ntxds[i] != 0) in iflib_reset_qvalues()
4738 scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; in iflib_reset_qvalues()
4740 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; in iflib_reset_qvalues()
4743 for (i = 0; i < sctx->isc_nrxqs; i++) { in iflib_reset_qvalues()
4744 if (ctx->ifc_sysctl_nrxds[i] != 0) in iflib_reset_qvalues()
4745 scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; in iflib_reset_qvalues()
4747 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; in iflib_reset_qvalues()
4750 for (i = 0; i < sctx->isc_nrxqs; i++) { in iflib_reset_qvalues()
4751 if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { in iflib_reset_qvalues()
4752 device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", in iflib_reset_qvalues()
4753 i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); in iflib_reset_qvalues()
4754 scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; in iflib_reset_qvalues()
4756 if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { in iflib_reset_qvalues()
4757 device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", in iflib_reset_qvalues()
4758 i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); in iflib_reset_qvalues()
4759 scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; in iflib_reset_qvalues()
4761 if (!powerof2(scctx->isc_nrxd[i])) { in iflib_reset_qvalues()
4762 device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n", in iflib_reset_qvalues()
4763 i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]); in iflib_reset_qvalues()
4764 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; in iflib_reset_qvalues()
4768 for (i = 0; i < sctx->isc_ntxqs; i++) { in iflib_reset_qvalues()
4769 if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { in iflib_reset_qvalues()
4770 device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", in iflib_reset_qvalues()
4771 i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); in iflib_reset_qvalues()
4772 scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; in iflib_reset_qvalues()
4774 if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { in iflib_reset_qvalues()
4775 device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", in iflib_reset_qvalues()
4776 i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); in iflib_reset_qvalues()
4777 scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; in iflib_reset_qvalues()
4779 if (!powerof2(scctx->isc_ntxd[i])) { in iflib_reset_qvalues()
4780 device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n", in iflib_reset_qvalues()
4781 i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]); in iflib_reset_qvalues()
4782 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; in iflib_reset_qvalues()
4798 pa.pa_headname = if_name(ctx->ifc_ifp); in iflib_add_pfil()
4801 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { in iflib_add_pfil()
4802 rxq->pfil = pfil; in iflib_add_pfil()
4813 rxq = ctx->ifc_rxqs; in iflib_rem_pfil()
4814 pfil = rxq->pfil; in iflib_rem_pfil()
4816 rxq->pfil = NULL; in iflib_rem_pfil()
4823 * Advance forward by n members of the cpuset ctx->ifc_cpus starting from
4833 MPASS(CPU_ISSET(cpuid, &ctx->ifc_cpus)); in cpuid_advance()
4836 MPASS(!CPU_EMPTY(&ctx->ifc_cpus)); in cpuid_advance()
4838 first_valid = CPU_FFS(&ctx->ifc_cpus) - 1; in cpuid_advance()
4839 last_valid = CPU_FLS(&ctx->ifc_cpus) - 1; in cpuid_advance()
4840 n = n % CPU_COUNT(&ctx->ifc_cpus); in cpuid_advance()
4846 } while (!CPU_ISSET(cpuid, &ctx->ifc_cpus)); in cpuid_advance()
4847 n--; in cpuid_advance()
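/*
 * Illustrative sketch (not iflib code): advancing N steps through the set
 * bits of a CPU mask with wrap-around, as cpuid_advance() does above, but
 * using a plain bitmask in place of a cpuset_t.
 */
#include <stdio.h>

static int
mask_advance(unsigned mask, int cpu, int n)
{
	int nbits = 8 * (int)sizeof(mask);

	n %= __builtin_popcount(mask);	/* full laps are no-ops */
	while (n > 0) {
		do {
			cpu = (cpu + 1) % nbits;
		} while (!(mask & (1u << cpu)));
		n--;
	}
	return (cpu);
}

int
main(void)
{
	/* CPUs 1, 3, 6 present; two steps from 3 wrap back to 1 */
	printf("%d\n", mask_advance(0x4a, 3, 2));
	return (0);
}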
4854 extern struct cpu_group *cpu_top; /* CPU topology */
4857 find_child_with_core(int cpu, struct cpu_group *grp) in find_child_with_core() argument
4861 if (grp->cg_children == 0) in find_child_with_core()
4862 return (-1); in find_child_with_core()
4864 MPASS(grp->cg_child); in find_child_with_core()
4865 for (i = 0; i < grp->cg_children; i++) { in find_child_with_core()
4866 if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask)) in find_child_with_core()
4870 return (-1); in find_child_with_core()
4875 * Find an L2 neighbor of the given CPU or return -1 if none found. This
4876 * does not distinguish among multiple L2 neighbors if the given CPU has
4880 find_l2_neighbor(int cpu) in find_l2_neighbor() argument
4887 return (-1); in find_l2_neighbor()
4890 * Find the smallest CPU group that contains the given core. in find_l2_neighbor()
4893 while ((i = find_child_with_core(cpu, grp)) != -1) { in find_l2_neighbor()
4895 * If the smallest group containing the given CPU has less in find_l2_neighbor()
4896 * than two members, we conclude the given CPU has no in find_l2_neighbor()
4899 if (grp->cg_child[i].cg_count <= 1) in find_l2_neighbor()
4900 return (-1); in find_l2_neighbor()
4901 grp = &grp->cg_child[i]; in find_l2_neighbor()
4905 if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) in find_l2_neighbor()
4906 return (-1); in find_l2_neighbor()
4910 * CPU, which at this point is guaranteed to exist. in find_l2_neighbor()
4913 if (CPU_ISSET(i, &grp->cg_mask) && i != cpu) in find_l2_neighbor()
4918 return (-1); in find_l2_neighbor()
4923 find_l2_neighbor(int cpu) in find_l2_neighbor() argument
4926 return (-1); in find_l2_neighbor()
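/*
 * Illustrative sketch (not iflib code): descending a sharing-topology tree
 * to the smallest group that still contains a given CPU, then scanning that
 * group for a sibling, in the spirit of find_l2_neighbor() above.  The
 * struct is an invented stand-in for struct cpu_group, and the sketch
 * omits the real function's cache-level (cg_level) check.
 */
#include <stdio.h>

struct grp {
	unsigned mask;		/* CPUs in this group */
	int nchildren;
	struct grp *child;
};

static int
l2_sibling(struct grp *g, int cpu)
{
	int i;

	for (;;) {	/* descend while some child still contains the CPU */
		for (i = 0; i < g->nchildren; i++)
			if (g->child[i].mask & (1u << cpu))
				break;
		if (i == g->nchildren)
			break;
		g = &g->child[i];
	}
	for (i = 0; i < 32; i++)	/* any other member is a sibling */
		if ((g->mask & (1u << i)) && i != cpu)
			return (i);
	return (-1);
}

int
main(void)
{
	struct grp leaves[2] = { { .mask = 0x3 }, { .mask = 0xc } };
	struct grp root = { .mask = 0xf, .nchildren = 2, .child = leaves };

	printf("%d\n", l2_sibling(&root, 2));	/* 3 shares the {2,3} leaf */
	return (0);
}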
4931 * CPU mapping behaviors
4932 * ---------------------
4939 * separate-txrx  use-logical  INTR-CPUS  result
4940 *       -             -           X      RX and TX queues mapped to consecutive physical
4943 *       -             X           X      RX and TX queues mapped to consecutive cores
4946 *       X             -           X      RX and TX queues mapped to consecutive physical
4952 *       -            n/a          -      RX and TX queues mapped to consecutive cores of
4955 *       X            n/a          -      RX and TX queues mapped to consecutive cores of
4962 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in get_cpuid_for_queue()
4965 if (ctx->ifc_sysctl_separate_txrx) { in get_cpuid_for_queue()
4968 * will always be of a consecutive CPU out of the set of in get_cpuid_for_queue()
4972 * corresponds to an RX qid, and the CPU assigned to the in get_cpuid_for_queue()
4975 if (ctx->ifc_sysctl_use_logical_cores && in get_cpuid_for_queue()
4976 ctx->ifc_cpus_are_physical_cores && in get_cpuid_for_queue()
4977 is_tx && qid < scctx->isc_nrxqsets) { in get_cpuid_for_queue()
4983 if (l2_neighbor != -1) { in get_cpuid_for_queue()
4988 * consecutive-after-RX assignment scheme. in get_cpuid_for_queue()
4998 core_index = scctx->isc_nrxqsets + qid; in get_cpuid_for_queue()
5011 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in get_ctx_core_offset()
5015 unsigned int base_cpuid = ctx->ifc_sysctl_core_offset; in get_ctx_core_offset()
5020 first_valid = CPU_FFS(&ctx->ifc_cpus) - 1; in get_ctx_core_offset()
5021 last_valid = CPU_FLS(&ctx->ifc_cpus) - 1; in get_ctx_core_offset()
5025 * Align the user-chosen base CPU ID to the next valid CPU in get_ctx_core_offset()
5026 * for this device. If the chosen base CPU ID is smaller in get_ctx_core_offset()
5027 * than the first valid CPU or larger than the last valid in get_ctx_core_offset()
5028 * CPU, we assume the user does not know what the valid in get_ctx_core_offset()
5030 * zero-based reference frame, and so we shift the given in get_ctx_core_offset()
5033 * If the base CPU ID is within the valid first/last, but in get_ctx_core_offset()
5034 * does not correspond to a valid CPU, it is advanced to the in get_ctx_core_offset()
5035 * next valid CPU (wrapping if necessary). in get_ctx_core_offset()
5038 /* shift from zero-based to first_valid-based */ in get_ctx_core_offset()
5041 base_cpuid = (base_cpuid - first_valid) % in get_ctx_core_offset()
5042 (last_valid - first_valid + 1); in get_ctx_core_offset()
5044 if (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus)) { in get_ctx_core_offset()
5049 * with a CPU ID that is greater than base_cpuid, in get_ctx_core_offset()
5052 while (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus)) in get_ctx_core_offset()
5059 * Determine how many cores will be consumed by performing the CPU in get_ctx_core_offset()
5061 * to CPUs in the set of context CPUs. This is done using the CPU in get_ctx_core_offset()
5062 * ID first_valid as the base CPU ID, as the base CPU must be within in get_ctx_core_offset()
5069 * neighbors of CPUs that RX queues have been mapped to - in this in get_ctx_core_offset()
5071 * CPUs have been consumed, as that determines the next CPU in that in get_ctx_core_offset()
5076 for (i = 0; i < scctx->isc_ntxqsets; i++) in get_ctx_core_offset()
5079 for (i = 0; i < scctx->isc_nrxqsets; i++) in get_ctx_core_offset()
5082 CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus); in get_ctx_core_offset()
5087 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { in get_ctx_core_offset()
5088 base_cpuid = op->next_cpuid; in get_ctx_core_offset()
5089 op->next_cpuid = cpuid_advance(ctx, op->next_cpuid, in get_ctx_core_offset()
5091 MPASS(op->refcount < UINT_MAX); in get_ctx_core_offset()
5092 op->refcount++; in get_ctx_core_offset()
5101 device_printf(ctx->ifc_dev, in get_ctx_core_offset()
5102 "allocation for cpu offset failed.\n"); in get_ctx_core_offset()
5104 op->next_cpuid = cpuid_advance(ctx, base_cpuid, in get_ctx_core_offset()
5106 op->refcount = 1; in get_ctx_core_offset()
5107 CPU_COPY(&ctx->ifc_cpus, &op->set); in get_ctx_core_offset()
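/*
 * Illustrative sketch (not iflib code): re-basing a user-chosen CPU offset
 * into the device's valid [first_valid, last_valid] window, as
 * get_ctx_core_offset() does above when the request falls outside it.
 */
#include <stdio.h>

int
main(void)
{
	unsigned first_valid = 4, last_valid = 11;	/* 8 valid CPUs */
	unsigned base_cpuid = 13;		/* outside the window */
	unsigned span = last_valid - first_valid + 1;

	base_cpuid = (base_cpuid - first_valid) % span;	/* (13-4) % 8 = 1 */
	printf("aligned offset %u -> CPU %u\n", base_cpuid,
	    first_valid + base_cpuid);			/* CPU 5 */
	return (0);
}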
5123 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { in unref_ctx_core_offset()
5124 MPASS(op->refcount > 0); in unref_ctx_core_offset()
5125 op->refcount--; in unref_ctx_core_offset()
5126 if (op->refcount == 0) { in unref_ctx_core_offset()
5151 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK | M_ZERO); in iflib_device_register()
5153 ctx->ifc_flags |= IFC_SC_ALLOCATED; in iflib_device_register()
5156 ctx->ifc_sctx = sctx; in iflib_device_register()
5157 ctx->ifc_dev = dev; in iflib_device_register()
5158 ctx->ifc_softc = sc; in iflib_device_register()
5163 scctx = &ctx->ifc_softc_ctx; in iflib_device_register()
5164 ifp = ctx->ifc_ifp; in iflib_device_register()
5165 if (ctx->ifc_sysctl_simple_tx) { in iflib_device_register()
5181 ctx->ifc_txrx = *scctx->isc_txrx; in iflib_device_register()
5183 MPASS(scctx->isc_dma_width <= flsll(BUS_SPACE_MAXADDR)); in iflib_device_register()
5185 if (sctx->isc_flags & IFLIB_DRIVER_MEDIA) in iflib_device_register()
5186 ctx->ifc_mediap = scctx->isc_media; in iflib_device_register()
5189 if (scctx->isc_capabilities & IFCAP_TXCSUM) in iflib_device_register()
5190 MPASS(scctx->isc_tx_csum_flags); in iflib_device_register()
5194 scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_MEXTPG); in iflib_device_register()
5196 scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_MEXTPG); in iflib_device_register()
5198 …if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_n… in iflib_device_register()
5199 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; in iflib_device_register()
5200 …if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_n… in iflib_device_register()
5201 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; in iflib_device_register()
5206 /* XXX change for per-queue sizes */ in iflib_device_register()
5210 if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION) in iflib_device_register()
5211 scctx->isc_tx_nsegments = max(1, num_txd / in iflib_device_register()
5213 if (scctx->isc_tx_tso_segments_max > num_txd / in iflib_device_register()
5215 scctx->isc_tx_tso_segments_max = max(1, in iflib_device_register()
5218 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ in iflib_device_register()
5224 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, in iflib_device_register()
5234 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); in iflib_device_register()
5235 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); in iflib_device_register()
5237 if (scctx->isc_rss_table_size == 0) in iflib_device_register()
5238 scctx->isc_rss_table_size = 64; in iflib_device_register()
5239 scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1; in iflib_device_register()
5243 ctx->ifc_tq = taskqueue_create_fast(namebuf, M_NOWAIT, in iflib_device_register()
5244 taskqueue_thread_enqueue, &ctx->ifc_tq); in iflib_device_register()
5245 if (ctx->ifc_tq == NULL) { in iflib_device_register()
5250 err = taskqueue_start_threads(&ctx->ifc_tq, 1, PI_NET, "%s", namebuf); in iflib_device_register()
5255 taskqueue_free(ctx->ifc_tq); in iflib_device_register()
5259 TASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); in iflib_device_register()
5261 /* Set up cpu set. If it fails, use the set of all CPUs. */ in iflib_device_register()
5262 if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) { in iflib_device_register()
5263 device_printf(dev, "Unable to fetch CPU list\n"); in iflib_device_register()
5264 CPU_COPY(&all_cpus, &ctx->ifc_cpus); in iflib_device_register()
5265 ctx->ifc_cpus_are_physical_cores = false; in iflib_device_register()
5267 ctx->ifc_cpus_are_physical_cores = true; in iflib_device_register()
5268 MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0); in iflib_device_register()
5271 * Now set up MSI or MSI-X, should return us the number of supported in iflib_device_register()
5272 * vectors (will be 1 for a legacy interrupt and MSI). in iflib_device_register()
5274 if (sctx->isc_flags & IFLIB_SKIP_MSIX) { in iflib_device_register()
5275 msix = scctx->isc_vectors; in iflib_device_register()
5276 } else if (scctx->isc_msix_bar != 0) in iflib_device_register()
5283 scctx->isc_vectors = 1; in iflib_device_register()
5284 scctx->isc_ntxqsets = 1; in iflib_device_register()
5285 scctx->isc_nrxqsets = 1; in iflib_device_register()
5286 scctx->isc_intr = IFLIB_INTR_LEGACY; in iflib_device_register()
5301 ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx); in iflib_device_register()
5305 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable in iflib_device_register()
5309 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, in iflib_device_register()
5311 if (kobj_method == &kobj_desc->deflt) { in iflib_device_register()
5313 "MSI-X requires ifdi_rx_queue_intr_enable method"); in iflib_device_register()
5318 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, in iflib_device_register()
5320 if (kobj_method == &kobj_desc->deflt) { in iflib_device_register()
5322 "MSI-X requires ifdi_tx_queue_intr_enable method"); in iflib_device_register()
5328 * Assign the MSI-X vectors. in iflib_device_register()
5338 } else if (scctx->isc_intr != IFLIB_INTR_MSIX) { in iflib_device_register()
5340 if (scctx->isc_intr == IFLIB_INTR_MSI) { in iflib_device_register()
5344 if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { in iflib_device_register()
5350 "Cannot use iflib with only 1 MSI-X interrupt!\n"); in iflib_device_register()
5356 * It prevents a double-locking panic with iflib_media_status when in iflib_device_register()
5360 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); in iflib_device_register()
5377 device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); in iflib_device_register()
5382 DEBUGNET_SET(ctx->ifc_ifp, iflib); in iflib_device_register()
5386 ctx->ifc_flags |= IFC_INIT_DONE; in iflib_device_register()
5393 ether_ifdetach(ctx->ifc_ifp); in iflib_device_register()
5395 taskqueue_free(ctx->ifc_tq); in iflib_device_register()
5407 device_set_softc(ctx->ifc_dev, NULL); in iflib_device_register()
5408 if (ctx->ifc_flags & IFC_SC_ALLOCATED) in iflib_device_register()
5409 free(ctx->ifc_softc, M_IFLIB); in iflib_device_register()
5420 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) in iflib_device_attach()
5431 if_t ifp = ctx->ifc_ifp; in iflib_device_deregister()
5432 device_t dev = ctx->ifc_dev; in iflib_device_deregister()
5441 device_printf(dev, "SR-IOV in use; detach first.\n"); in iflib_device_deregister()
5447 ctx->ifc_flags |= IFC_IN_DETACH; in iflib_device_deregister()
5461 if (ctx->ifc_led_dev != NULL) in iflib_device_deregister()
5462 led_destroy(ctx->ifc_led_dev); in iflib_device_deregister()
5473 taskqueue_free(ctx->ifc_tq); in iflib_device_deregister()
5474 ctx->ifc_tq = NULL; in iflib_device_deregister()
5476 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ in iflib_device_deregister()
5483 device_set_softc(ctx->ifc_dev, NULL); in iflib_device_deregister()
5484 if (ctx->ifc_flags & IFC_SC_ALLOCATED) in iflib_device_deregister()
5485 free(ctx->ifc_softc, M_IFLIB); in iflib_device_deregister()
5501 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { in iflib_tqg_detach()
5502 callout_drain(&txq->ift_timer); in iflib_tqg_detach()
5504 callout_drain(&txq->ift_netmap_timer); in iflib_tqg_detach()
5506 if (txq->ift_task.gt_uniq != NULL) in iflib_tqg_detach()
5507 taskqgroup_detach(tqg, &txq->ift_task); in iflib_tqg_detach()
5509 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { in iflib_tqg_detach()
5510 if (rxq->ifr_task.gt_uniq != NULL) in iflib_tqg_detach()
5511 taskqgroup_detach(tqg, &rxq->ifr_task); in iflib_tqg_detach()
5519 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { in iflib_free_intr_mem()
5520 iflib_irq_free(ctx, &ctx->ifc_legacy_irq); in iflib_free_intr_mem()
5522 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { in iflib_free_intr_mem()
5523 pci_release_msi(ctx->ifc_dev); in iflib_free_intr_mem()
5525 if (ctx->ifc_msix_mem != NULL) { in iflib_free_intr_mem()
5526 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, in iflib_free_intr_mem()
5527 rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem); in iflib_free_intr_mem()
5528 ctx->ifc_msix_mem = NULL; in iflib_free_intr_mem()
5567 iflib_txq_t txq = ctx->ifc_txqs; in iflib_device_resume()
5622 * - Start a fast taskqueue thread for each core
5623 * - Start a taskqueue for control operations
5663 MPASS(sctx->isc_tx_maxsize); in _iflib_assert()
5664 MPASS(sctx->isc_tx_maxsegsize); in _iflib_assert()
5666 MPASS(sctx->isc_rx_maxsize); in _iflib_assert()
5667 MPASS(sctx->isc_rx_nsegments); in _iflib_assert()
5668 MPASS(sctx->isc_rx_maxsegsize); in _iflib_assert()
5670 MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8); in _iflib_assert()
5671 for (i = 0; i < sctx->isc_nrxqs; i++) { in _iflib_assert()
5672 MPASS(sctx->isc_nrxd_min[i]); in _iflib_assert()
5673 MPASS(powerof2(sctx->isc_nrxd_min[i])); in _iflib_assert()
5674 MPASS(sctx->isc_nrxd_max[i]); in _iflib_assert()
5675 MPASS(powerof2(sctx->isc_nrxd_max[i])); in _iflib_assert()
5676 MPASS(sctx->isc_nrxd_default[i]); in _iflib_assert()
5677 MPASS(powerof2(sctx->isc_nrxd_default[i])); in _iflib_assert()
5680 MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8); in _iflib_assert()
5681 for (i = 0; i < sctx->isc_ntxqs; i++) { in _iflib_assert()
5682 MPASS(sctx->isc_ntxd_min[i]); in _iflib_assert()
5683 MPASS(powerof2(sctx->isc_ntxd_min[i])); in _iflib_assert()
5684 MPASS(sctx->isc_ntxd_max[i]); in _iflib_assert()
5685 MPASS(powerof2(sctx->isc_ntxd_max[i])); in _iflib_assert()
5686 MPASS(sctx->isc_ntxd_default[i]); in _iflib_assert()
5687 MPASS(powerof2(sctx->isc_ntxd_default[i])); in _iflib_assert()
5695 MPASS(scctx->isc_txrx->ift_txd_encap); in _iflib_pre_assert()
5696 MPASS(scctx->isc_txrx->ift_txd_flush); in _iflib_pre_assert()
5697 MPASS(scctx->isc_txrx->ift_txd_credits_update); in _iflib_pre_assert()
5698 MPASS(scctx->isc_txrx->ift_rxd_available); in _iflib_pre_assert()
5699 MPASS(scctx->isc_txrx->ift_rxd_pkt_get); in _iflib_pre_assert()
5700 MPASS(scctx->isc_txrx->ift_rxd_refill); in _iflib_pre_assert()
5701 MPASS(scctx->isc_txrx->ift_rxd_flush); in _iflib_pre_assert()
5707 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_register()
5708 driver_t *driver = sctx->isc_driver; in iflib_register()
5709 device_t dev = ctx->ifc_dev; in iflib_register()
5715 STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); in iflib_register()
5716 ifp = ctx->ifc_ifp = if_alloc_dev(IFT_ETHER, dev); in iflib_register()
5739 ctx->ifc_vlan_attach_event = in iflib_register()
5742 ctx->ifc_vlan_detach_event = in iflib_register()
5746 if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) { in iflib_register()
5747 ctx->ifc_mediap = &ctx->ifc_media; in iflib_register()
5748 ifmedia_init(ctx->ifc_mediap, IFM_IMASK, in iflib_register()
5757 if (ctx->ifc_vlan_attach_event != NULL) { in iflib_unregister_vlan_handlers()
5758 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); in iflib_unregister_vlan_handlers()
5759 ctx->ifc_vlan_attach_event = NULL; in iflib_unregister_vlan_handlers()
5761 if (ctx->ifc_vlan_detach_event != NULL) { in iflib_unregister_vlan_handlers()
5762 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); in iflib_unregister_vlan_handlers()
5763 ctx->ifc_vlan_detach_event = NULL; in iflib_unregister_vlan_handlers()
5771 if_t ifp = ctx->ifc_ifp; in iflib_deregister()
5774 ifmedia_removeall(&ctx->ifc_media); in iflib_deregister()
5787 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ in iflib_deregister()
5794 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_queues_alloc()
5795 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_queues_alloc()
5796 device_t dev = ctx->ifc_dev; in iflib_queues_alloc()
5797 int nrxqsets = scctx->isc_nrxqsets; in iflib_queues_alloc()
5798 int ntxqsets = scctx->isc_ntxqsets; in iflib_queues_alloc()
5802 int i, j, cpu, err, txconf, rxconf; in iflib_queues_alloc() local
5804 uint32_t *rxqsizes = scctx->isc_rxqsizes; in iflib_queues_alloc()
5805 uint32_t *txqsizes = scctx->isc_txqsizes; in iflib_queues_alloc()
5806 uint8_t nrxqs = sctx->isc_nrxqs; in iflib_queues_alloc()
5807 uint8_t ntxqs = sctx->isc_ntxqs; in iflib_queues_alloc()
5808 int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1; in iflib_queues_alloc()
5809 int fl_offset = (sctx->isc_flags & IFLIB_HAS_RXCQ ? 1 : 0); in iflib_queues_alloc()
5819 if (!(ctx->ifc_txqs = in iflib_queues_alloc()
5828 if (!(ctx->ifc_rxqs = in iflib_queues_alloc()
5836 txq = ctx->ifc_txqs; in iflib_queues_alloc()
5837 rxq = ctx->ifc_rxqs; in iflib_queues_alloc()
5842 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { in iflib_queues_alloc()
5852 txq->ift_ifdi = ifdip; in iflib_queues_alloc()
5860 txq->ift_txd_size[j] = scctx->isc_txd_size[j]; in iflib_queues_alloc()
5861 bzero((void *)ifdip->idi_vaddr, txqsizes[j]); in iflib_queues_alloc()
5863 txq->ift_ctx = ctx; in iflib_queues_alloc()
5864 txq->ift_id = i; in iflib_queues_alloc()
5865 if (sctx->isc_flags & IFLIB_HAS_TXCQ) { in iflib_queues_alloc()
5866 txq->ift_br_offset = 1; in iflib_queues_alloc()
5868 txq->ift_br_offset = 0; in iflib_queues_alloc()
5878 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout", in iflib_queues_alloc()
5879 device_get_nameunit(dev), txq->ift_id); in iflib_queues_alloc()
5880 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); in iflib_queues_alloc()
5881 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); in iflib_queues_alloc()
5882 txq->ift_timer.c_cpu = cpu; in iflib_queues_alloc()
5884 callout_init_mtx(&txq->ift_netmap_timer, &txq->ift_mtx, 0); in iflib_queues_alloc()
5885 txq->ift_netmap_timer.c_cpu = cpu; in iflib_queues_alloc()
5888 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, in iflib_queues_alloc()
5895 txq->ift_reclaim_thresh = ctx->ifc_sysctl_tx_reclaim_thresh; in iflib_queues_alloc()
5900 callout_init(&rxq->ifr_watchdog, 1); in iflib_queues_alloc()
5910 rxq->ifr_ifdi = ifdip; in iflib_queues_alloc()
5912 rxq->ifr_ntxqirq = 1; in iflib_queues_alloc()
5913 rxq->ifr_txqid[0] = i; in iflib_queues_alloc()
5921 bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); in iflib_queues_alloc()
5923 rxq->ifr_ctx = ctx; in iflib_queues_alloc()
5924 rxq->ifr_id = i; in iflib_queues_alloc()
5925 rxq->ifr_fl_offset = fl_offset; in iflib_queues_alloc()
5926 rxq->ifr_nfl = nfree_lists; in iflib_queues_alloc()
5933 rxq->ifr_fl = fl; in iflib_queues_alloc()
5937 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; in iflib_queues_alloc()
5938 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; in iflib_queues_alloc()
5948 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) in iflib_queues_alloc()
5949 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, in iflib_queues_alloc()
5957 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; in iflib_queues_alloc()
5960 vaddrs[i * ntxqs + j] = di->idi_vaddr; in iflib_queues_alloc()
5961 paddrs[i * ntxqs + j] = di->idi_paddr; in iflib_queues_alloc()
5965 device_printf(ctx->ifc_dev, in iflib_queues_alloc()
5979 iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; in iflib_queues_alloc()
5982 vaddrs[i * nrxqs + j] = di->idi_vaddr; in iflib_queues_alloc()
5983 paddrs[i * nrxqs + j] = di->idi_paddr; in iflib_queues_alloc()
5987 device_printf(ctx->ifc_dev, in iflib_queues_alloc()
6003 if (ctx->ifc_rxqs != NULL) in iflib_queues_alloc()
6004 free(ctx->ifc_rxqs, M_IFLIB); in iflib_queues_alloc()
6005 ctx->ifc_rxqs = NULL; in iflib_queues_alloc()
6006 if (ctx->ifc_txqs != NULL) in iflib_queues_alloc()
6007 free(ctx->ifc_txqs, M_IFLIB); in iflib_queues_alloc()
6008 ctx->ifc_txqs = NULL; in iflib_queues_alloc()
6016 iflib_txq_t txq = ctx->ifc_txqs; in iflib_tx_structures_setup()
6028 iflib_txq_t txq = ctx->ifc_txqs; in iflib_tx_structures_free()
6029 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_tx_structures_free()
6033 for (j = 0; j < sctx->isc_ntxqs; j++) in iflib_tx_structures_free()
6034 iflib_dma_free(&txq->ift_ifdi[j]); in iflib_tx_structures_free()
6037 free(ctx->ifc_txqs, M_IFLIB); in iflib_tx_structures_free()
6038 ctx->ifc_txqs = NULL; in iflib_tx_structures_free()
6049 iflib_rxq_t rxq = ctx->ifc_rxqs; in iflib_rx_structures_setup()
6055 for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { in iflib_rx_structures_setup()
6057 err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, in iflib_rx_structures_setup()
6059 ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset])); in iflib_rx_structures_setup()
6061 device_printf(ctx->ifc_dev, in iflib_rx_structures_setup()
6066 IFDI_RXQ_SETUP(ctx, rxq->ifr_id); in iflib_rx_structures_setup()
6076 rxq = ctx->ifc_rxqs; in iflib_rx_structures_setup()
6078 tcp_lro_free(&rxq->ifr_lc); in iflib_rx_structures_setup()
6092 iflib_rxq_t rxq = ctx->ifc_rxqs; in iflib_rx_structures_free()
6093 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_rx_structures_free()
6096 for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { in iflib_rx_structures_free()
6097 for (j = 0; j < sctx->isc_nrxqs; j++) in iflib_rx_structures_free()
6098 iflib_dma_free(&rxq->ifr_ifdi[j]); in iflib_rx_structures_free()
6101 tcp_lro_free(&rxq->ifr_lc); in iflib_rx_structures_free()
6104 free(ctx->ifc_rxqs, M_IFLIB); in iflib_rx_structures_free()
6105 ctx->ifc_rxqs = NULL; in iflib_rx_structures_free()
6118 device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err); in iflib_qset_structures_setup()
6123 device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); in iflib_qset_structures_setup()
6146 dev = ctx->ifc_dev; in iflib_irq_set_affinity()
6147 base_cpuid = ctx->ifc_sysctl_core_offset; in iflib_irq_set_affinity()
6150 irq ? irq->ii_res : NULL, name); in iflib_irq_set_affinity()
6156 if (cpuid > ctx->ifc_cpuid_highest) in iflib_irq_set_affinity()
6157 ctx->ifc_cpuid_highest = cpuid; in iflib_irq_set_affinity()
6191 dev = ctx->ifc_dev; in iflib_irq_alloc_generic_subctx()
6192 subdev = subctx->ifc_dev; in iflib_irq_alloc_generic_subctx()
6196 q = &subctx->ifc_rxqs[qid]; in iflib_irq_alloc_generic_subctx()
6197 info = &subctx->ifc_rxqs[qid].ifr_filter_info; in iflib_irq_alloc_generic_subctx()
6198 gtask = &subctx->ifc_rxqs[qid].ifr_task; in iflib_irq_alloc_generic_subctx()
6210 info->ifi_filter = filter; in iflib_irq_alloc_generic_subctx()
6211 info->ifi_filter_arg = filter_arg; in iflib_irq_alloc_generic_subctx()
6212 info->ifi_task = gtask; in iflib_irq_alloc_generic_subctx()
6213 info->ifi_ctx = q; in iflib_irq_alloc_generic_subctx()
6225 if (tqrid != -1) { in iflib_irq_alloc_generic_subctx()
6231 taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name); in iflib_irq_alloc_generic_subctx()
6251 info = &ctx->ifc_filter_info; in iflib_irq_alloc_generic()
6257 q = &ctx->ifc_txqs[qid]; in iflib_irq_alloc_generic()
6258 info = &ctx->ifc_txqs[qid].ift_filter_info; in iflib_irq_alloc_generic()
6259 gtask = &ctx->ifc_txqs[qid].ift_task; in iflib_irq_alloc_generic()
6264 ctx->ifc_flags |= IFC_NETMAP_TX_IRQ; in iflib_irq_alloc_generic()
6267 q = &ctx->ifc_rxqs[qid]; in iflib_irq_alloc_generic()
6268 info = &ctx->ifc_rxqs[qid].ifr_filter_info; in iflib_irq_alloc_generic()
6269 gtask = &ctx->ifc_rxqs[qid].ifr_task; in iflib_irq_alloc_generic()
6276 q = &ctx->ifc_rxqs[qid]; in iflib_irq_alloc_generic()
6277 info = &ctx->ifc_rxqs[qid].ifr_filter_info; in iflib_irq_alloc_generic()
6278 gtask = &ctx->ifc_rxqs[qid].ifr_task; in iflib_irq_alloc_generic()
6286 tqrid = -1; in iflib_irq_alloc_generic()
6287 info = &ctx->ifc_filter_info; in iflib_irq_alloc_generic()
6292 device_printf(ctx->ifc_dev, "%s: unknown net intr type\n", in iflib_irq_alloc_generic()
6297 info->ifi_filter = filter; in iflib_irq_alloc_generic()
6298 info->ifi_filter_arg = filter_arg; in iflib_irq_alloc_generic()
6299 info->ifi_task = gtask; in iflib_irq_alloc_generic()
6300 info->ifi_ctx = q; in iflib_irq_alloc_generic()
6302 dev = ctx->ifc_dev; in iflib_irq_alloc_generic()
6311 if (tqrid != -1) { in iflib_irq_alloc_generic()
6317 taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name); in iflib_irq_alloc_generic()
6336 q = &ctx->ifc_txqs[qid]; in iflib_softirq_alloc_generic()
6337 gtask = &ctx->ifc_txqs[qid].ift_task; in iflib_softirq_alloc_generic()
6343 q = &ctx->ifc_rxqs[qid]; in iflib_softirq_alloc_generic()
6344 gtask = &ctx->ifc_rxqs[qid].ifr_task; in iflib_softirq_alloc_generic()
6350 TASK_INIT(&ctx->ifc_vflr_task, 0, _task_fn_iov, ctx); in iflib_softirq_alloc_generic()
6357 dev = ctx->ifc_dev; in iflib_softirq_alloc_generic()
6358 taskqgroup_attach(tqg, gtask, q, dev, irq ? irq->ii_res : NULL, in iflib_softirq_alloc_generic()
6367 if (irq->ii_tag) in iflib_irq_free()
6368 bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); in iflib_irq_free()
6370 if (irq->ii_res) in iflib_irq_free()
6371 bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, in iflib_irq_free()
6372 rman_get_rid(irq->ii_res), irq->ii_res); in iflib_irq_free()
6378 iflib_txq_t txq = ctx->ifc_txqs; in iflib_legacy_setup()
6379 iflib_rxq_t rxq = ctx->ifc_rxqs; in iflib_legacy_setup()
6380 if_irq_t irq = &ctx->ifc_legacy_irq; in iflib_legacy_setup()
6388 info = &rxq->ifr_filter_info; in iflib_legacy_setup()
6389 gtask = &rxq->ifr_task; in iflib_legacy_setup()
6391 rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0; in iflib_legacy_setup()
6393 ctx->ifc_flags |= IFC_LEGACY; in iflib_legacy_setup()
6394 info->ifi_filter = filter; in iflib_legacy_setup()
6395 info->ifi_filter_arg = filter_arg; in iflib_legacy_setup()
6396 info->ifi_task = gtask; in iflib_legacy_setup()
6397 info->ifi_ctx = rxq; in iflib_legacy_setup()
6399 dev = ctx->ifc_dev; in iflib_legacy_setup()
6406 res = irq->ii_res; in iflib_legacy_setup()
6409 GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); in iflib_legacy_setup()
6410 taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res, in iflib_legacy_setup()
6419 ctx->ifc_led_dev = led_create(iflib_led_func, ctx, in iflib_led_create()
6420 device_get_nameunit(ctx->ifc_dev)); in iflib_led_create()
6427 GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); in iflib_tx_intr_deferred()
6434 GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); in iflib_rx_intr_deferred()
6441 taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task); in iflib_admin_intr_deferred()
6448 taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_vflr_task); in iflib_iov_intr_deferred()
6452 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name) in iflib_io_tqg_attach() argument
6455 taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL, in iflib_io_tqg_attach()
6468 taskqueue_enqueue(ctx->ifc_tq, config_task); in iflib_config_task_enqueue()
6474 if_t ifp = ctx->ifc_ifp; in iflib_link_state_change()
6475 iflib_txq_t txq = ctx->ifc_txqs; in iflib_link_state_change()
6480 ctx->ifc_flags |= IFC_PREFETCH; in iflib_link_state_change()
6484 if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { in iflib_link_state_change()
6485 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) in iflib_link_state_change()
6486 txq->ift_qstatus = IFLIB_QUEUE_IDLE; in iflib_link_state_change()
6488 ctx->ifc_link_state = link_state; in iflib_link_state_change()
6497 int credits_pre = txq->ift_cidx_processed; in iflib_tx_credits_update()
6500 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, in iflib_tx_credits_update()
6502 if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) in iflib_tx_credits_update()
6505 txq->ift_processed += credits; in iflib_tx_credits_update()
6506 txq->ift_cidx_processed += credits; in iflib_tx_credits_update()
6508 MPASS(credits_pre + credits == txq->ift_cidx_processed); in iflib_tx_credits_update()
6509 if (txq->ift_cidx_processed >= txq->ift_size) in iflib_tx_credits_update()
6510 txq->ift_cidx_processed -= txq->ift_size; in iflib_tx_credits_update()
6520 for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++) in iflib_rxd_avail()
6521 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, in iflib_rxd_avail()
6523 return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, in iflib_rxd_avail()
6532 info->iidi_ctx = ctx; in iflib_add_int_delay_sysctl()
6533 info->iidi_offset = offset; in iflib_add_int_delay_sysctl()
6534 info->iidi_value = value; in iflib_add_int_delay_sysctl()
6535 SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), in iflib_add_int_delay_sysctl()
6536 SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), in iflib_add_int_delay_sysctl()
6545 return (&ctx->ifc_ctx_sx); in iflib_ctx_lock_get()
6551 device_t dev = ctx->ifc_dev; in iflib_msix_init()
6552 if_shared_ctx_t sctx = ctx->ifc_sctx; in iflib_msix_init()
6553 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; in iflib_msix_init()
6555 int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors; in iflib_msix_init() local
6557 iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs; in iflib_msix_init()
6558 iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs; in iflib_msix_init()
6562 imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets)); in iflib_msix_init()
6565 if (scctx->isc_disable_msix) in iflib_msix_init()
6568 /* First try MSI-X */ in iflib_msix_init()
6571 device_printf(dev, "MSI-X not supported or disabled\n"); in iflib_msix_init()
6575 bar = ctx->ifc_softc_ctx.isc_msix_bar; in iflib_msix_init()
6577 * bar == -1 => "trust me I know what I'm doing" in iflib_msix_init()
6581 * allows shoddy garbage to use MSI-X in this framework. in iflib_msix_init()
6583 if (bar != -1) { in iflib_msix_init()
6584 ctx->ifc_msix_mem = bus_alloc_resource_any(dev, in iflib_msix_init()
6586 if (ctx->ifc_msix_mem == NULL) { in iflib_msix_init()
6587 device_printf(dev, "Unable to map MSI-X table\n"); in iflib_msix_init()
6592 admincnt = sctx->isc_admin_intrcnt; in iflib_msix_init()
6595 queuemsgs = min(msgs - admincnt, 1); in iflib_msix_init()
6597 queuemsgs = msgs - admincnt; in iflib_msix_init()
6604 queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); in iflib_msix_init()
6608 CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); in iflib_msix_init()
6614 if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) in iflib_msix_init()
6619 if (rx_queues > scctx->isc_nrxqsets) in iflib_msix_init()
6620 rx_queues = scctx->isc_nrxqsets; in iflib_msix_init()
6630 if (tx_queues > scctx->isc_ntxqsets) in iflib_msix_init()
6631 tx_queues = scctx->isc_ntxqsets; in iflib_msix_init()
6633 if (ctx->ifc_sysctl_qs_eq_override == 0) { in iflib_msix_init()
6644 vectors = rx_queues + admincnt; in iflib_msix_init()
6645 if (msgs < vectors) { in iflib_msix_init()
6647 "insufficient number of MSI-X vectors " in iflib_msix_init()
6648 "(supported %d, need %d)\n", msgs, vectors); in iflib_msix_init()
6654 msgs = vectors; in iflib_msix_init()
6655 if ((err = pci_alloc_msix(dev, &vectors)) == 0) { in iflib_msix_init()
6656 if (vectors != msgs) { in iflib_msix_init()
6658 "Unable to allocate sufficient MSI-X vectors " in iflib_msix_init()
6659 "(got %d, need %d)\n", vectors, msgs); in iflib_msix_init()
6661 if (bar != -1) { in iflib_msix_init()
6663 ctx->ifc_msix_mem); in iflib_msix_init()
6664 ctx->ifc_msix_mem = NULL; in iflib_msix_init()
6668 device_printf(dev, "Using MSI-X interrupts with %d vectors\n", in iflib_msix_init()
6669 vectors); in iflib_msix_init()
6670 scctx->isc_vectors = vectors; in iflib_msix_init()
6671 scctx->isc_nrxqsets = rx_queues; in iflib_msix_init()
6672 scctx->isc_ntxqsets = tx_queues; in iflib_msix_init()
6673 scctx->isc_intr = IFLIB_INTR_MSIX; in iflib_msix_init()
6675 return (vectors); in iflib_msix_init()
6678 "failed to allocate %d MSI-X vectors, err: %d\n", vectors, in iflib_msix_init()
6680 if (bar != -1) { in iflib_msix_init()
6682 ctx->ifc_msix_mem); in iflib_msix_init()
6683 ctx->ifc_msix_mem = NULL; in iflib_msix_init()
6688 vectors = pci_msi_count(dev); in iflib_msix_init()
6689 scctx->isc_nrxqsets = 1; in iflib_msix_init()
6690 scctx->isc_ntxqsets = 1; in iflib_msix_init()
6691 scctx->isc_vectors = vectors; in iflib_msix_init()
6692 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { in iflib_msix_init()
6694 scctx->isc_intr = IFLIB_INTR_MSI; in iflib_msix_init()
6696 scctx->isc_vectors = 1; in iflib_msix_init()
6698 scctx->isc_intr = IFLIB_INTR_LEGACY; in iflib_msix_init()
6701 return (vectors); in iflib_msix_init()
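/*
 * Illustrative sketch (not iflib code): the MSI-X budgeting of
 * iflib_msix_init() above in miniature -- queue vectors are capped by the
 * messages left after reserving admin vectors and by the CPU count, and
 * the final request is RX queues plus admin.  Values are invented.
 */
#include <stdio.h>

int
main(void)
{
	int msgs = 16, admincnt = 1, ncpus = 8;
	int queuemsgs = msgs - admincnt;
	int queues = queuemsgs < ncpus ? queuemsgs : ncpus;
	int rx_queues = queues, tx_queues = queues;
	int vectors = rx_queues + admincnt;

	printf("rxq=%d txq=%d vectors=%d\n", rx_queues, tx_queues, vectors);
	return (0);
}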
6710 uint16_t *state = ((uint16_t *)oidp->oid_arg1); in mp_ring_state_handler()
6751 ndesc = ctx->ifc_sysctl_ntxds; in mp_ndesc_handler()
6752 if (ctx->ifc_sctx) in mp_ndesc_handler()
6753 nqs = ctx->ifc_sctx->isc_ntxqs; in mp_ndesc_handler()
6756 ndesc = ctx->ifc_sysctl_nrxds; in mp_ndesc_handler()
6757 if (ctx->ifc_sctx) in mp_ndesc_handler()
6758 nqs = ctx->ifc_sctx->isc_nrxqs; in mp_ndesc_handler()
6776 if (rc || req->newptr == NULL) in mp_ndesc_handler()
6795 thresh = ctx->ifc_sysctl_tx_reclaim_thresh; in iflib_handle_tx_reclaim_thresh()
6801 if (thresh == ctx->ifc_sysctl_tx_reclaim_thresh) in iflib_handle_tx_reclaim_thresh()
6804 if (thresh > ctx->ifc_softc_ctx.isc_ntxd[0] / 2) { in iflib_handle_tx_reclaim_thresh()
6805 device_printf(ctx->ifc_dev, "TX Reclaim thresh must be <= %d\n", in iflib_handle_tx_reclaim_thresh()
6806 ctx->ifc_softc_ctx.isc_ntxd[0] / 2); in iflib_handle_tx_reclaim_thresh()
6810 ctx->ifc_sysctl_tx_reclaim_thresh = thresh; in iflib_handle_tx_reclaim_thresh()
6811 if (ctx->ifc_txqs == NULL) in iflib_handle_tx_reclaim_thresh()
6814 txq = &ctx->ifc_txqs[0]; in iflib_handle_tx_reclaim_thresh()
6816 txq->ift_reclaim_thresh = thresh; in iflib_handle_tx_reclaim_thresh()
6829 ticks = ctx->ifc_sysctl_tx_reclaim_ticks; in iflib_handle_tx_reclaim_ticks()
6835 if (ticks == ctx->ifc_sysctl_tx_reclaim_ticks) in iflib_handle_tx_reclaim_ticks()
6839 device_printf(ctx->ifc_dev, in iflib_handle_tx_reclaim_ticks()
6844 ctx->ifc_sysctl_tx_reclaim_ticks = ticks; in iflib_handle_tx_reclaim_ticks()
6845 if (ctx->ifc_txqs == NULL) in iflib_handle_tx_reclaim_ticks()
6848 txq = &ctx->ifc_txqs[0]; in iflib_handle_tx_reclaim_ticks()
6850 txq->ift_reclaim_ticks = ticks; in iflib_handle_tx_reclaim_ticks()
6866 ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, in iflib_add_device_sysctl_pre()
6872 CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version"); in iflib_add_device_sysctl_pre()
6875 CTLFLAG_RDTUN, &ctx->ifc_sysctl_simple_tx, 0, in iflib_add_device_sysctl_pre()
6878 CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, in iflib_add_device_sysctl_pre()
6881 CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, in iflib_add_device_sysctl_pre()
6884 CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, in iflib_add_device_sysctl_pre()
6887 CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0, in iflib_add_device_sysctl_pre()
6888 "disable MSI-X (default 0)"); in iflib_add_device_sysctl_pre()
6890 CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the RX budget"); in iflib_add_device_sysctl_pre()
6892 CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0, in iflib_add_device_sysctl_pre()
6894 ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED; in iflib_add_device_sysctl_pre()
6896 CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0, in iflib_add_device_sysctl_pre()
6899 CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0, in iflib_add_device_sysctl_pre()
6902 CTLFLAG_RDTUN, &ctx->ifc_sysctl_use_logical_cores, 0, in iflib_add_device_sysctl_pre()
6905 CTLFLAG_RDTUN, &ctx->ifc_sysctl_extra_msix_vectors, 0, in iflib_add_device_sysctl_pre()
6906 …"attempt to reserve the given number of extra MSI-X vectors during driver load for the creation of… in iflib_add_device_sysctl_pre()
6908 CTLFLAG_RDTUN, &ctx->ifc_softc_ctx.isc_vectors, 0, in iflib_add_device_sysctl_pre()
6909 "total # of MSI-X vectors allocated by driver"); in iflib_add_device_sysctl_pre()
6911 /* XXX change for per-queue sizes */ in iflib_add_device_sysctl_pre()
/* in iflib_add_device_sysctl_post() */
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	/* ... */
	node = ctx->ifc_sysctl_node;
	/* ... */
	if (scctx->isc_ntxqsets > 100)
	/* ... */
	else if (scctx->isc_ntxqsets > 10)
	/* ... */
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
	/* ... */
		SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
		    CTLFLAG_RD, &txq->ift_task.gt_cpu, 0,
		    "cpu this queue is bound to");
	/* ... */
		    CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed");
	/* ... */
		    CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued");
	/* ... */
		    CTLFLAG_RD, &txq->ift_mbuf_defrag,
	/* ... */
		    CTLFLAG_RD, &txq->ift_pullups,
	/* ... */
		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
	/* ... */
		    "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail,
	/* ... */
		    "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed,
	/* ... */
		    "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig,
	/* ... */
		    "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup,
	/* ... */
		    CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index");
	/* ... */
		    CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index");
	/* ... */
		    "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed,
	/* ... */
		    CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use");
	/* ... */
		    "txq_processed", CTLFLAG_RD, &txq->ift_processed,
	/* ... */
		    CTLFLAG_RD, &txq->ift_cleaned, "total cleaned");
	/* ... */
		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
	/* ... */
		    "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues,
	/* ... */
		    "r_drops", CTLFLAG_RD, &txq->ift_br->drops,
	/* ... */
		    "r_starts", CTLFLAG_RD, &txq->ift_br->starts,
	/* ... */
		    "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls,
	/* ... */
		    "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts,
	/* ... */
		    "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications,
	/* ... */
	if (scctx->isc_nrxqsets > 100)
	/* ... */
	else if (scctx->isc_nrxqsets > 10)
	/* ... */
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
	/* ... */
		SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
		    CTLFLAG_RD, &rxq->ifr_task.gt_cpu, 0,
		    "cpu this queue is bound to");
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
	/* ... */
			    "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1,
	/* ... */
		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
	/* ... */
			    CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index");
	/* ... */
			    CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index");
	/* ... */
			    CTLFLAG_RD, &fl->ifl_credits, 1,
	/* ... */
			    CTLFLAG_RD, &fl->ifl_buf_size, 1, "buffer size");
	/* ... */
			    "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued,
	/* ... */
			    "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued,
	/* ... */
			    "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued,
	/* ... */
			    "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued,
/* in iflib_request_reset() */
	ctx->ifc_flags |= IFC_DO_RESET;
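/*
 * Editor's sketch (not iflib source, which drives this through its own
 * gtaskqueue admin task): the general deferred-reset idiom the line
 * above belongs to.  The requester only sets a flag under the state
 * lock and wakes a task; the actual reinit runs later from a sleepable
 * context.  struct ex_ctx, EX_DO_RESET, and ex_admin_task are
 * illustrative placeholders (ex_admin_task must have been set up with
 * TASK_INIT and its handler must check ex_flags).
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

struct ex_ctx {
	struct mtx	ex_state_mtx;
	uint32_t	ex_flags;
#define	EX_DO_RESET	0x1
	struct task	ex_admin_task;
};

static void
ex_request_reset(struct ex_ctx *ctx)
{
	mtx_lock(&ctx->ex_state_mtx);
	ctx->ex_flags |= EX_DO_RESET;
	mtx_unlock(&ctx->ex_state_mtx);
	taskqueue_enqueue(taskqueue_thread, &ctx->ex_admin_task);
}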
/* in iflib_fixup_rx() */
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	/* ... */
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
	/* ... */
		n->m_next = m;
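/*
 * Editor's sketch (not iflib source; the elided lines are filled in
 * with the obvious structure, so treat this as illustrative): the
 * realignment helper the fragment above comes from.  A 14-byte
 * Ethernet header in front of a 4-byte-aligned buffer leaves the IP
 * header at offset 14, which strict-alignment machines cannot load
 * 32 bits at a time.  When the payload fits, slide the whole frame up
 * by ETHER_HDR_LEN (14 mod 4 == 2, so IP then lands on a 4-byte
 * boundary); otherwise peel the Ethernet header into its own mbuf.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <net/ethernet.h>

static struct mbuf *
ex_realign_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* In-place shift: room exists past the current payload. */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		return (m);
	}
	/* No room: prepend a small mbuf holding just the header. */
	n = m_gethdr(M_NOWAIT, MT_DATA);
	if (n == NULL) {
		m_freem(m);
		return (NULL);
	}
	bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
	m->m_data += ETHER_HDR_LEN;
	m->m_len -= ETHER_HDR_LEN;
	n->m_len = ETHER_HDR_LEN;
	M_MOVE_PKTHDR(n, m);	/* n becomes the packet head */
	n->m_next = m;
	return (n);
}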
/* in iflib_debugnet_init() */
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
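/*
 * Editor's sketch (not iflib source): a debugnet init callback reports
 * how many RX rings, how many clusters, and what cluster size the
 * kernel-dump path should provision.  The fragment above derives these
 * from the first rxq's free list; a minimal driver could answer
 * statically, as here.  The values chosen are illustrative.
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/debugnet.h>

static void
ex_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	*nrxr = 1;		/* one RX ring suffices for netdump */
	*ncl = 256;		/* clusters to preallocate */
	*clsize = MCLBYTES;	/* standard 2 KB clusters */
}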
/* in iflib_debugnet_event() */
	scctx = &ctx->ifc_softc_ctx;
	/* ... */
	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		rxq = &ctx->ifc_rxqs[i];
		for (j = 0; j < rxq->ifr_nfl; j++) {
			/* Index each free list, not only the first one. */
			fl = &rxq->ifr_fl[j];
			fl->ifl_zone = m_getzone(fl->ifl_buf_size);
/* in iflib_debugnet_transmit() */
	txq = &ctx->ifc_txqs[0];
/* in iflib_debugnet_poll() */
	scctx = &ctx->ifc_softc_ctx;
	/* ... */
	txq = &ctx->ifc_txqs[0];
	/* ... */
	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
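/*
 * Editor's sketch (not iflib source): debugnet runs with interrupts
 * disabled, so the poll callback must do the ISR's work synchronously,
 * reclaiming completed TX descriptors and then harvesting a bounded
 * batch of RX packets per ring.  ex_txeof()/ex_rxeof() stand in for a
 * driver's real completion routines (iflib uses its own reclaim and
 * iflib_rxeof(), as above); struct ex_dn_softc is a placeholder.
 */
#include <sys/param.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/debugnet.h>

struct ex_dn_softc {
	void	 *ex_txq;
	void	**ex_rxq;
	int	  ex_nrxq;
};

static void
ex_txeof(void *txq __unused)
{
	/* Reclaim completed TX descriptors; driver-specific. */
}

static int
ex_rxeof(void *rxq __unused, int budget __unused)
{
	/* Receive up to 'budget' packets; driver-specific. */
	return (0);
}

static int
ex_debugnet_poll(if_t ifp, int count __unused)
{
	struct ex_dn_softc *sc = if_getsoftc(ifp);
	int i;

	ex_txeof(sc->ex_txq);			/* free already-sent buffers */
	for (i = 0; i < sc->ex_nrxq; i++)
		(void)ex_rxeof(sc->ex_rxq[i], 16);	/* bounded RX batch */
	return (0);
}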
/* in iflib_simple_select_queue() */
	qidx = NTXQSETS(ctx) + FIRST_QSET(ctx) - 1;
	return (&ctx->ifc_txqs[qidx]);
/* in iflib_simple_transmit() */
	mtx_lock(&txq->ift_mtx);
	/* ... */
		bytes_sent += m->m_pkthdr.len;
		mcast_sent += !!(m->m_flags & M_MCAST);
	/* ... */
	mtx_unlock(&txq->ift_mtx);
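/*
 * Editor's sketch (not iflib source): the fragment above accumulates
 * byte and multicast counts inside the locked transmit loop and
 * publishes them in bulk afterwards, keeping per-packet work minimal.
 * if_inc_counter() is the standard way to credit ifnet statistics;
 * ex_account_tx() is an illustrative helper.
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>

static void
ex_account_tx(if_t ifp, int pkt_sent, int bytes_sent, int mcast_sent)
{
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
	if (mcast_sent != 0)
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
}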